From cc7a1d37177c1558b9e590d3393f128af7ecfd92 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Fri, 29 Oct 2021 19:17:50 +0200 Subject: [PATCH 001/113] Remove non-collection (legacy) samples Signed-off-by: Achim Christ --- samples/legacy/daemon_admin_network | 13 -- samples/legacy/hosts | 6 - samples/legacy/playbook_aws.yml | 195 ------------------- samples/legacy/playbook_callhome.yml | 27 --- samples/legacy/playbook_ces.yml | 29 --- samples/legacy/playbook_ces_hdfs.yml | 28 --- samples/legacy/playbook_ces_object.yml | 23 --- samples/legacy/playbook_cloud.yml | 29 --- samples/legacy/playbook_directory.yml | 20 -- samples/legacy/playbook_fileauditlogging.yml | 24 --- samples/legacy/playbook_json_ces.yml | 38 ---- samples/legacy/playbook_localpkg.yml | 22 --- samples/legacy/playbook_nodeclass.yml | 46 ----- samples/legacy/playbook_remote_mount.yml | 27 --- samples/legacy/playbook_remotepkg.yml | 20 -- samples/legacy/playbook_repository.yml | 25 --- samples/legacy/playbook_storage.yml | 23 --- samples/legacy/vars | 1 - 18 files changed, 596 deletions(-) delete mode 100644 samples/legacy/daemon_admin_network delete mode 100644 samples/legacy/hosts delete mode 100644 samples/legacy/playbook_aws.yml delete mode 100644 samples/legacy/playbook_callhome.yml delete mode 100644 samples/legacy/playbook_ces.yml delete mode 100644 samples/legacy/playbook_ces_hdfs.yml delete mode 100644 samples/legacy/playbook_ces_object.yml delete mode 100644 samples/legacy/playbook_cloud.yml delete mode 100644 samples/legacy/playbook_directory.yml delete mode 100644 samples/legacy/playbook_fileauditlogging.yml delete mode 100644 samples/legacy/playbook_json_ces.yml delete mode 100644 samples/legacy/playbook_localpkg.yml delete mode 100644 samples/legacy/playbook_nodeclass.yml delete mode 100644 samples/legacy/playbook_remote_mount.yml delete mode 100644 samples/legacy/playbook_remotepkg.yml delete mode 100644 samples/legacy/playbook_repository.yml delete mode 100644 samples/legacy/playbook_storage.yml delete mode 120000 samples/legacy/vars diff --git a/samples/legacy/daemon_admin_network b/samples/legacy/daemon_admin_network deleted file mode 100644 index a879bcb7..00000000 --- a/samples/legacy/daemon_admin_network +++ /dev/null @@ -1,13 +0,0 @@ -hosts: -# Sample parameter for the host file for deploying IBM Spectrum Scale (GPFS) cluster -# with admin and daemon network. -# -# To allow ssh to the cluster with the defined scale_admin_nodename only, the sshd_config -# needs to be updated. To allow update of sshd_config set the variables -# scale_prepare_enable_ssh_login and scale_prepare_restrict_ssh_address to true -# (see roles/core/precheck/defaults/main.yml). 
- -[cluster01] -scale01 scale_admin_nodename=scale01 scale_daemon_nodename=scale01d -scale02 scale_admin_nodename=scale02 scale_daemon_nodename=scale02d - diff --git a/samples/legacy/hosts b/samples/legacy/hosts deleted file mode 100644 index c1899ef3..00000000 --- a/samples/legacy/hosts +++ /dev/null @@ -1,6 +0,0 @@ -# hosts: -# Sample host file for deploying IBM Spectrum Scale (GPFS) cluster -[cluster01] -host-vm1 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false is_protocol_node=true -host-vm2 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false is_protocol_node=false -host-vm3 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false is_protocol_node=false diff --git a/samples/legacy/playbook_aws.yml b/samples/legacy/playbook_aws.yml deleted file mode 100644 index 3b7f0ac8..00000000 --- a/samples/legacy/playbook_aws.yml +++ /dev/null @@ -1,195 +0,0 @@ ---- -# -# samples/playbook_cloud.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using -# inventory in JSON format. - -# This file is mandatory to import and it will load inventory variables form -# vars/scale_clusterdefinition.json -- import_playbook: "set_json_variables.yml" - -# Ensure provisioned VMs are up and Passwordless SSH setup -# has been compleated and operational -- name: Check passwordless SSH connection is setup - hosts: scale_node - any_errors_fatal: true - gather_facts: false - connection: local - tasks: - - name: Check passwordless SSH on all scale inventory hosts - shell: ssh -i {{ ansible_ssh_private_key_file }} {{ inventory_hostname }} "echo PASSWDLESS_SSH_ENABLED" - register: result - until: result.stdout.find("PASSWDLESS_SSH_ENABLED") != -1 - retries: 30 - delay: 10 - -# Ensure all provisioned VMs are running the supported OS versions -- name: Check for supported OS - hosts: scale_node - any_errors_fatal: true - gather_facts: true - tasks: - - name: Spectrum Scale Precheck | Check OS Distribution - assert: - that: - - ansible_distribution == "RedHat" - - ansible_distribution_major_version == "7" or ansible_distribution_major_version == "8" - - (ansible_distribution_version is match("7.7") or - ansible_distribution_version is match("7.8") or - ansible_distribution_version is match("8.1") or - ansible_distribution_version is match("8.2")) - fail_msg: "Only instances running RedHat Enterprise Linux version 7.7, 7.8, 8.1 and 8.2 are supported" - -# Setup Spectrum Scale on nodes and create cluster -- hosts: scale_node - any_errors_fatal: true - vars: - - scale_install_directory_pkg_path: /opt/IBM/gpfs_cloud_rpms - roles: - - core/precheck - - core/node - - core/cluster - - gui/precheck - - gui/node - - gui/cluster - - gui/postcheck - - zimon/precheck - - zimon/node - - zimon/cluster - - zimon/postcheck - - # Cloud deployment specific actions after Spectrum Scale - # cluster installation and setup - tasks: - - block: - - name: accept client lisence for compute descriptor node - command: /usr/lpp/mmfs/bin/mmchnode --client -N "computedescnodegrp" - - - name: set filesystem - set_fact: - fs_name: "{{ scale_storage.0.filesystem }}" - when: - - scale_storage is defined - - - name: create empty file on descriptor node - command: /usr/lpp/mmfs/bin/mmdsh -N "computedescnodegrp" touch /var/mmfs/etc/ignoreAnyMount.{{ fs_name }} - - - name: unmount filesystem on descriptor node - command: /usr/lpp/mmfs/bin/mmumount {{ fs_name }} -N "computedescnodegrp" - run_once: true - when: - - scale_sync_replication_config | bool - - - name: 
Prevent kernel upgrade - lineinfile: - path: /etc/yum.conf - line: exclude=kernel* redhat-release* - -# Configure the Spectrum Scale Pagepool setings -- hosts: scale_node - any_errors_fatal: false - gather_facts: true - tasks: - - block: - - name: Spectrum Scale Config | Find Compute Nodes - add_host: - name: "{{ item }}" - groups: scale_compute_members - when: - - hostvars[item]['scale_nodeclass'] is defined and 'computenodegrp' in hostvars[item]['scale_nodeclass'] - with_items: "{{ ansible_play_hosts }}" - changed_when: false - - - name: Spectrum Scale Config | Find Storage Nodes - add_host: - name: "{{ item }}" - groups: scale_storage_members - when: - - hostvars[item]['scale_nodeclass'] is defined and 'storagenodegrp' in hostvars[item]['scale_nodeclass'] - with_items: "{{ ansible_play_hosts }}" - changed_when: false - - - name: Spectrum Scale Config | Determine Compute Node Total Memory - set_fact: - scale_compute_total_mem: "{{ hostvars[item]['ansible_memtotal_mb'] }}" - when: hostvars[item]['ansible_memtotal_mb'] is defined and hostvars[item]['ansible_memtotal_mb'] - with_items: "{{ groups['scale_compute_members'].0 }}" - run_once: true - - - name: Spectrum Scale Config | Determine Storage Node Total Memory - set_fact: - scale_storage_total_mem: "{{ hostvars[item]['ansible_memtotal_mb'] }}" - when: hostvars[item]['ansible_memtotal_mb'] is defined and hostvars[item]['ansible_memtotal_mb'] - with_items: "{{ groups['scale_storage_members'].0 }}" - run_once: true - - - name: Spectrum Scale Config | Determine Compute Node Pagepool Memory - set_fact: - scale_compute_total_mem_per: "{{ ((scale_compute_total_mem | int / 1024) * 0.25) | round(0, 'ceil') | int | abs }}" - when: scale_compute_total_mem is defined - run_once: true - - - name: Spectrum Scale Config | Determine Storage Node Pagepool Memory - set_fact: - scale_storage_total_mem_per: "{{ ((scale_storage_total_mem | int / 1024) * 0.25) | round(0, 'ceil') | int | abs }}" - when: scale_storage_total_mem is defined - run_once: true - - - name: Spectrum Scale Config | Define Compute Raw Pagepool Size - set_fact: - pagepool_compute: "{{ scale_compute_total_mem_per }}" - when: scale_compute_total_mem_per is defined - run_once: true - - - name: Spectrum Scale Config | Define Storage Raw Pagepool Size - set_fact: - pagepool_storage: "{{ scale_storage_total_mem_per }}" - when: scale_storage_total_mem_per is defined - run_once: true - - - name: Spectrum Scale Config | Check Compute Pagepool Floor Value - set_fact: - pagepool_compute: "1" - when: - - pagepool_compute is defined - - pagepool_compute | int < 1 - run_once: true - - - name: Spectrum Scale Config | Check Compute Pagepool Ceiling Value - set_fact: - pagepool_compute: "16" - when: - - pagepool_compute is defined - - pagepool_compute | int > 16 - run_once: true - - - name: Spectrum Scale Config | Check Storage Pagepool Floor Value - set_fact: - pagepool_storage: "1" - when: - - pagepool_storage is defined - - pagepool_storage | int < 1 - run_once: true - - - name: Spectrum Scale Config | Check Storage Pagepool Ceiling Value - set_fact: - pagepool_compute: "16" - when: - - pagepool_storage is defined - - pagepool_storage | int > 16 - run_once: true - - - name: Spectrum Scale Config | Assign Compute Pagepool - command: "/usr/lpp/mmfs/bin/mmchconfig pagepool={{ pagepool_compute }}G -i -N computenodegrp" - when: - - pagepool_compute is defined - run_once: true - - - name: Spectrum Scale Config | Assign Storage Pagepool - command: "/usr/lpp/mmfs/bin/mmchconfig pagepool={{ pagepool_storage 
}}G -i -N storagenodegrp" - when: - - pagepool_storage is defined - run_once: true - diff --git a/samples/legacy/playbook_callhome.yml b/samples/legacy/playbook_callhome.yml deleted file mode 100644 index 6c833399..00000000 --- a/samples/legacy/playbook_callhome.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -# -# samples/playbook_callhome.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with Call Home -# enabled. Additional variables need to be defined for this, it is recommended -# to use Ansible group variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -# Sample definitions can be found in samples/vars/callhome_vars.yml - -- hosts: cluster01 - vars: - - scale_version: 5.0.4.0 - - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - pre_tasks: - - include_vars: callhome_vars.yml - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - callhome/precheck - - callhome/node - - callhome/cluster - - callhome/postcheck diff --git a/samples/legacy/playbook_ces.yml b/samples/legacy/playbook_ces.yml deleted file mode 100644 index d36c67aa..00000000 --- a/samples/legacy/playbook_ces.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# -# samples/playbook_ces.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with Cluster -# Export Services (CES). Additional variables need to be defined for this, it is -# recommended to use Ansible group variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -# Sample definitions can be found in samples/vars/ces_vars.yml - -- hosts: cluster01 - vars: - - scale_version: 5.0.4.0 - - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - pre_tasks: - - include_vars: ces_vars.yml - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - nfs/precheck - - nfs/node - - nfs/cluster - - smb/precheck - - smb/node - - smb/cluster diff --git a/samples/legacy/playbook_ces_hdfs.yml b/samples/legacy/playbook_ces_hdfs.yml deleted file mode 100644 index afdfe2fb..00000000 --- a/samples/legacy/playbook_ces_hdfs.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -# -# samples/playbook_ces_hdfs.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with Cluster -# Export Services (CES). 
Additional variables need to be defined for this, it is -# recommended to use Ansible group variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -# Sample definitions can be found in samples/vars/hdfs_cluster_vars.yml - -- hosts: cluster01 - any_errors_fatal: true - vars: - - scale_version: 5.1.1.0 - - scale_install_localpkg_path: /root/Spectrum_Scale_Advanced-5.1.1.0-x86_64-Linux-install - pre_tasks: - - include_vars: hdfs_cluster_vars.yml - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - scale_hdfs/precheck - - scale_hdfs/node - - scale_hdfs/cluster - - scale_hdfs/postcheck \ No newline at end of file diff --git a/samples/legacy/playbook_ces_object.yml b/samples/legacy/playbook_ces_object.yml deleted file mode 100644 index 808a442e..00000000 --- a/samples/legacy/playbook_ces_object.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# -# samples/playbook_ces_object.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with Cluster -# Export Services (CES). Additional variables need to be defined for this, it is -# recommended to use Ansible group variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -# Sample definitions can be found in samples/vars/scale_object_vars.yml - -- hosts: cluster01 - vars: - - scale_install_localpkg_path: /root/Spectrum_Scale_Advanced-5.1.1.0-x86_64-Linux-install - pre_tasks: - - include_vars: scale_object_vars.yml - roles: - - scale_object/precheck - - scale_object/node - - scale_object/cluster - - scale_object/postcheck - diff --git a/samples/legacy/playbook_cloud.yml b/samples/legacy/playbook_cloud.yml deleted file mode 100644 index 82c89ddc..00000000 --- a/samples/legacy/playbook_cloud.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# -# samples/playbook_cloud.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using -# inventory in JSON format. - -# This file is mandatory to import and it will load inventory variables form -# vars/scale_clusterdefinition.json -- import_playbook: "set_json_variables.yml" - -# Setup Spectrum Scale on nodes and create cluster -- hosts: scale_node - any_errors_fatal: true - vars: - - scale_install_directory_pkg_path: /opt/IBM/gpfs_cloud_rpms - roles: - - core/precheck - - core/node - - core/cluster - - gui/precheck - - gui/node - - gui/cluster - - gui/postcheck - - zimon/precheck - - zimon/node - - zimon/cluster - - zimon/postcheck diff --git a/samples/legacy/playbook_directory.yml b/samples/legacy/playbook_directory.yml deleted file mode 100644 index 7372a97c..00000000 --- a/samples/legacy/playbook_directory.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -# -# samples/playbook_directory.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using the -# directory installation method. You need to keep all required Spectrum Scale -# packages in a single user-provided directory. - -# Note that specifying the variable 'scale_version' is *not* required for this -# installation method. 
- -- hosts: cluster01 - vars: - - scale_install_directory_pkg_path: /root/spectrum_scale_packages - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck diff --git a/samples/legacy/playbook_fileauditlogging.yml b/samples/legacy/playbook_fileauditlogging.yml deleted file mode 100644 index 93f8705d..00000000 --- a/samples/legacy/playbook_fileauditlogging.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# -# samples/playbook_fileauditlogging.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with File -# Audit Logging (FAL) enabled. - -# Sample definitions can be found in samples/vars/fal_vars.yml - -- hosts: cluster01 - vars: - - scale_version: 5.0.4.0 - - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - pre_tasks: - - include_vars: fal_vars.yml - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - scale_fileauditlogging/precheck - - scale_fileauditlogging/node - - scale_fileauditlogging/cluster diff --git a/samples/legacy/playbook_json_ces.yml b/samples/legacy/playbook_json_ces.yml deleted file mode 100644 index 04798032..00000000 --- a/samples/legacy/playbook_json_ces.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -# -# samples/playbook_json_ces.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using -# inventory in JSON format. This sample also contains protocols (NFS & SMB), -# callhome and file audit logging. - -# This file is mandatory to import and it will load inventory variables form -# samples/vars/scale_clusterdefinition.json -- import_playbook: set_json_variables.yml - -- hosts: scale_node - any_errors_fatal: true - vars: - - scale_install_localpkg_path: /root/Spectrum_Scale_Advanced-5.1.0.0-x86_64-Linux-install - roles: - - core/precheck - - core/node - - core/cluster - - gui/precheck - - gui/node - - gui/cluster - - gui/postcheck - - zimon/precheck - - zimon/node - - zimon/cluster - - zimon/postcheck - - callhome/precheck - - callhome/cluster - - nfs/precheck - - nfs/node - - nfs/cluster - - smb/node - - smb/cluster - - scale_fileauditlogging/node - - scale_fileauditlogging/cluster diff --git a/samples/legacy/playbook_localpkg.yml b/samples/legacy/playbook_localpkg.yml deleted file mode 100644 index 78b359fd..00000000 --- a/samples/legacy/playbook_localpkg.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# -# samples/playbook_localpkg.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using local -# archive installation method. This means that the self-extracting archive -# containing the Spectrum Scale code is accessible on the Ansible control -# machine running the playbook. Integrity of the archive will be validated by -# comparing checksums with a *.md5 reference file (if present), the archive will -# be copied to each managed node in your cluster -# ('scale_install_localpkg_tmpdir_path'), and subsequently the archive will be -# extracted. Packages will then be installed from the local files on each node. 
- -- hosts: cluster01 - vars: - - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck diff --git a/samples/legacy/playbook_nodeclass.yml b/samples/legacy/playbook_nodeclass.yml deleted file mode 100644 index 06931b28..00000000 --- a/samples/legacy/playbook_nodeclass.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -# -# samples/playbook_nodeclass.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with node -# classes and Spectrum Scale configuration attributes. Node classes can be -# defined on a per-node basis by defining the `scale_nodeclass` variable, it is -# recommended to use Ansible host variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-one-machine-host-variables - -# Here is an example definition of these node classes for hosts 'scale01' and -# 'scale02': -# ``` -# # host_vars/scale01: -# --- -# scale_nodeclass: -# - classA -# - classB -# ``` -# ``` -# # host_vars/scale02: -# --- -# scale_nodeclass: -# - classA -# - classC -# ``` - -# These node classes can optionally be used to define Spectrum Scale -# configuration attributes. It is recommended to use Ansible group variables for -# this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -# Sample definitions can be found in samples/vars/config_vars.yml - -- hosts: cluster01 - vars: - - scale_version: 5.0.4.0 - - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - pre_tasks: - - include_vars: config_vars.yml - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck diff --git a/samples/legacy/playbook_remote_mount.yml b/samples/legacy/playbook_remote_mount.yml deleted file mode 100644 index 04c92735..00000000 --- a/samples/legacy/playbook_remote_mount.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -# -# samples/playbook_remote_mount.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with Remote_Mount -# enabled. 
Additional variables need to be defined for this, it is recommended -# to use Ansible group variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -- hosts: localhost - vars: - - scale_remotemount_client_gui_username: admin - - scale_remotemount_client_gui_password: Admin@GUI - - scale_remotemount_client_gui_hostname: 10.10.10.10 - - scale_remotemount_storage_gui_username: fs1 - - scale_remotemount_client_remotemount_path: "/mnt/{{ scale_remotemount_client_filesystem_name }}" - - scale_remotemount_storage_gui_username: "{{ scale_remotemount_client_gui_username }}" - - scale_remotemount_storage_gui_password: "{{ scale_remotemount_client_gui_password }}" - - scale_remotemount_storage_gui_hostname: 10.10.10.20 - - scale_remotemount_storage_filesystem_name: gpfs01 - pre_tasks: - roles: - - remote_mount - -# If Accessing/Client Cluster don't have GUI, -# Then change wee need to add variable scale_remotemount_client_no_gui: true and ansible "hosts" need to point to one of the Scale client cluster node diff --git a/samples/legacy/playbook_remotepkg.yml b/samples/legacy/playbook_remotepkg.yml deleted file mode 100644 index 540a523e..00000000 --- a/samples/legacy/playbook_remotepkg.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -# -# samples/playbook_remotepkg.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using remote -# archive installation method. This means that the self-extracting archive -# containing the Spectrum Scale code is accessible on each Ansible managed node. -# Integrity of the archive will be validated by comparing checksums with a *.md5 -# reference file (if present), and subsequently the archive will be extracted. -# Packages will then be installed from the local files on each node. - -- hosts: cluster01 - vars: - - scale_install_remotepkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck diff --git a/samples/legacy/playbook_repository.yml b/samples/legacy/playbook_repository.yml deleted file mode 100644 index 0de9641b..00000000 --- a/samples/legacy/playbook_repository.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -# -# samples/playbook_repository.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using -# repository installation method. You will need to provide the URL of an -# (existing) Spectrum Scale YUM repository. Copy the contents of -# /usr/lpp/mmfs/{{ scale_version }}/* to a web server to build your repository. -# A YUM repository will be defined on each managed node in your cluster. -# Packages will then be installed from this central repository. - -# Note that specifying the variable 'scale_version' is mandatory for this -# installation method. - -- hosts: cluster01 - vars: - - scale_version: 5.0.4.0 - # Remember the trailing slash `/` in the URL - - scale_install_repository_url: http://server/path/ - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck diff --git a/samples/legacy/playbook_storage.yml b/samples/legacy/playbook_storage.yml deleted file mode 100644 index a9b1deed..00000000 --- a/samples/legacy/playbook_storage.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# -# samples/playbook_storage.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with storage -# (NSDs) and file systems. 
Additional variables need to be defined for this, it -# is recommended to use Ansible group variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -# Sample definitions can be found in samples/vars/storage_vars.yml - -- hosts: cluster01 - vars: - - scale_version: 5.0.4.0 - - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - pre_tasks: - - include_vars: storage_vars.yml - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck diff --git a/samples/legacy/vars b/samples/legacy/vars deleted file mode 120000 index b11f011a..00000000 --- a/samples/legacy/vars +++ /dev/null @@ -1 +0,0 @@ -../vars/ \ No newline at end of file From cc5ae1ef328ced3f56e7310028705d8893ad2d13 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Fri, 29 Oct 2021 19:18:45 +0200 Subject: [PATCH 002/113] Remove alternative (legacy) installation methods Signed-off-by: Achim Christ --- README.md | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/README.md b/README.md index 17c202fb..aeaaa8ca 100644 --- a/README.md +++ b/README.md @@ -173,21 +173,6 @@ Installation Instructions └── playbook.yml ``` - - **Alternatives - now deprecated!** - - Alternatively, you can clone the project repository and create your [Ansible playbook](https://docs.ansible.com/ansible/latest/user_guide/playbooks.html) inside the repository's directory structure: - - ```shell - $ git clone https://github.com/IBM/ibm-spectrum-scale-install-infra.git - $ cd ibm-spectrum-scale-install-infra - ``` - - Yet another alternative, you can also define an [Ansible environment variable](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#envvar-ANSIBLE_ROLES_PATH) to make the roles accessible in any external project directory: - - ```shell - $ export ANSIBLE_ROLES_PATH=$(pwd)/ibm-spectrum-scale-install-infra/roles/ - ``` - - **Create Ansible inventory** Define Spectrum Scale nodes in the [Ansible inventory](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) (e.g. 
`hosts`) in the following format: From b0c1ce1dae09fae06b99e84a4c315a552fc71a56 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Fri, 29 Oct 2021 19:21:24 +0200 Subject: [PATCH 003/113] Remove custom_module role in favor of global plugins/ directory Signed-off-by: Achim Christ --- roles/core/cluster/meta/main.yml | 3 +- roles/custom_module/defaults/main.yml | 2 - roles/custom_module/inventory/hosts | 28 - roles/custom_module/library/__init.py__ | 0 .../library/ibm_spectrumscale_cluster.py | 225 ----- .../library/ibm_spectrumscale_filesystem.py | 290 ------ .../library/ibm_spectrumscale_node.py | 926 ------------------ roles/custom_module/meta/main.yml | 19 - .../ibm_spectrumscale_cluster_utils.py | 643 ------------ .../ibm_spectrumscale_df_utils.py | 166 ---- .../ibm_spectrumscale_disk_utils.py | 229 ----- .../ibm_spectrumscale_filesystem_utils.py | 420 -------- .../ibm_spectrumscale_nsd_utils.py | 185 ---- .../module_utils/ibm_spectrumscale_utils.py | 688 ------------- .../ibm_spectrumscale_zimon_utils.py | 79 -- roles/custom_module/tasks/main.yml | 2 - .../cluster/playbooks/cluster-get-test.yaml | 51 - .../filesystem/playbooks/filesystem-test.yaml | 29 - .../test/node/common/AddNodeStanza.j2 | 4 - .../test/node/playbooks/node-add-test.yaml | 35 - .../test/node/playbooks/node-get-test.yaml | 40 - .../test/node/playbooks/node-remove-test.yaml | 162 --- .../test/node/playbooks/node-status-test.yaml | 23 - .../test/node/python/add-node.json | 8 - .../test/node/python/remove-node.json | 6 - 25 files changed, 1 insertion(+), 4262 deletions(-) delete mode 100644 roles/custom_module/defaults/main.yml delete mode 100644 roles/custom_module/inventory/hosts delete mode 100644 roles/custom_module/library/__init.py__ delete mode 100644 roles/custom_module/library/ibm_spectrumscale_cluster.py delete mode 100644 roles/custom_module/library/ibm_spectrumscale_filesystem.py delete mode 100644 roles/custom_module/library/ibm_spectrumscale_node.py delete mode 100644 roles/custom_module/meta/main.yml delete mode 100644 roles/custom_module/module_utils/ibm_spectrumscale_cluster_utils.py delete mode 100644 roles/custom_module/module_utils/ibm_spectrumscale_df_utils.py delete mode 100644 roles/custom_module/module_utils/ibm_spectrumscale_disk_utils.py delete mode 100644 roles/custom_module/module_utils/ibm_spectrumscale_filesystem_utils.py delete mode 100644 roles/custom_module/module_utils/ibm_spectrumscale_nsd_utils.py delete mode 100755 roles/custom_module/module_utils/ibm_spectrumscale_utils.py delete mode 100644 roles/custom_module/module_utils/ibm_spectrumscale_zimon_utils.py delete mode 100644 roles/custom_module/tasks/main.yml delete mode 100644 roles/custom_module/test/cluster/playbooks/cluster-get-test.yaml delete mode 100644 roles/custom_module/test/filesystem/playbooks/filesystem-test.yaml delete mode 100644 roles/custom_module/test/node/common/AddNodeStanza.j2 delete mode 100644 roles/custom_module/test/node/playbooks/node-add-test.yaml delete mode 100644 roles/custom_module/test/node/playbooks/node-get-test.yaml delete mode 100644 roles/custom_module/test/node/playbooks/node-remove-test.yaml delete mode 100644 roles/custom_module/test/node/playbooks/node-status-test.yaml delete mode 100644 roles/custom_module/test/node/python/add-node.json delete mode 100644 roles/custom_module/test/node/python/remove-node.json diff --git a/roles/core/cluster/meta/main.yml b/roles/core/cluster/meta/main.yml index 26132da7..6e123370 100644 --- a/roles/core/cluster/meta/main.yml +++ 
b/roles/core/cluster/meta/main.yml @@ -18,5 +18,4 @@ galaxy_info: - scale - gpfs -dependencies: - - custom_module +dependencies: [] diff --git a/roles/custom_module/defaults/main.yml b/roles/custom_module/defaults/main.yml deleted file mode 100644 index 1ca33279..00000000 --- a/roles/custom_module/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# Default variables for the IBM Spectrum Scale (GPFS) custom module - diff --git a/roles/custom_module/inventory/hosts b/roles/custom_module/inventory/hosts deleted file mode 100644 index 74d91ba1..00000000 --- a/roles/custom_module/inventory/hosts +++ /dev/null @@ -1,28 +0,0 @@ -[scale_cluster] -node1.domain.com -node2.domain.com -node3.doamin.com -node4.domain.com - -[controller] -node1.domain.com - -[quorum_nodes] -node1.domain.com -node2.domain.com -node3.doamin.com - -[manager_nodes] -node1.domain.com -node2.domain.com - -[test_remove_storage_nodes] -node3.doamin.com filesystem="FS1" nsds="nsd3;nsd7" -node4.domain.com filesystem="FS1" nsds="nsd4;nsd8" - -[test_remove_nodes] -node3.doamin.com -node4.domain.com - -[test_add_nodes] -node3.doamin.com designation=client diff --git a/roles/custom_module/library/__init.py__ b/roles/custom_module/library/__init.py__ deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/custom_module/library/ibm_spectrumscale_cluster.py b/roles/custom_module/library/ibm_spectrumscale_cluster.py deleted file mode 100644 index 72f0b50b..00000000 --- a/roles/custom_module/library/ibm_spectrumscale_cluster.py +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -ANSIBLE_METADATA = { - 'status': ['preview'], - 'supported_by': 'IBM', - 'metadata_version': '1.0' - } - - -DOCUMENTATION = ''' ---- -module: ibm_spectrumscale_cluster -short_description: IBM Spectrum Scale Cluster Management -version_added: "0.0" - -description: - - This module can be used to create or delete an IBM Spectrum Scale - Cluster or retrieve information about the cluster. - -options: - op: - description: - - An operation to execute on the IBM Spectrum Scale Cluster. - Mutually exclusive with the state operand. - required: false - state: - description: - - The desired state of the cluster. 
- required: false - default: "present" - choices: [ "present", "absent" ] - stanza: - description: - - Cluster blueprint that defines membership and node attributes - required: false - name: - description: - - The name of the cluster to be created, deleted or whose - information is to be retrieved - required: false - -''' - -EXAMPLES = ''' -# Retrive information about an existing IBM Spectrum Scale cluster -- name: Retrieve IBM Spectrum Scale Cluster information - ibm_spectrumscale_cluster: - op: list - -# Create a new IBM Spectrum Scale Cluster -- name: Create an IBM Spectrum Scale Cluster - ibm_spectrumscale_cluster: - state: present - stanza: "/tmp/stanza" - name: "node1.domain.com" - -# Delete an existing IBM Spectrum Scale Cluster -- name: Delete an IBM Spectrum Scale Cluster - ibm_spectrumscale_cluster: - state: absent - name: "node1.domain.com" -''' - -RETURN = ''' -changed: - description: A boolean indicating if the module has made changes - type: boolean - returned: always - -msg: - description: The output from the cluster create/delete operations - type: str - returned: when supported - -rc: - description: The return code from the IBM Spectrum Scale mm command - type: int - returned: always - -results: - description: The JSON document containing the cluster information - type: str - returned: when supported -''' - -import os -import json -import sys -import traceback -from ansible.module_utils.basic import AnsibleModule - -try: - from ansible.module_utils.ibm_spectrumscale_utils import RC_SUCCESS, SpectrumScaleLogger -except: - from ibm_spectrumscale_utils import RC_SUCCESS, SpectrumScaleLogger - -try: - from ansible.module_utils.ibm_spectrumscale_cluster_utils import SpectrumScaleCluster -except: - from ibm_spectrumscale_cluster_utils import SpectrumScaleCluster - - -def main(): - logger = SpectrumScaleLogger.get_logger() - - logger.debug("------------------------------------") - logger.debug("Function Entry: ibm_spectrumscale_cluster.main()") - logger.debug("------------------------------------") - - # Setup the module argument specifications - scale_arg_spec = dict( - op = dict( - type='str', - choices=['get'], - required=False - ), - state = dict( - type='str', - choices=['present', 'absent'], - required=False - ), - stanza = dict( - type='str', - required=False - ), - name = dict( - type='str', - required=False - ) - ) - - - scale_req_if_args = [ - [ "state", "present", [ "stanza", "name" ] ], - [ "state", "absent", [ "name" ] ] - ] - - scale_req_one_of_args = [ - [ "op", "state" ] - ] - - # Instantiate the Ansible module with the given argument specifications - module = AnsibleModule( - argument_spec=scale_arg_spec, - required_one_of=scale_req_one_of_args, - required_if=scale_req_if_args - ) - - rc = RC_SUCCESS - msg = result_json = "" - state_changed = False - if module.params['op'] and "get" in module.params['op']: - # Retrieve the IBM Spectrum Scale cluster information - try: - scale_cluster = SpectrumScaleCluster() - cluster_info_dict = {} - cluster_info_dict["cluster_info"] = scale_cluster.get_cluster_dict() - result_json = json.dumps(cluster_info_dict) - msg = "Retrieve Cluster information successfully executed" - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - module.fail_json(msg=e_msg) - elif module.params['state']: - if "present" in module.params['state']: - # Create a new IBM Spectrum Scale cluster - try: - cmd_rc, stdout = SpectrumScaleCluster.create_cluster( - module.params['name'], - 
module.params['stanza'] - ) - rc = cmd_rc - msg = "Create Cluster successfully executed" - result_json = stdout - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - module.fail_json(msg=e_msg) - else: - # Delete the existing IBM Spectrum Scale cluster - try: - cmd_rc, stdout = SpectrumScaleCluster.delete_cluster( - module.params['name'] - ) - rc = cmd_rc - msg = "Delete Cluster successfully executed" - result_json = stdout - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - module.fail_json(msg=e_msg) - - - if rc == RC_SUCCESS: - state_changed = True - - logger.debug("------------------------------------") - logger.debug("Function Exit: ibm_spectrumscale_cluster.main()") - logger.debug("------------------------------------") - - SpectrumScaleLogger.shutdown() - - # Module is done. Return back the result - module.exit_json(changed=state_changed, msg=msg, rc=rc, result=result_json) - - -if __name__ == '__main__': - main() diff --git a/roles/custom_module/library/ibm_spectrumscale_filesystem.py b/roles/custom_module/library/ibm_spectrumscale_filesystem.py deleted file mode 100644 index 5bac8e40..00000000 --- a/roles/custom_module/library/ibm_spectrumscale_filesystem.py +++ /dev/null @@ -1,290 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -ANSIBLE_METADATA = { - 'status': ['preview'], - 'supported_by': 'IBM', - 'metadata_version': '1.0' - } - - -DOCUMENTATION = ''' ---- -module: ibm_spectrumscale_filesystem -short_description: IBM Spectrum Scale Filesystem Management -version_added: "0.0" - -description: - - This module can be used to create or delete an IBM Spectrum Scale - filesystem or retrieve information about the filesystem. - -options: - op: - description: - - An operation to execute on the IBM Spectrum Scale filesystem. - Mutually exclusive with the state operand. - required: false - state: - description: - - The desired state of the filesystem. 
- required: false - default: "present" - choices: [ "present", "absent" ] - stanza: - description: - - Filesystem blueprint that defines membership and NSD attributes - required: false - name: - description: - - The name of the filesystem to be created, deleted or whose - information is to be retrieved - required: false - block_size: - description: - - The filesystem blocksize - required: false - default_metadata_replicas: - description: - - The filesystem defaultMetadataReplicas - required: false - default_data_replicas: - description: - - The filesystem defaultDataReplicas - required: false - num_nodes: - description: - - The filesystem numNodes - required: false - automatic_mount_option: - description: - - The filesystem automaticMountOption - required: false - default_mount_point: - description: - - The filesystem defaultMountPoint - required: false - -''' - -EXAMPLES = ''' -# Retrive information about an existing IBM Spectrum Scale filesystem -- name: Retrieve IBM Spectrum Scale filesystem information - ibm_spectrumscale_filesystem: - op: get - -# Create a new IBM Spectrum Scale Filesystem -- name: Create an IBM Spectrum Scale filesystem - ibm_spectrumscale_filesystem: - state: present - stanza: "/tmp/filesystem-stanza" - name: "FS1" - -# Delete an existing IBM Spectrum Scale Filesystem -- name: Delete an IBM Spectrum Scale filesystem - ibm_spectrumscale_filesystem: - state: absent - name: "FS1" -''' - -RETURN = ''' -changed: - description: A boolean indicating if the module has made changes - type: boolean - returned: always - -msg: - description: The output from the filesystem create/delete operations - type: str - returned: when supported - -rc: - description: The return code from the IBM Spectrum Scale mm command - type: int - returned: always - -results: - description: The JSON document containing the filesystem information - type: str - returned: when supported -''' - -import json -import sys -import traceback -from ansible.module_utils.basic import AnsibleModule - -try: - from ansible.module_utils.ibm_spectrumscale_utils import RC_SUCCESS, SpectrumScaleLogger -except: - from ibm_spectrumscale_utils import RC_SUCCESS, SpectrumScaleLogger - -try: - from ansible.module_utils.ibm_spectrumscale_filesystem_utils import SpectrumScaleFS -except: - from ibm_spectrumscale_filesystem_utils import SpectrumScaleFS - - -def main(): - logger = SpectrumScaleLogger.get_logger() - - logger.debug("---------------------------------------") - logger.debug("Function Entry: ibm_spectrumscale_filesystem.main()") - logger.debug("---------------------------------------") - - # Setup the module argument specifications - scale_arg_spec = dict( - op = dict( - type='str', - choices=['get'], - required=False - ), - state = dict( - type='str', - choices=['present', 'absent'], - required=False - ), - stanza = dict( - type='str', - required=False - ), - name = dict( - type='str', - required=False - ), - block_size = dict( - type='str', - required=False - ), - num_nodes = dict( - type='str', - required=False - ), - default_metadata_replicas = dict( - type='str', - required=False - ), - default_data_replicas = dict( - type='str', - required=False - ), - automatic_mount_option = dict( - type='str', - required=False - ), - default_mount_point = dict( - type='str', - required=False - ) - ) - - - scale_req_if_args = [ - [ "state", "present", [ "stanza", - "name", - "block_size", - "num_nodes", - "default_metadata_replicas", - "default_data_replicas", - "automatic_mount_option", - "default_mount_point" ] - ], - [ 
"state", "absent", [ "name" ] ] - ] - - scale_req_one_of_args = [ - [ "op", "state" ] - ] - - # Instantiate the Ansible module with the given argument specifications - module = AnsibleModule( - argument_spec=scale_arg_spec, - required_one_of=scale_req_one_of_args, - required_if=scale_req_if_args - ) - - rc = RC_SUCCESS - msg = result_json = "" - state_changed = False - if module.params['op'] and "get" in module.params['op']: - # Retrieve the IBM Spectrum Scale filesystem information - try: - result_dict = {} - filesystem_list = [] - - filesystems = SpectrumScaleFS.get_filesystems() - for fs in filesystems: - filesystem_info = {} - filesystem_info["deviceName"] = fs.get_device_name() - filesystem_info["properties"] = fs.get_properties_list() - filesystem_list.append(filesystem_info) - - result_dict["filesystems"] = filesystem_list - result_json = json.dumps(result_dict) - - msg = "Successfully retrieved filesystem information" - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - module.fail_json(msg=e_msg) - elif module.params['state']: - if "present" in module.params['state']: - # Create a new IBM Spectrum Scale cluster - try: - rc, result_json = SpectrumScaleFS.create_filesystem( - module.params['stanza'], - module.params['name'], - module.params["block_size"], - module.params["num_nodes"], - module.params["default_metadata_replicas"], - module.params["default_data_replicas"], - module.params["automatic_mount_option"], - module.params["default_mount_point"] - ) - msg = "Successfully created filesystem" - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - module.fail_json(msg=e_msg) - else: - # Delete the existing IBM Spectrum Scale cluster - try: - rc, result_json = SpectrumScaleFS.delete_filesystem( - module.params['name'] - ) - msg = "Successfully deleted filesystem" - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - module.fail_json(msg=e_msg) - - if rc == RC_SUCCESS: - state_changed = True - - logger.debug("---------------------------------------") - logger.debug("Function Exit: ibm_spectrumscale_filesystem.main()") - logger.debug("---------------------------------------") - - logger = SpectrumScaleLogger.shutdown() - - # Module is done. Return back the result - module.exit_json(changed=state_changed, msg=msg, rc=rc, result=result_json) - - -if __name__ == '__main__': - main() diff --git a/roles/custom_module/library/ibm_spectrumscale_node.py b/roles/custom_module/library/ibm_spectrumscale_node.py deleted file mode 100644 index 4a47e327..00000000 --- a/roles/custom_module/library/ibm_spectrumscale_node.py +++ /dev/null @@ -1,926 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -ANSIBLE_METADATA = { - 'status': ['preview'], - 'supported_by': 'IBM', - 'metadata_version': '1.0' - } - - -DOCUMENTATION = ''' ---- -module: ibm_spectrumscale_node -short_description: IBM Spectrum Scale Node Management -version_added: "0.1" - -description: - - This module can be used to add, remove or retrieve information - about an IBM Spectrum Scale Node(s) from the Cluster. - -options: - op: - description: - - An operation to execute on the IBM Spectrum Scale Node. - Mutually exclusive with the state operand. - required: false - state: - description: - - The desired state of the Node in relation to the cluster. - required: false - default: "present" - choices: [ "present", "absent" ] - nodefile: - description: - - Blueprint that defines all node attributes - required: false - name: - description: - - The name of the Node to be added, removed or whose - information is to be retrieved - required: false - -''' - -EXAMPLES = ''' -# Retrive information about an existing IBM Spectrum Scale Node(s) -- name: Retrieve IBM Spectrum Scale Node information - ibm_spectrumscale_node: - op: list - -# Adds a Node to the IBM Spectrum Scale Cluster -- name: Add node to IBM Spectrum Scale Cluster - ibm_spectrumscale_node: - state: present - nodefile: "/tmp/nodefile" - name: "node1.domain.com" - -# Delete an existing IBM Spectrum Node from the Cluster -- name: Delete an IBM Spectrum Scale Node from Cluster - ibm_spectrumscale_node: - state: absent - name: "node1.domain.com" -''' - -RETURN = ''' -changed: - description: A boolean indicating if the module has made changes - type: boolean - returned: always - -msg: - description: The output from the cluster create/delete operations - type: str - returned: when supported - -rc: - description: The return code from the IBM Spectrum Scale mm command - type: int - returned: always - -results: - description: The JSON document containing the cluster information - type: str - returned: when supported -''' - -import os -import re -import sys -import json -import time -import logging -import traceback -from ansible.module_utils.basic import AnsibleModule - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, RC_SUCCESS, \ - parse_aggregate_cmd_output, \ - SpectrumScaleLogger, \ - SpectrumScaleException -except Exception as e: - print(e) - from ibm_spectrumscale_utils import runCmd, RC_SUCCESS, parse_aggregate_cmd_output, \ - SpectrumScaleLogger, SpectrumScaleException - -try: - from ansible.module_utils.ibm_spectrumscale_disk_utils import SpectrumScaleDisk -except Exception as e: - print(e) - from ibm_spectrumscale_disk_utils import SpectrumScaleDisk - -try: - from ansible.module_utils.ibm_spectrumscale_df_utils import SpectrumScaleDf -except: - from ibm_spectrumscale_df_utils import SpectrumScaleDf - -try: - from ansible.module_utils.ibm_spectrumscale_nsd_utils import SpectrumScaleNSD -except: - from ibm_spectrumscale_nsd_utils import SpectrumScaleNSD - -try: - from ansible.module_utils.ibm_spectrumscale_filesystem_utils import SpectrumScaleFS -except: - from ibm_spectrumscale_filesystem_utils import SpectrumScaleFS - -try: - from ansible.module_utils.ibm_spectrumscale_cluster_utils import SpectrumScaleCluster, \ - SpectrumScaleNode -except: - from ibm_spectrumscale_cluster_utils import SpectrumScaleCluster, SpectrumScaleNode - -try: - from ansible.module_utils.ibm_spectrumscale_zimon_utils import get_zimon_collectors -except: - from ibm_spectrumscale_zimon_utils import get_zimon_collectors - 
-############################################################################### -## ## -## Helper Functions ## -## ## -############################################################################### - -def get_all_nsds_of_node(logger, instance): - """ - This function performs "mmlsnsd -X -Y". - Args: - instance (str): instance for which disks are use by filesystem. - region (str): Region of operation - Returns: - all_disk_names (list): Disk names in list format. - Ex: [nsd_1a_1_0, nsd_1c_1_0, nsd_1c_d_1] - """ - logger.debug("Function Entry: get_all_nsds_of_node. " - "Args: instance={0}".format(instance)) - nsd_list = [] - nsd_list = SpectrumScaleNSD.get_all_nsd_info() - - all_nsd_names = [] - for nsd in nsd_list: - if nsd.get_remarks() == 'server node' and instance in nsd.get_server_list(): - all_nsd_names.append(nsd.get_name()) - - logger.debug("Function Exit: get_all_nsds_of_node(). " - "Return Params: all_nsd_names={0} ".format(all_nsd_names)) - - return all_nsd_names - - -def gpfs_df_disk(logger, fs_name): - """ - This function performs "mmdf" to obtain disk capacities. - Args: - fs_name (str): Filesystem name associated with the disks. - Returns: - disk_size_map (dict): Disk name vs. free block size vs. percent - free blocks. - Ex: { - 'nsd_1a_1_0': {'free_size': 10485760, - 'used_size': 480256, - 'percent': 95}, - 'nsd_1c_1_0': {'free_size': 10485760, - 'used_size': 480256, - 'percent': 95} - } - """ - logger.debug("Function Entry: gpfs_df_disk(). " - "Args: fs_name={0}".format(fs_name)) - - nsd_df_list = SpectrumScaleDf.get_df_info(fs_name) - disk_size_map = {} - for nsd_df in nsd_df_list: - total = nsd_df.get_disk_size() - free = nsd_df.get_free_blocks() - used = total - free - free_block_pct = nsd_df.get_free_blocks_pct() - disk = nsd_df.get_nsd_name() - disk_size_map[disk] = { - 'free_size': free, - 'used_size': used, - 'percent': free_block_pct - } - - logger.debug("Function Exit: gpfs_df_disk(). " - "Return Params: disk_size_map={0} ".format(disk_size_map)) - - return disk_size_map - - -def get_node_nsd_info(logger): - logger.debug("Function Entry: get_node_nsd_info().") - - nsd_list = SpectrumScaleNSD.get_all_nsd_info() - - node_nsd_map = {} - nsd_node_map = {} - - for nsd in nsd_list: - if nsd.get_remarks() == 'server node': - # Populate the node_nsd_map data structure - nsd_list = [] - for node_name in nsd.get_server_list(): - if node_name in list(node_nsd_map.keys()): - nsd_list = node_nsd_map[node_name] - nsd_list.append(nsd.get_name()) - node_nsd_map[node_name] = nsd_list - - # Populate the nsd_node_map data structure - host_list = [] - if nsd.get_name() in list(nsd_node_map.keys()): - host_list = nsd_node_map[nsd.get_name()] - for server in nsd.get_server_list(): - host_list.append(server) - nsd_node_map[nsd.get_name()] = host_list - - logger.debug("Function Exit: get_node_nsd_info(). 
" - "Return Params: node_nsd_map={0} " - "nsd_node_map={1}".format(node_nsd_map, nsd_node_map)) - - return node_nsd_map, nsd_node_map - - -############################################################################### -## ## -## Functions to remove node(s) from cluster ## -## ## -############################################################################### - -# -# Retrieve the mapping of Filesystems to NSDs -# -# Returns: -# fs_to_nsd_map (dict): Dict of fs names and SpectrumScaleDisk objects -# -def get_filesystem_to_nsd_mapping(logger): - logger.debug("Function Entry: get_filesystem_to_nsd_mapping().") - - fs_to_nsd_map = {} - - # Retrieve all filesystems on this cluster - fs_instance_list = SpectrumScaleFS.get_filesystems() - - # For each filesystem, determine the Filesystem to NSD mapping - for fs in fs_instance_list: - - # Get all NSDs for this Filesystem - nsds_for_fs = SpectrumScaleDisk.get_all_disk_info(fs.get_device_name()) - - for nsd in nsds_for_fs: - nsd_list = [] - - # If an entry already exists for the File system, then - # simply add the new NSD to the list - if fs.get_device_name() in list(fs_to_nsd_map.keys()): - nsd_list = fs_to_nsd_map[fs.get_device_name()] - - nsd_list.append(nsd) - fs_to_nsd_map[fs.get_device_name()] = nsd_list - - logger.debug("Function Exit: get_filesystem_to_nsd_mapping(). " - "Return Params: fs_to_nsd_map={0} ".format(fs_to_nsd_map)) - - return fs_to_nsd_map - - -def check_cluster_health(logger): - logger.debug("Function Entry: check_cluster_health(). ") - - unhealthy_nodes = [] - all_nodes_state = SpectrumScaleNode.get_state() - - for node_name, state in list(all_nodes_state.items()): - if ("down" in state or - "arbitrating" in state or - "unknown" in state): - unhealthy_nodes.append(node_name) - - if unhealthy_nodes: - unhealthy_nodes_str = ' '.join(map(str, unhealthy_nodes)) - error_msg = ("The following node(s) \"{0}\" is(are) currently not up. " - "Ensure all nodes in the cluster are fully operational " - "before retrying the operation.".format(unhealthy_nodes_str)) - logger.error(error_msg) - raise SpectrumScaleException(error_msg, "", [], -1, "", "") - - logger.debug("Function Exit: check_cluster_health(). ") - - -def check_nodes_exist(logger, nodes_to_be_deleted): - logger.debug("Function Entry: check_nodes_exist(). " - "Args: nodes_to_be_deleted={0}".format(nodes_to_be_deleted)) - - logger.info("Checking if node(s) marked for removal exist in the cluster") - filtered_nodes_to_be_deleted = [] - existing_node_list = SpectrumScaleCluster().get_nodes() - for node_to_del in nodes_to_be_deleted: - for existing_node in existing_node_list: - if (node_to_del in existing_node.get_daemon_node_name() or - node_to_del in existing_node.get_admin_node_name() or - node_to_del in existing_node.get_ip_address()): - filtered_nodes_to_be_deleted.append(existing_node) - - logger.debug("Function Exit: check_nodes_exist(). " - "Return Params: filtered_nodes_to_be_deleted=" - "{0} ".format(filtered_nodes_to_be_deleted)) - - return filtered_nodes_to_be_deleted - - -def check_roles_before_delete(logger, existing_node_list_to_del): - logger.debug("Function Entry: check_roles_before_delete(). 
" - "Args: existing_node_list_to_del=" - "{0}".format(existing_node_list_to_del)) - - logger.info("Checking the designations for all nodes marked for removal") - - for node_to_del in existing_node_list_to_del: - # Do not delete nodes that are designated as "quorum", "manager", - # "gateway", "ces", "TCT", "SNMP" - if (node_to_del.is_quorum_node() or - node_to_del.is_manager_node() or - node_to_del.is_gateway_node() or - node_to_del.is_ces_node() or - node_to_del.is_tct_node() or - node_to_del.is_snmp_node()): - exp_msg = ("Cannot remove node {0} since it is designated " - "as either a quorum, gateway, CES, TCT or SNMP " - "node. Re-run the current command without " - "{1}".format(node_to_del.get_admin_node_name(), - node_to_del.get_admin_node_name())) - logger.error(exp_msg) - raise SpectrumScaleException(exp_msg, "", [], -1, "", "") - - # TODO: Should we also check the Zimon Collector Nodes - # zimon_col_nodes = get_zimon_collectors() - - logger.debug("Function Exit: check_roles_before_delete().") - - -def check_disk_health(logger, fs_nsd_map): - logger.debug("Function Entry: check_disk_health(). " - "Args fs_nsd_map={0}".format(fs_nsd_map)) - - unhealthy_disks = [] - for fs_name, disk_list in list(fs_nsd_map.items()): - for disk in disk_list: - if "down" in disk.get_availability(): - unhealthy_disks.append(disk.get_nsd_name()) - - if unhealthy_disks: - unhealthy_disks_str = ' '.join(map(str, unhealthy_disks)) - error_msg = ("The following disks \"{0}\" are currently not healthy. " - "Ensure all disks in the cluster are healthy before " - "retrying the operation.".format(unhealthy_disks_str)) - logger.error(error_msg) - raise SpectrumScaleException(error_msg, "", [], -1, "", "") - - logger.debug("Function Exit: check_disk_health(). ") - - -def remove_multi_attach_nsd(logger, nodes_to_be_deleted): - logger.debug("Function Entry: remove_multi_attach_nsd(). " - "Args nodes_to_be_deleted={0}".format(nodes_to_be_deleted)) - - logger.info("Checking node(s) for multi-node attached NSD(s)") - - # Iterate through each server to be deleted - node_map, nsd_map = get_node_nsd_info(logger) - for node_to_delete in nodes_to_be_deleted: - logger.debug("Processing all NSDs on node={0} for " - "removal".format(node_to_delete.get_admin_node_name())) - #node_map, nsd_map = get_node_nsd_info(logger) - - # Check if the node to be deleted has access to any NSDs - #if node_to_delete in node_map.keys(): - if node_to_delete.get_admin_node_name() in list(node_map.keys()): - nsds_to_delete_list = node_map[node_to_delete.get_admin_node_name()] - - # For each Node, check all the NSDS it has access to. If the - # Node has access to an NSD that can also be accessed from other - # NSD servers, then we can simply modify the server access list - # through the mmchnsd command - for nsd_to_delete in nsds_to_delete_list: - # Clone list to avoid modifying original content - nsd_attached_to_nodes = (nsd_map[nsd_to_delete])[:] - nsd_attached_to_nodes.remove(node_to_delete.get_admin_node_name()) - if len(nsd_attached_to_nodes) >= 1: - # This node has access to an NSD, that can also be - # accessed by other NSD servers. Therefore modify the - # server access list - logger.info("Removing server access to NSD {0} from node " - "{1}".format(nsd_to_delete, - node_to_delete.get_admin_node_name())) - SpectrumScaleNSD.remove_server_access_to_nsd(nsd_to_delete, - node_to_delete.get_admin_node_name(), - nsd_attached_to_nodes) - - # All "mmchnsd" calls are asynchronous. 
Therefore wait here till all - # modifications are committed before proceeding further. For now just - # sleep but we need to enhance this to ensure the async op has completed - time.sleep(10) - - logger.debug("Function Exit: remove_multi_attach_nsd(). ") - - -# -# This function performs removal / termination of nodes from the IBM Spectrum -# Scale cluster. If the node is a server node that has access to NSD(s), then -# we attempt to remove access to this NSD (if the NSD is a shared NSD) or -# delete access to it (if its a dedicated NSD). -# -# Args: -# node_names_to_delete: Nodes to be deleted from the cluster -# -# Return: -# rc: Return code -# msg: Output message -def remove_nodes(logger, node_names_to_delete): - logger.debug("Function Entry: remove_nodes(). " - "Args: node_list={0}".format(node_names_to_delete)) - - rc = RC_SUCCESS - msg = result_json = "" - removed_node_list = [] - - logger.info("Attempting to remove node(s) {0} from the " - "cluster".format(' '.join(map(str, node_names_to_delete)))) - - # TODO: The cluster health check should only fail if we are attempting - # to remove NSD servers while other NSD servers are down. The - # removal of compute nodes should be permitted even if NSD - # servers are down. For now disable check until correct algorithm - # can be implemented - # Ensure all nodes in the cluster are healthy - #check_cluster_health(logger) - - # Check that the list of nodes to delete already exist. If not, - # simply ignore - nodes_to_delete = check_nodes_exist(logger, node_names_to_delete) - - if len(nodes_to_delete) == 0: - msg = str("All node(s) marked for removal ({0}) are already not part " - "of the cluster".format(' '.join(map(str, - node_names_to_delete)))) - logger.info(msg) - return rc, msg, result_json - - # Precheck nodes to make sure they do not have any roles that should - # not be deleted - check_roles_before_delete(logger, nodes_to_delete) - - # For each Filesystem, Get the Filesystem to NSD (disk) mapping - fs_nsd_map = get_filesystem_to_nsd_mapping(logger) - - # TODO: The disk health check should only fail if we are attempting - # to remove NSD servers when any disks are down. The removal - # of compute nodes should be permitted even if disks are down. - # For now disable check until correct algorithm can be implemented - #check_disk_health(logger, fs_nsd_map) - - # An NSD node can have access to a multi attach NSD (shared NSD) or - # dedicated access to the NSD (FPO model) or a combination of both. - - # First modify the Shared NSDs and remove access to all NSD Nodes - # that are to be deleted. Note: As long as these are Shared NSD's - # another NSD server will continue to have access to the NSD (and - # therefore Data) - remove_multi_attach_nsd(logger, nodes_to_delete) - - # Finally delete any dedicated NSDs (this will force the data to be - # copied to another NSD in the same Filesystem). Finally delete the - # node from the cluster - - logger.debug("Identified all filesystem to disk mapping: " - "{0}".format(fs_nsd_map)) - - for node_to_del_obj in nodes_to_delete: - node_to_del = node_to_del_obj.get_admin_node_name() - logger.debug("Operating on server: {0}".format(node_to_del)) - - # For each node to be deleted, retrieve the NSDs (disks) on the node - all_node_disks = get_all_nsds_of_node(logger, node_to_del) - logger.debug("Identified disks for server ({0}): " - "{1}".format(node_to_del, all_node_disks)) - - # The Node does not have any disks on it (compute node). 
Delete the - # node without any more processing - if len(all_node_disks) == 0: - logger.info("Unmounting filesystem(s) on {0}".format(node_to_del)) - SpectrumScaleFS.unmount_filesystems(node_to_del, wait=True) - - logger.info("Shutting down node {0}".format(node_to_del)) - SpectrumScaleNode.shutdown_node(node_to_del, wait=True) - - logger.info("Deleting compute node {0}".format(node_to_del)) - SpectrumScaleCluster.delete_node(node_to_del) - - removed_node_list.append(node_to_del) - continue - - # Generate a list of NSD (disks) on the host to be deleted for - # each filesystem - # - # fs_disk_map{} contains the following: - # Filesystem Name -> NSDs on the host to be deleted - fs_disk_map = {} - for fs_name, disks in list(fs_nsd_map.items()): - node_specific_disks = [] - for disk_instance in disks: - if disk_instance.get_nsd_name() in all_node_disks: - node_specific_disks.append(disk_instance.get_nsd_name()) - fs_disk_map[fs_name] = node_specific_disks - - logger.debug("Identified filesystem to disk map for server " - "({0}): {1}".format(node_to_del, fs_disk_map)) - - for fs in fs_disk_map: - disk_cap = gpfs_df_disk(logger, fs) - logger.debug("Identified disk capacity for filesystem " - "({0}): {1}".format(fs, disk_cap)) - - # Algorithm used for checking at-least 20% free space during - # mmdeldisk in progress; - # - Identify the size of data stored in disks going to be - # deleted. - # - Identify the free size of the filesystem - # (excluding the disk going to be deleted) - # - Allow for disk deletion, if total_free size is 20% greater - # even after moving used data stored in disk going to be deleted. - size_to_be_del = 0 - for disk in fs_disk_map[fs]: - size_to_be_del += disk_cap[disk]['used_size'] - logger.debug("Identified data size going to be deleted from " - "filesystem ({0}): {1}".format(fs, size_to_be_del)) - - other_disks = [] - for disk_name in disk_cap: - if disk_name not in fs_disk_map[fs]: - other_disks.append(disk_name) - logger.debug("Identified other disks of the filesystem " - "({0}): {1}".format(fs, other_disks)) - - if not other_disks: - msg = str("No free disks available to restripe data " - "for the filesystem {0}".format(fs)) - logger.error(msg) - raise SpectrumScaleException(msg=msg, mmcmd="", cmdargs=[], - rc=-1, stdout="", stderr="") - - size_avail_after_migration, total_free = 0, 0 - for disk in other_disks: - # Accumulate free size on all disks. - total_free += disk_cap[disk]['free_size'] - logger.debug("Identified free size in other disks of the " - "filesystem ({0}): {1}".format(fs, total_free)) - - size_avail_after_migration = total_free - size_to_be_del - logger.debug("Expected size after restriping of the filesystem " - "({0}): {1}".format(fs, size_avail_after_migration)) - - percent = int(size_avail_after_migration*100/total_free) - logger.debug("Expected percentage of size left after restriping " - "of the filesystem ({0}): {1}".format(fs, percent)) - - if percent < 20: - msg = ("Not enough space left for restriping data for " - "filesystem {0}".format(fs)) - logger.error(msg) - raise SpectrumScaleException(msg=msg, mmcmd="", cmdargs=[], - rc=-1, stdout="", stderr="") - - if fs_disk_map[fs]: - # mmdeldisk will not be hit if there are no disks to delete. - logger.info("Deleting disk(s) {0} from node " - "{1}".format(' '.join(map(str, fs_disk_map[fs])), - node_to_del)) - SpectrumScaleDisk.delete_disk(node_to_del, fs, fs_disk_map[fs]) - - if all_node_disks: - # mmdelnsd will not be hot if there are no disks to delete. 
- logger.info("Deleting all NSD(s) {0} attached to node " - "{1}".format(' '.join(map(str, all_node_disks)), - node_to_del)) - SpectrumScaleNSD.delete_nsd(all_node_disks) - - logger.info("Unmounting filesystem(s) on {0}".format(node_to_del)) - SpectrumScaleFS.unmount_filesystems(node_to_del, wait=True) - - logger.info("Shutting down node {0}".format(node_to_del)) - SpectrumScaleNode.shutdown_node(node_to_del, wait=True) - - logger.info("Deleting storage node {0}".format(node_to_del)) - SpectrumScaleCluster.delete_node(node_to_del) - - removed_node_list.append(node_to_del) - - msg = str("Successfully removed node(s) {0} from the " - "cluster".format(' '.join(map(str, removed_node_list)))) - - logger.info(msg) - logger.debug("Function Exit: remove_nodes(). " - "Return Params: rc={0} msg={1}".format(rc, msg)) - - return rc, msg, result_json - - -############################################################################### -## ## -## Functions to retrieve Node information ## -## ## -############################################################################### - -def get_node_info_as_json(logger, node_names=[]): - logger.debug("Function Entry: get_node_info_as_json(). " - "Args: node_names={0}".format(node_names)) - - rc = 0 - msg = result_json = "" - node_info_dict = {} - node_info_list = [] - - cluster = SpectrumScaleCluster() - node_instance_list = cluster.get_nodes() - - for node_instance in node_instance_list: - if len(node_names) == 0: - node_info_list.append(node_instance.get_node_dict()) - else: - if (node_instance.get_ip_address() in node_names or - node_instance.get_admin_node_name() in node_names or - node_instance.get_daemon_node_name() in node_names): - node_info_list.append(node_instance.get_node_dict()) - - node_info_dict["clusterNodes"] = node_info_list - result_json = json.dumps(node_info_dict) - msg = "List cluster successfully executed" - - logger.debug("Function Exit: get_node_info_as_json(). " - "Return Params: rc={0} msg={1} " - "result_json={2}".format(rc, msg, result_json)) - - return rc, msg, result_json - - -def get_node_status_as_json(logger, node_names=[]): - logger.debug("Function Entry: get_node_status_as_json(). " - "Args: node_names={0}".format(node_names)) - - rc = 0 - msg = result_json = "" - node_status = {} - - node_state = SpectrumScaleNode.get_state(node_names) - result_json = json.dumps(node_state) - msg = "Cluster status successfully executed" - - logger.debug("Function Exit: get_node_status_as_json(). " - "Return Params: rc={0} msg={1} " - "result_json={2}".format(rc, msg, result_json)) - - return rc, msg, result_json - - -############################################################################### -## ## -## Functions to Stop/Start Node(s) in the Cluster ## -## ## -############################################################################### - -def start_nodes(logger, node_names): - logger.debug("Function Entry: start_nodes(). " - "Args: node_names={0}".format(node_names)) - - rc = RC_SUCCESS - msg = stdout = result_json = "" - - for node in node_names: - logger.info("Attempting to start node {0}".format(node)) - rc, stdout = SpectrumScaleNode.start_node(node, wait=True) - - msg = str("Successfully started node(s) " - "{0}".format(' '.join(map(str, node_names)))) - - logger.info(msg) - - logger.debug("Function Exit: start_nodes(). " - "Return Params: rc={0} msg={1} " - "result_json={2}".format(rc, msg, result_json)) - - return rc, msg, result_json - - -def stop_nodes(logger, node_names): - logger.debug("Function Entry: stop_nodes(). 
" - "Args: node_names={0}".format(node_names)) - - rc = RC_SUCCESS - msg = stdout = result_json = "" - - for node in node_names: - logger.info("Attempting to stop node {0}".format(node)) - rc, stdout = SpectrumScaleNode.shutdown_node(node, wait=True) - - msg = str("Successfully stopped node(s) " - "{0}".format(' '.join(map(str, node_names)))) - - logger.info(msg) - - logger.debug("Function Exit: stop_nodes(). " - "Return Params: rc={0} msg={1} " - "result_json={2}".format(rc, msg, result_json)) - - return rc, msg, result_json - - -############################################################################### -## ## -## Functions to add Node(s) to the Cluster ## -## ## -############################################################################### - -def add_nodes(logger, node_names, stanza, license): - logger.debug("Function Entry: add_nodes(). " - "Args: node_names={0}".format(node_names)) - - rc = RC_SUCCESS - msg = stdout = result_json = "" - - logger.info("Attempting to add node(s) {0} to the " - "cluster".format(' '.join(map(str, node_names)))) - - rc, stdout, stderr = SpectrumScaleCluster.add_node(node_names, stanza) - - logger.info("Attempting to apply licenses to newly added " - "node(s)".format(' '.join(map(str, node_names)))) - - rc, stdout = SpectrumScaleCluster.apply_license(node_names, license) - - for node in node_names: - logger.info("Attempting to start node {0}".format(node)) - rc, stdout = SpectrumScaleNode.start_node(node, wait=True) - - msg = str("Successfully added node(s) {0} to the " - "cluster".format(' '.join(map(str, node_names)))) - - logger.info(msg) - - logger.debug("Function Exit: add_nodes(). " - "Return Params: rc={0} msg={1} " - "result_json={2}".format(rc, msg, result_json)) - - return rc, msg, result_json - - -############################################################################### -## ## -## Main Function ## -## ## -############################################################################### - -def main(): - logger = SpectrumScaleLogger.get_logger() - - logger.debug("----------------------------------") - logger.debug("Function Entry: ibm_spectrumscale_node.main()") - logger.debug("----------------------------------") - - # Setup the module argument specifications - scale_arg_spec = dict( - op = dict( - type='str', - choices=['get', 'status', 'start', 'stop'], - required=False - ), - state = dict( - type='str', - choices=['present', 'absent'], - required=False - ), - nodefile = dict( - type='str', - required=False - ), - name = dict( - type='str', - required=False - ), - license = dict( - type='str', - choices=['server', 'client', 'fpo'], - required=False - ), - ) - - - scale_req_args = [ - [ "state", "present", [ "nodefile", "name", "license" ] ], - [ "state", "absent", [ "name" ] ] - ] - - - scale_req_one_of_args = [ - [ "op", "state" ] - ] - - scale_mutual_ex_args = [ - [ "get", "status", "start", "stop" ] - ] - - # Instantiate the Ansible module with the given argument specifications - module = AnsibleModule( - argument_spec=scale_arg_spec, - required_one_of=scale_req_one_of_args, - required_if=scale_req_args, - mutually_exclusive=scale_mutual_ex_args - ) - - rc = -1 - msg = result_json = "" - state_changed = False - - try: - if module.params['op']: - node_names = [] - if module.params['name']: - node_names = module.params['name'].split(',') - - if "get" in module.params['op']: - # Retrieve the IBM Spectrum Scale node information - rc, msg, result_json = get_node_info_as_json(logger, - node_names) - elif "status" in module.params['op']: - 
# Retrieve the IBM Spectrum Scale Node state - rc, msg, result_json = get_node_status_as_json(logger, - node_names) - elif "start" in module.params['op']: - # Start the IBM Spectrum Scale Server(s) - rc, msg, result_json = start_nodes(logger, node_names) - elif "stop" in module.params['op']: - # Stop the IBM Spectrum Scale Server(s) - rc, msg, result_json = stop_nodes(logger, node_names) - - elif module.params['state']: - listofserver = module.params['name'] - if "present" in module.params['state']: - # Create a new IBM Spectrum Scale cluster - rc, msg, result_json = add_nodes(logger, - listofserver.split(','), - module.params['nodefile'], - module.params['license']) - else: - # Delete the existing IBM Spectrum Scale cluster - rc, msg, result_json = remove_nodes(logger, - listofserver.split(',')) - - if rc == RC_SUCCESS: - state_changed = True - - except SpectrumScaleException as sse: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(sse), st)) - logger.debug(e_msg) - failure_msg = "FAILED: " + sse.get_message() - module.fail_json(msg=failure_msg, changed=False, rc=-1, - result=result_json, stderr=str(st)) - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - logger.debug(e_msg) - failure_msg = "FAILED: " + e.get_message() - module.fail_json(msg=failure_msg, changed=False, rc=-1, - result=result_json, stderr=str(st)) - - logger.debug("---------------------------------") - logger.debug("Function Exit: ibm_spectrumscale_node.main()") - logger.debug("---------------------------------") - - SpectrumScaleLogger.shutdown() - - # Module is done. Return back the result - if rc == RC_SUCCESS: - module.exit_json(msg=msg, changed=state_changed, rc=rc, result=result_json) - else: - failure_msg = "FAILED: " + msg - module.fail_json(msg=failure_msg, changed=state_changed, rc=rc, - result=result_json) - - -if __name__ == '__main__': - main() diff --git a/roles/custom_module/meta/main.yml b/roles/custom_module/meta/main.yml deleted file mode 100644 index e3f01801..00000000 --- a/roles/custom_module/meta/main.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -galaxy_info: - role_name: core_cluster - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_cluster_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_cluster_utils.py deleted file mode 100644 index a1e9f28a..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_cluster_utils.py +++ /dev/null @@ -1,643 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
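# --- Editor's note (illustrative, not part of the removed files) ----------------
# The removed ibm_spectrumscale_node module above follows the usual custom-module
# pattern: build an AnsibleModule from an argument spec, dispatch on 'op'/'state',
# and report through exit_json()/fail_json(). One detail worth flagging: its
# generic `except Exception` branch calls e.get_message(), which plain Python
# exceptions do not provide; str(e) is the conventional spelling. A compressed,
# hypothetical sketch of the same shape (dispatch body elided):
from ansible.module_utils.basic import AnsibleModule

def run_module():
    module = AnsibleModule(
        argument_spec=dict(
            op=dict(type='str', choices=['get', 'status', 'start', 'stop'], required=False),
            state=dict(type='str', choices=['present', 'absent'], required=False),
            name=dict(type='str', required=False),
        ),
        required_one_of=[['op', 'state']],
    )
    try:
        # ... perform the requested operation and collect rc/msg/result here ...
        module.exit_json(msg="ok", changed=False, rc=0)
    except Exception as e:
        module.fail_json(msg="FAILED: " + str(e), changed=False, rc=-1)

if __name__ == '__main__':
    run_module()
# --------------------------------------------------------------------------------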
-# - -import os -import json -import time - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, \ - parse_aggregate_cmd_output, parse_unique_records, GPFS_CMD_PATH, \ - RC_SUCCESS, SpectrumScaleException -except: - from ibm_spectrumscale_utils import runCmd, parse_aggregate_cmd_output, \ - parse_unique_records, GPFS_CMD_PATH, RC_SUCCESS, SpectrumScaleException - - -class SpectrumScaleNode: - - def __init__(self, node_dict): - self.node = node_dict - self.node_number = int(self.node["nodeNumber"]) - self.daemon_name = self.node["daemonNodeName"] - self.admin_name = self.node["adminNodeName"] - self.ip = self.node["ipAddress"] - self.admin_login = self.node["adminLoginName"] - self.designation = self.node["designation"] - self.other_roles = self.node["otherNodeRoles"] - self.role_alias = self.node["otherNodeRolesAlias"] - - def get_node_number(self): - return self.node_number - - def get_daemon_node_name(self): - return self.daemon_name - - def get_admin_node_name(self): - return self.admin_name - - def get_ip_address(self): - return self.ip - - def get_admin_login_name(self): - return self.admin_login - - def get_designation(self): - # The "designation" field can have the following values: - # "quorumManager" - # "quorum" - # "manager" - # "" - return self.designation - - def get_other_node_roles(self): - # The "otherNodeRoles" field can have a comma seperated list of - # one of the following alphabets - # "M" - cloudNodeMarker - # "G" - gatewayNode - # "C" - cnfsNode - # "X" - cesNode - # "C" - ctdbNode - # "I" - ioNode - # "s" - snmpAgent - # "t" - tealAgent - # "Z" - perfmonNode - # "E" - cnfsEnabled - # "D" - cnfsDisabled - # "new" - NEW_NODE - # "" - OLD_NODE - # "Q" - quorumNode - # "N" - nonQuorumNode - return self.other_roles - - def get_other_node_roles_alias(self): - # The "otherNodeRolesAlias" field can have a comma seperated list of - # one of the following - # "gateway" - # "ctdb" - # "ionode" - # "snmp_collector" - # "teal_collector" - # "perfmon" - # "ces" - # "cnfs" - return self.role_alias - - def is_quorum_node(self): - if "quorum" in self.designation: - return True - return False - - def is_manager_node(self): - if "manager" in (self.designation).lower(): - return True - return False - - def is_tct_node(self): - if "M" in self.other_roles: - return True - return False - - def is_gateway_node(self): - if ("G" in self.other_roles or - "gateway" in self.role_alias): - return True - return False - - def is_ctdb_node(self): - if "ctdb" in self.role_alias: - return True - return False - - def is_io_node(self): - if ("I" in self.other_roles or - "ionode" in self.role_alias): - return True - return False - - def is_snmp_node(self): - if ("s" in self.other_roles or - "snmp_collector" in self.role_alias): - return True - return False - - def is_teal_node(self): - if ("t" in self.other_roles or - "teal_collector" in self.role_alias): - return True - return False - - def is_perfmon_node(self): - if ("Z" in self.other_roles or - "perfmon" in self.role_alias): - return True - return False - - def is_ces_node(self): - if ("X" in self.other_roles or - "ces" in self.role_alias): - return True - return False - - def is_cnfs_node(self): - if ("E" in self.other_roles or - "D" in self.other_roles or - "cnfs" in self.role_alias): - return True - return False - - def to_json(self): - return json.dumps(self.node) - - def get_node_dict(self): - return self.node - - def print_node(self): - print(("Node Number : {0}".format(self.get_node_number()))) - print(("Daemon Node Name : 
{0}".format(self.get_daemon_node_name()))) - print(("IP Address : {0}".format(self.get_ip_address()))) - print(("Admin Node Name : {0}".format(self.get_admin_node_name()))) - print(("Designation : {0}".format(self.get_designation()))) - print(("Other Node Roles : {0}".format(self.get_other_node_roles()))) - print(("Admin Login Name : {0}".format(self.get_admin_login_name()))) - print(("Other Node Roles Alias : {0}".format(self.get_other_node_roles_alias()))) - print(("Is Quorum Node : {0}".format(self.is_quorum_node()))) - print(("Is Manager Node : {0}".format(self.is_manager_node()))) - print(("Is TCT Node : {0}".format(self.is_tct_node()))) - print(("Is Gateway Node : {0}".format(self.is_gateway_node()))) - print(("Is CTDB Node : {0}".format(self.is_ctdb_node()))) - print(("Is IO Node : {0}".format(self.is_io_node()))) - print(("Is SNMP Node : {0}".format(self.is_snmp_node()))) - print(("Is Teal Node : {0}".format(self.is_teal_node()))) - print(("Is Perfmon Node : {0}".format(self.is_perfmon_node()))) - print(("Is CES Node : {0}".format(self.is_ces_node()))) - print(("Is CNFS Node : {0}".format(self.is_cnfs_node()))) - - - def __str__(self): - return str("Node Number : {0}\n" - "Daemon Node Name : {1}\n" - "IP Address : {2}\n" - "Admin Node Name : {3}\n" - "Designation : {4}\n" - "Other Node Roles : {5}\n" - "Admin Login Name : {6}\n" - "Other Node Roles Alias : {7}\n" - "Is Quorum Node : {8}\n" - "Is Manager Node : {9}\n" - "Is TCT Node : {10}\n" - "Is Gateway Node : {11}\n" - "Is CTDB Node : {12}\n" - "Is IO Node : {13}\n" - "Is SNMP Node : {14}\n" - "Is Teal Node : {15}\n" - "Is Perfmon Node : {16}\n" - "Is CES Node : {17}\n" - "Is CNFS Node : {18}".format( - self.get_node_number(), - self.get_daemon_node_name(), - self.get_ip_address(), - self.get_admin_node_name(), - self.get_designation(), - self.get_other_node_roles(), - self.get_admin_login_name(), - self.get_other_node_roles_alias(), - self.is_quorum_node(), - self.is_manager_node(), - self.is_tct_node(), - self.is_gateway_node(), - self.is_ctdb_node(), - self.is_io_node(), - self.is_snmp_node(), - self.is_teal_node(), - self.is_perfmon_node(), - self.is_ces_node(), - self.is_cnfs_node())) - - - @staticmethod - def get_state(node_names=[], admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - cmd = [] - mmcmd_idx = 1 - - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmgetstate")]) - - if len(node_names) == 0: - cmd.append("-a") - else: - # If a set of node names have ben provided, use that instead - node_name_str = ','.join(node_names) - cmd.append("-N") - cmd.append(node_name_str) - - cmd.append("-Y") - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Retrieving the node state failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - node_state_dict = parse_unique_records(stdout) - node_state_list = node_state_dict["mmgetstate"] - - node_state = {} - for node in node_state_list: - node_state[node["nodeName"]] = node["state"] - - return node_state - - - @staticmethod - def shutdown_node(node_name, wait=True, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - cmd = [] - mmcmd_idx = 1 - - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - if isinstance(node_name, str): - node_name_str = node_name - node_name_list = [node_name] - 
else: - node_name_str = ' '.join(node_name) - node_name_list = node_name - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmshutdown"), "-N", node_name_str]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Shutting down node failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - if wait: - # Wait for a maximum of 36 * 5 = 180 seconds (3 minutes) - MAX_RETRY = 36 - retry = 0 - done = False - while(not done and retry < MAX_RETRY): - time.sleep(5) - node_state = SpectrumScaleNode.get_state(node_name_list, admin_ip) - done = all("down" in state for state in list(node_state.values())) - retry = retry + 1 - - if not done: - raise SpectrumScaleException("Shutting down node(s) timed out", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], -1, "", - "Node state is not \"down\" after retries") - return rc, stdout - - - @staticmethod - def start_node(node_name, wait=True, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - cmd = [] - mmcmd_idx = 1 - - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - if isinstance(node_name, str): - node_name_str = node_name - node_name_list = [node_name] - else: - node_name_str = ' '.join(node_name) - node_name_list = node_name - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmstartup"), "-N", node_name_str]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Starting node failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - if wait: - # Wait for a maximum of 36 * 5 = 180 seconds (3 minutes) - MAX_RETRY = 36 - retry = 0 - done = False - while(not done and retry < MAX_RETRY): - time.sleep(5) - node_state = SpectrumScaleNode.get_state(node_name_list, admin_ip) - done = all("active" in state for state in list(node_state.values())) - retry = retry + 1 - - if not done: - raise SpectrumScaleException("Starting node(s) timed out", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], -1, "", - "Node state is not \"active\" after retries") - return rc, stdout - - -class SpectrumScaleCluster: - - def __retrieve_cluster_info(self, admin_ip): - stdout = stderr = "" - rc = RC_SUCCESS - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmlscluster"), "-Y"]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - if rc != RC_SUCCESS: - raise SpectrumScaleException("Retrieving the cluster information failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - return parse_aggregate_cmd_output(stdout, - ["clusterSummary", - "cnfsSummary", - "cesSummary"]) - - def __init__(self, admin_ip=None): - self.cluster_dict = self.__retrieve_cluster_info(admin_ip) - self.name = self.cluster_dict["clusterSummary"]["clusterName"] - self.c_id = self.cluster_dict["clusterSummary"]["clusterId"] - self.uid_domain = self.cluster_dict["clusterSummary"]["uidDomain"] - self.rsh_path = self.cluster_dict["clusterSummary"]["rshPath"] - self.rsh_sudo_wrapper = self.cluster_dict["clusterSummary"]["rshSudoWrapper"] - self.rcp_path = self.cluster_dict["clusterSummary"]["rcpPath"] - self.rcp_sudo_wrapper 
= self.cluster_dict["clusterSummary"]["rcpSudoWrapper"] - self.repository_type = self.cluster_dict["clusterSummary"]["repositoryType"] - self.primary_server = self.cluster_dict["clusterSummary"]["primaryServer"] - self.secondary_server = self.cluster_dict["clusterSummary"]["secondaryServer"] - - - def get_name(self): - return self.name - - def get_id(self): - return self.c_id - - def get_uid_domain(self): - return self.uid_domain - - def get_rsh_path(self): - return self.rsh_path - - def get_rsh_sudo_wrapper(self): - return self.rsh_sudo_wrapper - - def get_rcp_path(self): - return self.rcp_path - - def get_rcp_sudo_wrapper(self): - return self.rcp_sudo_wrapper - - def get_repository_type(self): - return self.repository_type - - def get_primary_server(self): - return self.primary_server - - def get_secondary_server(self): - return self.secondary_server - - def __str__(self): - return str("Cluster Name : {0}\n" - "Cluster ID : {1}\n" - "UID Domain : {2}\n" - "rsh Path : {3}\n" - "rsh Sudo Wrapper: {4}\n" - "rcp Path : {5}\n" - "rcp Sudo Wrapper: {6}\n" - "Repository Type : {7}\n" - "Primary Server : {8}\n" - "Secondary Server: {9}".format( - self.get_name(), - self.get_id(), - self.get_uid_domain(), - self.get_rsh_path(), - self.get_rsh_sudo_wrapper(), - self.get_rcp_path(), - self.get_rcp_sudo_wrapper(), - self.get_repository_type(), - self.get_primary_server(), - self.get_secondary_server())) - - def to_json(self): - return json.dumps(self.cluster_dict) - - def get_cluster_dict(self): - return self.cluster_dict - - def get_nodes(self): - node_list = [] - for node in self.cluster_dict["clusterNode"]: - node_instance = SpectrumScaleNode(node) - node_list.append(node_instance) - - return node_list - - @staticmethod - def delete_node(node_name, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - if isinstance(node_name, str): - node_name_str = node_name - else: - node_name_str = ' '.join(node_name) - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmdelnode"), "-N", node_name_str]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Deleting node from cluster failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - return rc, stdout - - - @staticmethod - def add_node(node_name, stanza_path, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - if isinstance(node_name, str): - node_name_str = node_name - else: - node_name_str = ' '.join(node_name) - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmaddnode"), - "-N", stanza_path, "--accept"]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Adding node to cluster failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - return rc, stdout, stderr - - - @staticmethod - def apply_license(node_name, license, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - if isinstance(node_name, str): - node_name_str = node_name - else: - node_name_str = ' '.join(node_name) - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = 
len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmchlicense"), license, - "--accept", "-N", node_name_str]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Changing license on node failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - return rc, stdout - - - @staticmethod - def create_cluster(name, stanza_path, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmcrcluster"), "-N", stanza_path, - "-C", name]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Creating cluster failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - return rc, stdout - - - @staticmethod - def delete_cluster(name, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmdelnode"), "-a"]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Deleting cluster failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - return rc, stdout - - -def main(): - cluster = SpectrumScaleCluster() - print((cluster.to_json())) - print("\n") - - for node in cluster.get_nodes(): - print(node) - print("\n") - - -if __name__ == "__main__": - main() - diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_df_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_df_utils.py deleted file mode 100644 index 6a9dfea4..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_df_utils.py +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
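# --- Editor's note (illustrative, not part of the removed files) ----------------
# Every static helper in the removed ibm_spectrumscale_cluster_utils module builds
# its command the same way: an optional "ssh <admin_ip>" prefix, the mm* command
# joined onto GPFS_CMD_PATH, then runCmd() from the removed ibm_spectrumscale_utils
# module. runCmd() is not reproduced in this patch, so the sketch below substitutes
# subprocess and is a hypothetical equivalent, assuming the usual
# /usr/lpp/mmfs/bin install path.
import os
import subprocess

def run_mm_command(mm_cmd, args, admin_ip=None, gpfs_cmd_path="/usr/lpp/mmfs/bin"):
    """Run an mm* command locally, or on admin_ip over ssh, returning (rc, out, err)."""
    cmd = []
    if admin_ip:
        cmd.extend(["ssh", admin_ip])
    cmd.append(os.path.join(gpfs_cmd_path, mm_cmd))
    cmd.extend(args)
    proc = subprocess.run(cmd, capture_output=True, text=True)
    return proc.returncode, proc.stdout, proc.stderr

# Example: run_mm_command("mmgetstate", ["-a", "-Y"]) mirrors the command built by
# SpectrumScaleNode.get_state() above.
# --------------------------------------------------------------------------------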
-# - - -import os -import json - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, \ - parse_aggregate_cmd_output, parse_unique_records, GPFS_CMD_PATH, \ - RC_SUCCESS, SpectrumScaleException -except: - from ibm_spectrumscale_utils import runCmd, parse_aggregate_cmd_output, \ - parse_unique_records, GPFS_CMD_PATH, RC_SUCCESS, SpectrumScaleException - - -class SpectrumScaleDf: - nsd_df = {} - - def __init__(self, nsd_df_dict): - self.node = nsd_df_dict - - def get_nsd_name(self): - nsd_name = self.node["nsdName"] - return nsd_name - - def get_storage_pool(self): - pool = self.node["storagePool"] - return pool - - def get_disk_size(self): - disk_size = self.node["diskSize"] - if disk_size: - return int(disk_size) - return 0 - - def get_failure_group(self): - fg = self.node["failureGroup"] - return fg - - def stores_meta_data(self): - meta = self.node["metadata"] - return meta - - def stores_data(self): - data = self.node["data"] - return data - - def get_free_blocks(self): - free_blocks = self.node["freeBlocks"] - if free_blocks: - return int(free_blocks) - return 0 - - def get_free_blocks_pct(self): - free_blocks_pct = self.node["freeBlocksPct"] - if free_blocks_pct: - return int(free_blocks_pct) - return 0 - - def get_free_fragments(self): - free_fragments = self.node["freeFragments"] - if free_fragments: - return int(free_fragments) - return 0 - - def get_free_fragments_pct(self): - free_fragments_pct = self.node["freeFragmentsPct"] - if free_fragments_pct: - return int(free_fragments_pct) - return 0 - - def get_disk_available_for_alloc(self): - disk_available_for_alloc = self.node["diskAvailableForAlloc"] - return disk_available_for_alloc - - def to_json(self): - return json.dumps(self.nsd_df_dict) - - def get_nsd_df_dict(self): - return self.nsd_df_dict - - def print_nsd_df(self): - print(("NSD Name : {0}".format(self.get_nsd_name()))) - print(("Storage Pool : {0}".format(self.get_storage_pool()))) - print(("Disk Size : {0}".format(self.get_disk_size()))) - print(("Failure Group : {0}".format(self.get_failure_group()))) - print(("Stores Metadata : {0}".format(self.stores_meta_data()))) - print(("Stores Data : {0}".format(self.stores_data()))) - print(("Free Blocks : {0}".format(self.get_free_blocks()))) - print(("Free Blocks % : {0}".format(self.get_free_blocks_pct()))) - print(("Free Fragments : {0}".format(self.get_free_fragments()))) - print(("Free Fragments % : {0}".format(self.get_free_fragments_pct()))) - print(("Disk Available For Alloc: {0}".format(self.get_disk_available_for_alloc()))) - - - @staticmethod - def get_df_info(filesystem_name, admin_ip=None): - nsd_df_info_list = [] - - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - # TODO - # The original code executed the command "/usr/lpp/mmfs/bin/mmdf -d -Y" - # but this did not work if there were multiple Pools with a separate System Pool. - # Therefore the "-d" flag has been removed. 
Check to see why the "-d" flag was - # was used in the first place - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmdf"), filesystem_name, "-Y"]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Retrieving filesystem disk space usage failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - df_dict = parse_aggregate_cmd_output(stdout, ["poolTotal", "data", - "metadata", "fsTotal", - "inode"]) - - nsd_df_list = df_dict["nsd"] - - for nsd_df in nsd_df_list: - nsd_df_instance = SpectrumScaleDf(nsd_df) - nsd_df_info_list.append(nsd_df_instance) - - return nsd_df_info_list - - -def main(): - # TODO: Dynamically fetch the Filesystem Names - nsd_df_list = get_nsd_df_info("FS1") - for nsd_df in nsd_df_list: - nsd_df.print_nsd_df() - print("\n") - - -if __name__ == "__main__": - main() - diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_disk_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_disk_utils.py deleted file mode 100644 index 1f6837f4..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_disk_utils.py +++ /dev/null @@ -1,229 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
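# --- Editor's note (illustrative, not part of the removed files) ----------------
# The removed *_utils modules lean on parse_aggregate_cmd_output() and
# parse_unique_records() (from the removed ibm_spectrumscale_utils module) to turn
# the colon-delimited "-Y" output of commands such as mmdf and mmlsdisk into
# dictionaries. Those parsers are not part of this patch, so the sketch below is a
# simplified, hypothetical illustration; it assumes the usual -Y layout in which a
# HEADER row names the columns for each "<command>:<section>:" prefix and the data
# rows that follow reuse the same prefix.
def parse_y_output(raw_output):
    """Map each -Y data row to a dict keyed by the names from its HEADER row."""
    headers = {}
    records = []
    for line in raw_output.splitlines():
        fields = line.strip().split(":")
        if len(fields) < 3:
            continue
        prefix = (fields[0], fields[1])
        if fields[2] == "HEADER":
            headers[prefix] = fields
        elif prefix in headers:
            records.append(dict(zip(headers[prefix], fields)))
    return records
# --------------------------------------------------------------------------------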
-# - -import os -import sys -import json - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, \ - parse_unique_records, GPFS_CMD_PATH, RC_SUCCESS, \ - SpectrumScaleException -except: - from ibm_spectrumscale_utils import runCmd, parse_unique_records, \ - GPFS_CMD_PATH, RC_SUCCESS, SpectrumScaleException - - -class SpectrumScaleDisk: - disk = {} - filesystem = "" - - def __init__(self, disk_dict, fs_name): - self.disk = disk_dict - self.filesystem = fs_name - - def get_nsd_name(self): - nsd_name = self.disk["nsdName"] - return nsd_name - - def get_driver_type(self): - driver_type = self.disk["driverType"] - return driver_type - - def get_sector_size(self): - sector_size = self.disk["sectorSize"] - return sector_size - - def get_failure_group(self): - failure_group = self.disk["failureGroup"] - return failure_group - - def contains_metadata(self): - metadata = self.disk["metadata"] - if "yes" in metadata: - return True - return False - - def contains_data(self): - data = self.disk["data"] - if "yes" in data: - return True - return False - - def get_status(self): - status = self.disk["status"] - return status - - def get_availability(self): - availability = self.disk["availability"] - return availability - - def get_disk_id(self): - disk_id = self.disk["diskID"] - return disk_id - - def get_storage_pool(self): - pool_name = self.disk["storagePool"] - return pool_name - - def get_remarks(self): - remarks = self.disk["remarks"] - return remarks - - def get_num_quorum_disks(self): - num_qd_str = self.disk["numQuorumDisks"] - num_quorum_disks = int(num_qd_str) - return num_quorum_disks - - def get_read_quorum_value(self): - read_qv_str = self.disk["readQuorumValue"] - read_quorum_value = int(read_qv_str) - return read_quorum_value - - def get_write_quorum_value(self): - write_qv_str = self.disk["writeQuorumValue"] - write_quorum_value = int(write_qv_str) - return write_quorum_value - - def get_disk_size_KB(self): - disk_sz_str = self.disk["diskSizeKB"] - disk_size_KB = int(disk_sz_str) - return disk_size_KB - - def get_disk_UID(self): - disk_uid = self.disk["diskUID"] - return disk_uid - - def get_thin_disk_type(self): - thin_disk_type = self.disk["thinDiskType"] - return thin_disk_type - - def to_json(self): - return json.dumps(self.disk) - - def print_disk(self): - print(("NSD Name : {0}".format(self.get_nsd_name()))) - print(("Driver Type : {0}".format(self.get_driver_type()))) - print(("Sector Size : {0}".format(self.get_sector_size()))) - print(("Failure Group : {0}".format(self.get_failure_group()))) - print(("Contains Metadata : {0}".format(self.contains_metadata()))) - print(("Contains Data : {0}".format(self.contains_data()))) - print(("Status : {0}".format(self.get_status()))) - print(("Availability : {0}".format(self.get_availability()))) - print(("Disk ID : {0}".format(self.get_disk_id()))) - print(("Storage Pool : {0}".format(self.get_storage_pool()))) - print(("Remarks : {0}".format(self.get_remarks()))) - print(("Num Quorum Disks : {0}".format(self.get_num_quorum_disks()))) - print(("Read Quorum Value : {0}".format(self.get_read_quorum_value()))) - print(("Write Quorum Value : {0}".format(self.get_write_quorum_value()))) - print(("NSD Disk Size (KB) : {0}".format(self.get_disk_size_KB()))) - print(("Disk UID : {0}".format(self.get_disk_UID()))) - print(("Thin Disk Type : {0}".format(self.get_thin_disk_type()))) - - @staticmethod - def get_all_disk_info(fs_name, admin_ip=None): - disk_info_list = [] - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - 
mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmlsdisk"), fs_name, "-Y"]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc == RC_SUCCESS: - # TODO: Check the return codes and examine other possibility and verify below - if "No disks were found" in stderr: - return nsd_info_list - else: - raise SpectrumScaleException("Retrieving disk information failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - disk_dict = parse_unique_records(stdout) - disk_list = disk_dict["mmlsdisk"] - - for disk in disk_list: - disk_instance = SpectrumScaleDisk(disk, fs_name) - disk_info_list.append(disk_instance) - - return disk_info_list - - - @staticmethod - def delete_disk(node_name, filesystem_name, disk_names, admin_ip=None): - """ - This function performs "mmdeldisk". - Args: - node_name (str): Node for which disk needs to be deleted. - filesystems_name (str): Filesystem name associated with the disks. - disk_names (list): Disk name to be deleted. - Ex: ['gpfs1nsd', 'gpfs2nsd', 'gpfs3nsd'] - """ - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - disk_name_str = ";".join(disk_names) - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmdeldisk"), filesystem_name, - disk_name_str, '-N', node_name]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Deleting disk(s) failed. ", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - -def main(): - if len(sys.argv) == 2: - fs_name = sys.argv[1] - try: - disk_list = get_all_disk_info(fs_name) - for disk in disk_list: - disk.print_disk() - print("\n") - except Exception as e: - print(e) - else: - print("The file system name should be specified") - rc = 1 - - -if __name__ == "__main__": - main() diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_filesystem_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_filesystem_utils.py deleted file mode 100644 index dd685979..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_filesystem_utils.py +++ /dev/null @@ -1,420 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
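# --- Editor's note (illustrative, not part of the removed files) ----------------
# In the removed ibm_spectrumscale_disk_utils module above, the error handling in
# SpectrumScaleDisk.get_all_disk_info() reads "if rc == RC_SUCCESS:" and then
# returns the undefined name nsd_info_list. Judging from the sibling *_utils
# modules (e.g. SpectrumScaleFS.get_filesystems), the intent appears to be the
# inverse guard; a corrected fragment, kept in the removed module's own names,
# would read roughly:
#
#     if rc != RC_SUCCESS:
#         # the removed code's own check: treat "No disks were found" on stderr
#         # as an empty result rather than a failure
#         if "No disks were found" in stderr:
#             return disk_info_list
#         raise SpectrumScaleException("Retrieving disk information failed",
#                                      cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc,
#                                      stdout, stderr)
# --------------------------------------------------------------------------------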
-# - -import os -import json - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, \ - parse_simple_cmd_output, GPFS_CMD_PATH, RC_SUCCESS, \ - SpectrumScaleException -except: - from ibm_spectrumscale_utils import runCmd, parse_simple_cmd_output, \ - GPFS_CMD_PATH, RC_SUCCESS, SpectrumScaleException - - -class SpectrumScaleFS: - - def __init__(self, device_name, filesystem_properties): - self.device_name = device_name - self.properties_list = filesystem_properties - - def __get_property_as_str(self, prop_name): - str_prop_value = "" - for fs_property in self.properties_list: - if prop_name in fs_property["fieldName"]: - str_prop_value = fs_property["data"] - return str_prop_value - - def __get_property_as_int(self, prop_name): - int_prop_value = 0 - for fs_property in self.properties_list: - if prop_name in fs_property["fieldName"]: - int_prop_value = int(fs_property["data"]) - return int_prop_value - - def __get_property_as_bool(self, prop_name): - bool_prop_value = False - for fs_property in self.properties_list: - if prop_name in fs_property["fieldName"]: - if ("Yes" in fs_property["data"] or - "yes" in fs_property["data"]): - bool_prop_value = True - return bool_prop_value - - def get_device_name(self): - return self.device_name - - def get_syspool_min_fragment_size(self): - syspool_min_fragment_size = 0 - for fs_property in self.properties_list: - if ("minFragmentSize" in fs_property["fieldName"] and - "system pool" in fs_property["remarks"]): - syspool_min_fragment_size = int(fs_property["data"]) - return syspool_min_fragment_size - - def get_other_pool_min_fragment_size(self): - other_pool_min_fragment_size = 0 - for fs_property in self.properties_list: - if ("minFragmentSize" in fs_property["fieldName"] and - "other pools" in fs_property["remarks"]): - other_pool_min_fragment_size = int(fs_property["data"]) - return other_pool_min_fragment_size - - def get_inode_size(self): - return self.__get_property_as_int("inodeSize") - - def get_indirect_block_size(self): - return self.__get_property_as_int("indirectBlockSize") - - def get_default_metadata_replicas(self): - return self.__get_property_as_int("defaultMetadataReplicas") - - def get_max_metadata_replicas(self): - return self.__get_property_as_int("maxMetadataReplicas") - - def get_default_data_replicas(self): - return self.__get_property_as_int("defaultDataReplicas") - - def get_max_data_replicas(self): - return self.__get_property_as_int("maxDataReplicas") - - def get_block_allocation_type(self): - return self.__get_property_as_str("blockAllocationType") - - def get_file_locking_semantics(self): - return self.__get_property_as_str("fileLockingSemantics") - - def get_acl_semantics(self): - return self.__get_property_as_str("ACLSemantics") - - def get_num_nodes(self): - return self.__get_property_as_int("numNodes") - - def get_syspool_block_size(self): - syspool_block_size = 0 - for fs_property in self.properties_list: - if ("blockSize" in fs_property["fieldName"] and - "system pool" in fs_property["remarks"]): - syspool_block_size = int(fs_property["data"]) - return syspool_block_size - - def get_other_pool_block_size(self): - other_pool_block_size = 0 - for fs_property in self.properties_list: - if ("blockSize" in fs_property["fieldName"] and - "other pools" in fs_property["remarks"]): - other_pool_block_size = int(fs_property["data"]) - return other_pool_block_size - - def get_quotas_accounting_enabled(self): - return self.__get_property_as_str("quotasAccountingEnabled") - - def get_quotas_enforced(self): - 
return self.__get_property_as_str("quotasEnforced") - - def get_default_quotas_enabled(self): - return self.__get_property_as_str("defaultQuotasEnabled") - - def get_per_fileset_quotas(self): - return self.__get_property_as_bool("perfilesetQuotas") - - def is_fileset_df_enabled(self): - return self.__get_property_as_bool("filesetdfEnabled") - - def get_filesystem_version(self): - return self.__get_property_as_str("filesystemVersion") - - def get_filesystem_version_local(self): - return self.__get_property_as_str("filesystemVersionLocal") - - def get_filesystem_version_manager(self): - return self.__get_property_as_str("filesystemVersionManager") - - def get_filesystem_version_original(self): - return self.__get_property_as_str("filesystemVersionOriginal") - - def get_filesystem_highest_supported(self): - return self.__get_property_as_str("filesystemHighestSupported") - - def get_create_time(self): - return self.__get_property_as_str("create-time") - - def is_dmapi_enabled(self): - return self.__get_property_as_bool("DMAPIEnabled") - - def get_logfile_size(self): - return self.__get_property_as_int("logfileSize") - - def is_exact_m_time(self): - return self.__get_property_as_bool("exactMtime") - - def get_suppress_atime(self): - return self.__get_property_as_str("suppressAtime") - - def get_strict_replication(self): - return self.__get_property_as_str("strictReplication") - - def is_fast_ea_enabled(self): - return self.__get_property_as_bool("fastEAenabled") - - def is_encrypted(self): - return self.__get_property_as_bool("encryption") - - def get_max_number_of_inodes(self): - return self.__get_property_as_int("maxNumberOfInodes") - - def get_max_snapshot_id(self): - return self.__get_property_as_int("maxSnapshotId") - - def get_uid(self): - return self.__get_property_as_str("UID") - - def get_log_replicas(self): - return self.__get_property_as_int("logReplicas") - - def is_4k_aligned(self): - return self.__get_property_as_bool("is4KAligned") - - def is_rapid_repair_enabled(self): - return self.__get_property_as_bool("rapidRepairEnabled") - - def get_write_cache_threshold(self): - return self.__get_property_as_int("write-cache-threshold") - - def get_subblocks_per_full_block(self): - return self.__get_property_as_int("subblocksPerFullBlock") - - def get_storage_pools(self): - storage_pool_list = [] - storage_pool_str = self.__get_property_as_str("storagePools") - if storage_pool_str: - storage_pool_list = storage_pool_str.split(";") - return storage_pool_list - - def is_file_audit_log_enabled(self): - return self.__get_property_as_bool("file-audit-log") - - def is_maintenance_mode(self): - return self.__get_property_as_bool("maintenance-mode") - - def get_disks(self): - disk_list = [] - disk_str = self.__get_property_as_str("disks") - if disk_str: - disk_list = disk_str.split(";") - return disk_list - - def is_automatic_mount_option_enabled(self): - return self.__get_property_as_bool("automaticMountOption") - - def get_additional_mount_options(self): - return self.__get_property_as_str("additionalMountOptions") - - def get_default_mount_point(self): - return self.__get_property_as_str("defaultMountPoint") - - def get_mount_priority(self): - return self.__get_property_as_int("mountPriority") - - def get_properties_list(self): - return self.properties_list - - def to_json(self): - # TODO: Include Filesystem Device Name - return json.dumps(self.properties_list) - - def print_filesystem(self): - print(("Device Name : {0}".format(self.get_device_name()))) - print(("Syspool Min Fragment Size : 
{0}".format(self.get_syspool_min_fragment_size()))) - print(("Other Pool Min Fragment Size : {0}".format(self.get_other_pool_min_fragment_size()))) - print(("Inode Size : {0}".format(self.get_inode_size()))) - print(("Indirect Block Size : {0}".format(self.get_indirect_block_size()))) - print(("Default Metadata Replicas : {0}".format(self.get_default_metadata_replicas()))) - print(("Max Metadata Replicas : {0}".format(self.get_max_metadata_replicas()))) - print(("Default Data Replicas : {0}".format(self.get_default_data_replicas()))) - print(("Max Data Replicas : {0}".format(self.get_max_data_replicas()))) - print(("Block Allocation Type : {0}".format(self.get_block_allocation_type()))) - print(("File Locking Semantics : {0}".format(self.get_file_locking_semantics()))) - print(("ACL Semantics : {0}".format(self.get_acl_semantics()))) - print(("Num Nodes : {0}".format(self.get_num_nodes()))) - print(("Syspool Block Size : {0}".format(self.get_syspool_block_size()))) - print(("Other Pool Block Size : {0}".format(self.get_other_pool_block_size()))) - print(("Quotas Accounting Enabled : {0}".format(self.get_quotas_accounting_enabled()))) - print(("Quotas Enforced : {0}".format(self.get_quotas_enforced()))) - print(("Default Quotas Enabled : {0}".format(self.get_default_quotas_enabled()))) - print(("Per Fileset Quotas : {0}".format(self.get_per_fileset_quotas()))) - print(("Fileset df Enabled : {0}".format(self.is_fileset_df_enabled()))) - print(("Filesystem Version : {0}".format(self.get_filesystem_version()))) - print(("Filesystem Version Local : {0}".format(self.get_filesystem_version_local()))) - print(("Filesystem Version Manager : {0}".format(self.get_filesystem_version_manager()))) - print(("Filesystem Version Original : {0}".format(self.get_filesystem_version_original()))) - print(("Filesystem Highest Supported : {0}".format(self.get_filesystem_highest_supported()))) - print(("Create Time : {0}".format(self.get_create_time()))) - print(("DMAPI Enabled : {0}".format(self.is_dmapi_enabled()))) - print(("Logfile Size : {0}".format(self.get_logfile_size()))) - print(("Is Exact m Time : {0}".format(self.is_exact_m_time()))) - print(("Suppress atime : {0}".format(self.get_suppress_atime()))) - print(("Strict Replication : {0}".format(self.get_strict_replication()))) - print(("Is Fast EA Enabled : {0}".format(self.is_fast_ea_enabled()))) - print(("Is Encrypted : {0}".format(self.is_encrypted()))) - print(("Max Number Of Inodes : {0}".format(self.get_max_number_of_inodes()))) - print(("Max Snapshot Id : {0}".format(self.get_max_snapshot_id()))) - print(("UID : {0}".format(self.get_uid()))) - print(("Log Replicas : {0}".format(self.get_log_replicas()))) - print(("Is 4K Aligned : {0}".format(self.is_4k_aligned()))) - print(("Is Rapid Repair Enabled : {0}".format(self.is_rapid_repair_enabled()))) - print(("Write Cache Threshold : {0}".format(self.get_write_cache_threshold()))) - print(("Subblocks Per Full Block : {0}".format(self.get_subblocks_per_full_block()))) - print(("Storage Pools : {0}".format(self.get_storage_pools()))) - print(("Is File Audit Log Enabled : {0}".format(self.is_file_audit_log_enabled()))) - print(("Is Maintenance Mode : {0}".format(self.is_maintenance_mode()))) - print(("Disks : {0}".format(self.get_disks()))) - print(("Is Automatic Mount Option Enabled : {0}".format(self.is_automatic_mount_option_enabled()))) - print(("Additional Mount Options : {0}".format(self.get_additional_mount_options()))) - print(("Default Mount Point : {0}".format(self.get_default_mount_point()))) 
- print(("Mount Priority : {0}".format(self.get_mount_priority()))) - - - @staticmethod - def get_filesystems(admin_ip=None): - filesystem_info_list = [] - - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmlsfs"), "all", "-Y"]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - if 'mmlsfs: No file systems were found.' in stdout or \ - 'mmlsfs: No file systems were found.' in stderr: - return filesystem_info_list - - raise SpectrumScaleException("Retrieving filesystem information failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - filesystem_dict = parse_simple_cmd_output(stdout, "deviceName", - "properties", "filesystems") - filesystem_list = filesystem_dict["filesystems"] - - for filesystem in filesystem_list: - device_name = filesystem["deviceName"] - fs_properties = filesystem["properties"] - filesystem_instance = SpectrumScaleFS(device_name, fs_properties) - filesystem_info_list.append(filesystem_instance) - - return filesystem_info_list - - - @staticmethod - def unmount_filesystems(node_name, wait=True, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmumount"), "all", "-N", node_name]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - if 'mmumount: No file systems were found' in stdout or \ - 'mmumount: No file systems were found' in stderr: - # We can claim success on umount if there are no filesystems - return RC_SUCCESS - - raise SpectrumScaleException("Unmounting filesystems on node failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, stdout, stderr) - return rc, stdout - - - @staticmethod - def create_filesystem(name, stanza_path, block_size, - default_metadata_replicas, - default_data_replicas, num_nodes, - automatic_mount_option, - default_mount_point, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmcrfs"), name, - "-F", stanza_path, - "-B", block_size, - "-m", default_metadata_replicas, - "-r", default_data_replicas, - "-n", num_nodes, - "-A", automatic_mount_option, - "-T", default_mount_point]) - # TODO: Make this idempotent - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Create filesystems on node failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - return rc, stdout - - - @staticmethod - def delete_filesystem(name): - # TODO: Implement - rc = RC_SUCCESS - msg = "" - return rc, msg - - -def main(): - filesystem_list = get_filesystems() - for filesystem in filesystem_list: - filesystem.print_filesystem() - print("\n") - - -if __name__ == "__main__": - main() diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_nsd_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_nsd_utils.py deleted file mode 
100644 index 44ba0e59..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_nsd_utils.py +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -import os -import json - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, \ - parse_unique_records, GPFS_CMD_PATH, RC_SUCCESS, \ - SpectrumScaleException -except: - from ibm_spectrumscale_utils import runCmd, parse_unique_records, \ - GPFS_CMD_PATH, RC_SUCCESS, SpectrumScaleException - - -class SpectrumScaleNSD: - def __init__(self, nsd_dict): - self.nsd = nsd_dict - - def get_name(self): - name = self.nsd["diskName"] - return name - - def get_volume_id(self): - volumeId = self.nsd["volumeId"] - return volumeId - - def get_server_list(self): - server_list = [] - server_list_str = self.nsd["serverList"] - if server_list_str: - server_list = server_list_str.split(",") - return server_list - - def get_device_type(self): - device_type = self.nsd["deviceType"] - return device_type - - def get_disk_name(self): - disk_name = self.nsd["localDiskName"] - return disk_name - - def get_remarks(self): - remarks = self.nsd["remarks"] - return remarks - - def to_json(self): - return json.dumps(self.nsd) - - def print_nsd(self): - print(("NSD Name : {0}".format(self.get_name()))) - print(("Volume ID : {0}".format(self.get_volume_id()))) - print(("Server List: {0}".format(self.get_server_list()))) - print(("Device Type: {0}".format(self.get_device_type()))) - print(("Disk Name : {0}".format(self.get_disk_name()))) - print(("Remarks : {0}".format(self.get_remarks()))) - - - @staticmethod - def get_all_nsd_info(admin_ip=None): - nsd_info_list = [] - - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmlsnsd"),"-a", "-X", "-Y"]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc == RC_SUCCESS: - if "No disks were found" in stderr: - return nsd_info_list - else: - raise SpectrumScaleException("Retrieving NSD information Failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - nsd_dict = parse_unique_records(stdout) - nsd_list = nsd_dict["nsd"] - - for nsd in nsd_list: - nsd_instance = SpectrumScaleNSD(nsd) - nsd_info_list.append(nsd_instance) - - return nsd_info_list - - - @staticmethod - def delete_nsd(nsd_list, admin_ip=None): - nsd_names = ";".join(nsd_list) - - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmdelnsd"), nsd_names]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], 
cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Deleting NSD(s) Failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - - @staticmethod - def remove_server_access_to_nsd(nsd_to_delete, node_to_delete, - nsd_attached_to_nodes, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - # mmchnsd "nsd1:node1.domain.com" - server_access_list = ','.join(map(str, nsd_attached_to_nodes)) - server_access_list = nsd_to_delete+":"+server_access_list - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmchnsd"), server_access_list]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - e_msg = ("Exception encountered during execution of modifying NSD " - "server access list for NSD={0} on Node={1}. Exception " - "Message={2)".format(nsd_to_delete, node_to_delete, e)) - raise SpectrumScaleException(e_msg, cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - if rc != RC_SUCCESS: - e_msg = ("Failed to modify NSD server access list for NSD={0} on " - "Node={1}".format(nsd_to_delete, node_to_delete)) - raise SpectrumScaleException(e_msg, cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - -def main(): - try: - nsd_list = SpectrumScaleNSD.get_all_nsd_info() - for nsd in nsd_list: - nsd.print_nsd() - print("\n") - except Exception as e: - print(e) - -if __name__ == "__main__": - main() diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_utils.py deleted file mode 100755 index fafe5be0..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_utils.py +++ /dev/null @@ -1,688 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -import os -import sys -import json -import time -import subprocess -import threading -import logging -import signal -import urllib.request, urllib.parse, urllib.error -import urllib.parse -import types -from collections import OrderedDict - -GPFS_CMD_PATH = "/usr/lpp/mmfs/bin" -RC_SUCCESS = 0 -CMD_TIMEDOUT = "CMD_TIMEDOUT" - -class SpectrumScaleException(Exception): - _expmsg="" - _mmcmd="" - _cmdargs="" - _rc=0 - _stdout="" - _stderr="" - - def __init__(self, msg, mmcmd, cmdargs, rc, stdout, stderr): - self._expmsg = msg - self._mmcmd = mmcmd - self._cmdargs = nsd_names = " ".join(cmdargs) - self._rc = rc - self._stdout = stdout - self._stderr = stderr - - def get_message(self): - return self._expmsg - - def __str__(self): - error_str = ("{0}. " - "Command: \"{1}\". " - "Arguments: \"{2}\". " - "Error Code: {3}. " - "Error Message: \"{4}\". 
").format(self._expmsg, - self._mmcmd, - self._cmdargs, - self._rc, - self._stderr) - - return error_str - - -###################################### -## ## -## Logger Functions ## -## ## -###################################### -class SpectrumScaleLogger: - logger = None - - @staticmethod - def get_logger(): - if SpectrumScaleLogger.logger == None: - logger = logging.getLogger() - logger.setLevel(logging.DEBUG) - - log_file_handler = logging.FileHandler('/var/log/ibm_specscale_ansible.log') - log_file_handler.setLevel(logging.DEBUG) - log_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') - log_file_handler.setFormatter(log_formatter) - logger.addHandler(log_file_handler) - - # TODO: Enable once the "Shared Connection eror is rectified" - # console_handler = logging.StreamHandler() - # console_handler.setLevel(logging.INFO) - # console_formatter = logging.Formatter('[%(levelname)s] %(message)s') - # console_handler.setFormatter(console_formatter) - # logger.addHandler(console_handler) - - SpectrumScaleLogger.logger = logger - - return SpectrumScaleLogger.logger - - @staticmethod - def shutdown(): - if SpectrumScaleLogger.logger: - logging.shutdown() - - -###################################### -## ## -## Utility Functions ## -## ## -###################################### -def decode(input_string): - return urllib.parse.unquote(input_string) - - -def _stop_process(proc, logger, log_cmd, timeout): - try: - if proc.poll() is None: - logger.info("Command %s timed out after %s sec. Sending SIGTERM", log_cmd, timeout) - print(("Command %s timed out after %s sec. Sending SIGTERM", log_cmd, timeout)) - os.kill(proc.pid, signal.SIGTERM) # SIGKILL or SIGTERM - - time.sleep(0.5) - if proc.poll() is None: - logger.info("Command %s timed out after %s sec. Sending SIGKILL", log_cmd, timeout) - print(("Command %s timed out after %s sec. Sending SIGKILL", log_cmd, timeout)) - os.kill(proc.pid, signal.SIGKILL) - except Exception as e: - logger.warning(str(e)) - print((str(e))) - - -def runCmd(cmd, timeout=300, sh=False, env=None, retry=0): - """ - Execute an external command, read the output and return it. 
- @param cmd (str|list of str): command to be executed - @param timeout (int): timeout in sec, after which the command is forcefully terminated - @param sh (bool): True if the command is to be run in a shell and False if directly - @param env (dict): environment variables for the new process (instead of inheriting from the current process) - @param retry (int): number of retries on command timeout - @return: (stdout, stderr, rc) (str, str, int): the output of the command - """ - - logger = SpectrumScaleLogger.get_logger() - - if isinstance(cmd, str): - log_cmd = cmd - else: - log_cmd = ' '.join(cmd) - - t_start = time.time() - try: - if env is not None: - fullenv = dict(os.environ) - fullenv.update(env) - env = fullenv - # create the subprocess, ensuring a new process group is spawned - # so we can later kill the process and all its child processes - proc = subprocess.Popen(cmd, shell=sh, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, - close_fds=False, env=env, universal_newlines=True) - - timer = threading.Timer(timeout, _stop_process, [proc, logger, log_cmd, timeout]) - timer.start() - - (sout, serr) = proc.communicate() - timer.cancel() # stop the timer when we got data from process - - ret = proc.poll() - except OSError as e: - logger.debug(str(e)) - sout = "" - serr = str(e) - ret = 127 if "No such file" in serr else 255 - finally: - try: - proc.stdout.close() - proc.stderr.close() - except: #pylint: disable=bare-except - pass - - t_run = time.time() - t_start - logger.debug("runCmd: Command executed: {0} Start time: {1} End time: {2} " - "Total time: {3}".format(log_cmd, t_start, - time.time(), t_run)) - - cmd_timeout = ret in (-signal.SIGTERM, -signal.SIGKILL) # 143,137 - if ret == -6 and retry >= 0 : # special handling for sigAbrt - logger.warning("runCmd: retry abrt %s with subprocess %s", cmd, s32) - (sout, serr, ret) = runCmd(cmd, timeout, sh, env, -1) - - if cmd_timeout and retry > 0: - retry -= 1 - logger.warning("runCmd: Retry command %s counter: %s", cmd, retry) - (sout, serr, ret) = runCmd(cmd, timeout, sh, env, retry) - elif cmd_timeout: - serr = CMD_TIMEDOUT - logger.warning("runCmd: %s Timeout:%d ret:%s", cmd, timeout, ret) - else: - logger.debug("runCmd: %s :(%d) ret:%s \n%s \n%s", cmd, timeout, ret, serr, sout) - - return (sout, serr, ret) - - -###################################### -## ## -## Parse Functions ## -## ## -###################################### - -# NOTE: The machine parsable "mm" list (-Y) commands fall into three categories -# in terms of how the information is organized and therefore should be parsed. 
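
For orientation, a minimal usage sketch of the runCmd() wrapper above, modelled on the calls made elsewhere in this removed module (the mmlsfs invocation and the parse_simple_cmd_output() pairing are taken from the module's own main(); variable names are illustrative only):

    sout, serr, rc = runCmd([os.path.join(GPFS_CMD_PATH, "mmlsfs"), "all", "-Y"], sh=False)
    if rc == RC_SUCCESS:
        # machine-readable (-Y) output is then handed to one of the parse helpers below
        fs_info = parse_simple_cmd_output(sout, "deviceName", "properties", "filesystems")
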
-# Each of these different formats are listed below along with the appropriate -# parsing functions - -############################################# -# # -# TYPE 1 # -# # -############################################# -# -# "mm" command output type #1 -# -# mmlscluster:clusterSummary:HEADER:version:reserved:reserved:clusterName:clusterId:uidDomain:rshPath:rshSudoWrapper:rcpPath:rcpSudoWrapper:repositoryType:primaryServer:secondaryServer: -# mmlscluster:clusterNode:HEADER:version:reserved:reserved:nodeNumber:daemonNodeName:ipAddress:adminNodeName:designation:otherNodeRoles:adminLoginName:otherNodeRolesAlias: -# mmlscluster:cnfsSummary:HEADER:version:reserved:reserved:cnfsSharedRoot:cnfsMoundPort:cnfsNFSDprocs:cnfsReboot:cnfsMonitorEnabled:cnfsGanesha: -# mmlscluster:cnfsNode:HEADER:version:reserved:reserved:nodeNumber:daemonNodeName:ipAddress:cnfsState:cnfsGroupId:cnfsIplist: -# mmlscluster:cesSummary:HEADER:version:reserved:reserved:cesSharedRoot:EnabledServices:logLevel:addressPolicy: -# mmlscluster:cesNode:HEADER:version:reserved:reserved:nodeNumber:daemonNodeName:ipAddress:cesGroup:cesState:cesIpList: -# mmlscluster:cloudGatewayNode:HEADER:version:reserved:reserved:nodeNumber:daemonNodeName: -# mmlscluster:clusterSummary:0:1:::cluster.domain.com:2936932203756487754:cluster.domain.com:/usr/bin/ssh:no:/usr/bin/scp:no:CCR:server-1.domain.com:: -# mmlscluster:clusterNode:0:1:::1:server-1.domain.com:10.0.0.1:server-1.domain.com:quorum:Z::perfmon: -# mmlscluster:clusterNode:0:1:::2:server-3.domain.com:10.0.0.4:server-3.domain.com:quorumManager:Z::perfmon: -# mmlscluster:clusterNode:0:1:::3:server-4.domain.com:10.0.0.4:server-4.domain.com:quorumManager:Z::perfmon: -# mmlscluster:clusterNode:0:1:::4:server-2.domain.com:10.0.0.2:server-2.domain.com::Z::perfmon: -# -# -# The above output is parsed and represented in JSON as follows: -# -# { -# "clusterSummary": { -# "version": "1", -# "clusterName": "cluster.domain.com", -# "clusterId": "2936932203756844651", -# "uidDomain": "cluster.domain.com", -# "rshPath": "/usr/bin/ssh", -# "rshSudoWrapper": "no", -# "rcpPath": "/usr/bin/scp", -# "rcpSudoWrapper": "no", -# "repositoryType": "CCR", -# "primaryServer": "server-1.domain.com", -# "secondaryServer": "" -# }, -# "clusterNode": [ -# { -# "version": "1", -# "nodeNumber": "1", -# "daemonNodeName": "server-1.domain.com", -# "ipAddress": "10.0.0.1", -# "adminNodeName": "server-1.domain.com", -# "designation": "quorum", -# "otherNodeRoles": "Z,X", -# "adminLoginName": "", -# "otherNodeRolesAlias": "perfmon,ces" -# }, -# { -# "version": "1", -# "nodeNumber": "2", -# "daemonNodeName": "server-3.domain.com", -# "ipAddress": "10.0.0.4", -# "adminNodeName": "server-3.domain.com", -# "designation": "quorumManager", -# "otherNodeRoles": "Z", -# "adminLoginName": "", -# "otherNodeRolesAlias": "perfmon" -# }, -# { -# "version": "1", -# "nodeNumber": "3", -# "daemonNodeName": "server-4.domain.com", -# "ipAddress": "10.0.0.4", -# "adminNodeName": "server-4.domain.com", -# "designation": "quorumManager", -# "otherNodeRoles": "Z", -# "adminLoginName": "", -# "otherNodeRolesAlias": "perfmon" -# }, -# { -# "version": "1", -# "nodeNumber": "4", -# "daemonNodeName": "server-2.domain.com", -# "ipAddress": "10.0.0.2", -# "adminNodeName": "server-2.domain.com", -# "designation": "", -# "otherNodeRoles": "Z,X", -# "adminLoginName": "", -# "otherNodeRolesAlias": "perfmon,ces" -# } -# ], -# "cesSummary": { -# "version": "1", -# "cesSharedRoot": "/ibm/cesSharedRoot", -# "EnabledServices": "SMB,NFS", -# "logLevel": "0", -# 
"addressPolicy": "even-coverage" -# }, -# "cesNode": [ -# { -# "version": "1", -# "nodeNumber": "1", -# "daemonNodeName": "server-1.domain.com", -# "ipAddress": "10.0.0.1", -# "cesGroup": "", -# "cesState": "e", -# "cesIpList": "10.0.0.5,10.0.0.6" -# }, -# { -# "version": "1", -# "nodeNumber": "4", -# "daemonNodeName": "server-2.domain.com", -# "ipAddress": "10.0.0.2", -# "cesGroup": "", -# "cesState": "e", -# "cesIpList": "10.0.0.7,10.0.0.8" -# } -# ] -#} -# -# TODO: Change function name to something more appropriate -def parse_aggregate_cmd_output(cmd_raw_out, summary_records, header_index=2): - data_out = OrderedDict() - headers = OrderedDict() - - if isinstance(cmd_raw_out, str): - lines = cmd_raw_out.splitlines() - else: - lines = cmd_raw_out - - for line in lines: - values = line.split(":") - if len(values) < 3: - continue - - command = values[0] - datatype = values[1] or values[0] - if datatype == "": - continue - - if values[header_index] == 'HEADER': - headers[datatype] = values - continue - - columnNames = headers[datatype] - - json_object = OrderedDict() - for key, value in zip(columnNames[header_index+1:], - values[header_index+1:]): - json_object[key] = decode(value) - - if "" in json_object: - del json_object[""] - if 'reserved' in json_object: - del json_object['reserved'] - - # Summary records should only exist once - if datatype in summary_records: - json_d_type = "object" - data_out[datatype] = json_object - else: - json_d_type = "array" - json_array = [] - if datatype in list(data_out.keys()): - # An element in the array already exists - json_array = data_out[datatype] - json_array.append(json_object) - data_out[datatype] = json_array - - return data_out - - -############################################# -# # -# TYPE 2 # -# # -############################################# -# -# "mm" command output type #2 -# -# mmlsfs::HEADER:version:reserved:reserved:deviceName:fieldName:data:remarks: -# mmlsfs::0:1:::FS1:minFragmentSize:8192:: -# mmlsfs::0:1:::FS1:inodeSize:4096:: -# mmlsfs::0:1:::FS1:indirectBlockSize:32768:: -# mmlsfs::0:1:::FS1:defaultMetadataReplicas:2:: -# mmlsfs::0:1:::FS1:maxMetadataReplicas:2:: -# mmlsfs::0:1:::FS1:defaultDataReplicas:1:: -# mmlsfs::0:1:::FS1:maxDataReplicas:2:: -# mmlsfs::0:1:::FS1:blockAllocationType:scatter:: -# mmlsfs::0:1:::FS1:fileLockingSemantics:nfs4:: -# mmlsfs::0:1:::FS1:ACLSemantics:nfs4:: -# mmlsfs::0:1:::FS1:numNodes:100:: -# mmlsfs::0:1:::FS1:blockSize:4194304:: -# mmlsfs::0:1:::FS1:quotasAccountingEnabled:none:: -# mmlsfs::0:1:::FS1:quotasEnforced:none:: -# mmlsfs::0:1:::FS1:defaultQuotasEnabled:none:: -# mmlsfs::0:1:::FS1:perfilesetQuotas:No:: -# mmlsfs::0:1:::FS1:filesetdfEnabled:No:: -# mmlsfs::0:1:::FS1:filesystemVersion:22.00 (5.0.4.0):: -# mmlsfs::0:1:::FS1:filesystemVersionLocal:22.00 (5.0.4.0):: -# mmlsfs::0:1:::FS1:filesystemVersionManager:22.00 (5.0.4.0):: -# mmlsfs::0:1:::FS1:filesystemVersionOriginal:22.00 (5.0.4.0):: -# mmlsfs::0:1:::FS1:filesystemHighestSupported:22.00 (5.0.4.0):: -# mmlsfs::0:1:::FS1:create-time:Fri Feb 21 01%3A36%3A21 2020:: -# mmlsfs::0:1:::FS1:DMAPIEnabled:No:: -# mmlsfs::0:1:::FS1:logfileSize:33554432:: -# mmlsfs::0:1:::FS1:exactMtime:Yes:: -# mmlsfs::0:1:::FS1:suppressAtime:relatime:: -# mmlsfs::0:1:::FS1:strictReplication:whenpossible:: -# mmlsfs::0:1:::FS1:fastEAenabled:Yes:: -# mmlsfs::0:1:::FS1:encryption:No:: -# mmlsfs::0:1:::FS1:maxNumberOfInodes:513024:: -# mmlsfs::0:1:::FS1:maxSnapshotId:0:: -# mmlsfs::0:1:::FS1:UID:090B5475%3A5E4F9685:: -# mmlsfs::0:1:::FS1:logReplicas:0:: -# 
mmlsfs::0:1:::FS1:is4KAligned:Yes:: -# mmlsfs::0:1:::FS1:rapidRepairEnabled:Yes:: -# mmlsfs::0:1:::FS1:write-cache-threshold:0:: -# mmlsfs::0:1:::FS1:subblocksPerFullBlock:512:: -# mmlsfs::0:1:::FS1:storagePools:system:: -# mmlsfs::0:1:::FS1:file-audit-log:No:: -# mmlsfs::0:1:::FS1:maintenance-mode:No:: -# mmlsfs::0:1:::FS1:disks:nsd1;nsd2:: -# mmlsfs::0:1:::FS1:automaticMountOption:yes:: -# mmlsfs::0:1:::FS1:additionalMountOptions:none:: -# mmlsfs::0:1:::FS1:defaultMountPoint:%2Fibm%2FFS1:: -# -# The above output is parsed and represented in JSON as follows: -# -#{ -# filesystems : [ -# { -# deviceName : FS1 -# properties : [ -# { -# fieldName: minFragmentSize -# data : 8192 -# remarks : "" -# }, -# { -# fieldName: inodeSize -# data : 4096 -# remarks : "" -# } -# ] -# -# }, -# { -# deviceName : FS2 -# properties : [ -# { -# fieldName: minFragmentSize -# data : 8192 -# remarks : "" -# }, -# { -# fieldName: inodeSize -# data : 4096 -# remarks : "" -# } -# ] -# -# } -# ] -#} -# -# TODO: Change function name to something more appropriate -def parse_simple_cmd_output(cmd_raw_out, cmd_key, cmd_prop_name, - datatype="", header_index=2): - data_out = OrderedDict() - headers = OrderedDict() - - if isinstance(cmd_raw_out, str): - lines = cmd_raw_out.splitlines() - else: - lines = cmd_raw_out - - for line in lines: - values = line.split(":") - if len(values) < 3: - continue - - command = values[0] - - if not datatype: - datatype = values[1] or values[0] - if datatype == "": - continue - - if values[header_index] == 'HEADER': - headers[datatype] = values - continue - - columnNames = headers[datatype] - - json_object = OrderedDict() - instance_key = "" - for key, value in zip(columnNames[header_index+1:], - values[header_index+1:]): - if cmd_key in key: - instance_key = value - else: - json_object[key] = decode(value) - - if "" in json_object: - del json_object[""] - if 'reserved' in json_object: - del json_object['reserved'] - - json_array = [] - obj_found = False - if datatype in list(data_out.keys()): - # List of OrederDict - json_array = data_out[datatype] - prop_list = [] - # Each obj is an OrderDict - for obj in json_array: - key_val = obj[cmd_key] - if instance_key in key_val: - # We found the obj to which this record should be added - prop_list = obj[cmd_prop_name] - prop_list.append(json_object) - obj[cmd_prop_name] = prop_list - obj_found = True - break - - if not obj_found: - prop_list = [] - prop_list.append(json_object) - device_dict = OrderedDict() - device_dict[cmd_key] = instance_key - device_dict[cmd_prop_name] = prop_list - json_array.append(device_dict) - - data_out[datatype] = json_array - - return data_out - - -############################################# -# # -# TYPE 3 # -# # -############################################# -# -# "mm" command output type #3 -# -# mmlsnsd:nsd:HEADER:version:reserved:reserved:fileSystem:diskName:volumeId:serverList:thinDisk: -# mmlsnsd:nsd:0:1:::FS1:nsd1:090B54755E4F84E6:server-3.domain.com,server-4.domain.com:: -# mmlsnsd:nsd:0:1:::FS1:nsd2:090B54765E4F84E8:server-4.domain.com,server-3.domain.com:: -# mmlsnsd:nsd:0:1:::FS2:nsd3:090B54755E4F84EA:server-3.domain.com,server-4.domain.com:: -# mmlsnsd:nsd:0:1:::FS2:nsd4:090B54765E4F84EC:server-4.domain.com,server-3.domain.com:: -# -# The above output is parsed and represented in JSON as follows: -# -# { -# mmlsnsd : [ -# { -# diskName : "nsd1" -# fileSystem: "FS1" -# volumeId : "090B54755E4F84E6" -# serverList: "server-3.domain.com,server-4.domain.com" -# thinDisk : "" -# } -# { -# diskName : "nsd2" -# 
fileSystem: "FS1" -# volumeId : "090B54765E4F84E8" -# serverList: "server-4.domain.com,server-3.domain.com" -# thinDisk : "" -# } -# { -# diskName : "nsd3" -# fileSystem: "FS2" -# volumeId : "090B54755E4F84EA" -# serverList: "server-3.domain.com,server-4.domain.com" -# thinDisk : "" -# } -# { -# diskName : "nsd4" -# fileSystem: "FS2" -# volumeId : "090B54765E4F84EC" -# serverList: "server-4.domain.com,server-3.domain.com" -# thinDisk : "" -# } -# ] -# } -# -# TODO: Change function name to something more appropriate -def parse_unique_records(cmd_raw_out, datatype="", header_index=2): - data_out = OrderedDict() - headers = OrderedDict() - - if isinstance(cmd_raw_out, str): - lines = cmd_raw_out.splitlines() - else: - lines = cmd_raw_out - - for line in lines: - values = line.split(":") - if len(values) < 3: - continue - - command = values[0] - - if not datatype: - datatype = values[1] or values[0] - if datatype == "": - continue - - if values[header_index] == 'HEADER': - headers[datatype] = values - continue - - columnNames = headers[datatype] - - json_object = OrderedDict() - for key, value in zip(columnNames[header_index+1:], - values[header_index+1:]): - json_object[key] = decode(value) - - if "" in json_object: - del json_object[""] - if 'reserved' in json_object: - del json_object['reserved'] - - json_array = [] - if datatype in list(data_out.keys()): - # List of OrederDict - json_array = data_out[datatype] - json_array.append(json_object) - - data_out[datatype] = json_array - - return data_out - - -############################################################################### -## ## -## Main Function ## -## ## -############################################################################### - -def main(): - cmd = "cluster" - if len(sys.argv) > 1: - cmd = sys.argv[1] - if "fs" in cmd: - cmd = "filesystem" - - sout = "" - serr = "" - rc = 0 - if "cluster" in cmd: - sout, serr, rc = runCmd([os.path.join(GPFS_CMD_PATH, "mmlscluster"),"-Y"], sh=False) - out_list = parse_aggregate_cmd_output(sout, ["clusterSummary", "cnfsSummary", "cesSummary"]) - elif "filesystem" in cmd: - sout, serr, rc = runCmd([os.path.join(GPFS_CMD_PATH, "mmlsfs"),"all","-Y"], sh=False) - out_list = parse_simple_cmd_output(sout, "deviceName", "properties", "filesystems") - elif "mount" in cmd: - sout, serr, rc = runCmd([os.path.join(GPFS_CMD_PATH, "mmlsmount"),"all","-Y"], sh=False) - out_list = parse_simple_cmd_output(sout, "realDevName", "mounts", "filesystem_mounts") - elif "config" in cmd: - sout, serr, rc = runCmd([os.path.join(GPFS_CMD_PATH, "mmlsconfig"),"-Y"], sh=False) - out_list = parse_unique_records(sout) - elif "df" in cmd: - sout, serr, rc = runCmd([os.path.join(GPFS_CMD_PATH, "mmdf"),"FS1", "-Y"], sh=False) - out_list = parse_aggregate_cmd_output(sout, ["poolTotal", "data", "metadata", "fsTotal", "inode"]) - - - if rc: - print(("Error executing command: %s %s", sout, serr)) - - json_str = json.dumps(out_list, indent=2) - print(json_str) - - -if __name__ == "__main__": - main() - diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_zimon_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_zimon_utils.py deleted file mode 100644 index d65c7d63..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_zimon_utils.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -import os -import re -import json -import time - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, \ - GPFS_CMD_PATH, RC_SUCCESS, SpectrumScaleException -except: - from ibm_spectrumscale_utils import runCmd, GPFS_CMD_PATH, \ - RC_SUCCESS, SpectrumScaleException - - -def get_zimon_collectors(): - """ - This function returns zimon collector node ip's. - """ - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmperfmon"), "config", "show"]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Retrieving Zimon information failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - output = stdout.splitlines() - col_regex = re.compile(r'colCandidates\s=\s(?P.*)') - for cmd_line in output: - if col_regex.match(cmd_line): - collectors = col_regex.match(cmd_line).group('collectors') - - collectors = collectors.replace("\"", '').replace(" ", '') - collectors = collectors.split(',') - - return collectors - - -def main(): - zimon_collectors_list = get_zimon_collectors() - for collector in zimon_collectors_list: - print(collector) - - -if __name__ == "__main__": - main() - diff --git a/roles/custom_module/tasks/main.yml b/roles/custom_module/tasks/main.yml deleted file mode 100644 index 2485fbab..00000000 --- a/roles/custom_module/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# Execute custom roles diff --git a/roles/custom_module/test/cluster/playbooks/cluster-get-test.yaml b/roles/custom_module/test/cluster/playbooks/cluster-get-test.yaml deleted file mode 100644 index 978da4b9..00000000 --- a/roles/custom_module/test/cluster/playbooks/cluster-get-test.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
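
To make the three `-Y` record layouts documented in the removed ibm_spectrumscale_utils.py above easier to follow, here is a condensed sketch of the step all of its parse helpers share: each data record is split on ':' and zipped against the most recent HEADER record of the same datatype, with values URL-decoded. It mirrors parse_unique_records() (header_index=2, so column names start at field 3); the name `stdout` is assumed to hold the raw command output:

    headers = {}
    records = []
    for line in stdout.splitlines():
        fields = line.split(":")            # e.g. mmlsnsd:nsd:0:1:::FS1:nsd1:...
        if len(fields) < 3:
            continue
        datatype = fields[1] or fields[0]
        if fields[2] == "HEADER":
            headers[datatype] = fields      # remember the column names for this datatype
            continue
        records.append({key: decode(value)  # decode() is urllib.parse.unquote, defined above
                        for key, value in zip(headers[datatype][3:], fields[3:])
                        if key and key != "reserved"})
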
-# - -- name: Test Spectrum Scale Cluster Module - hosts: controller - gather_facts: no - tasks: - - name: Query cluster information - ibm_spectrumscale_cluster: - op: 'get' - register: cluster_info_out - run_once: true - - - name: Display cluster information - debug: - msg: "{{ cluster_info_out }}" - - - name: Display cluster name - debug: - msg: "{{ (cluster_info_out.result | from_json).cluster_info.clusterSummary.clusterName }}" - - - name: Display all node name(s) - admin node name - debug: - msg: "{{ (cluster_info_out.result | from_json).cluster_info.clusterNode | map(attribute='adminNodeName') | list }}" - - # Example of using json_query - - name: Display all node name(s) - daemon node name - debug: - var: item - loop: "{{ (cluster_info_out.result | from_json).cluster_info | json_query('clusterNode[*].daemonNodeName') }}" - - - name: Display all quorum nodes - debug: - var: item - loop: "{{ (cluster_info_out.result | from_json).cluster_info | json_query(node_designation_query) }}" - vars: - node_designation_query: "clusterNode[?contains(designation, 'quorum')].adminNodeName" diff --git a/roles/custom_module/test/filesystem/playbooks/filesystem-test.yaml b/roles/custom_module/test/filesystem/playbooks/filesystem-test.yaml deleted file mode 100644 index 953cf5d7..00000000 --- a/roles/custom_module/test/filesystem/playbooks/filesystem-test.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -- name: Test Spectrum Scale Cluster Module - hosts: controller - tasks: - - name: Run Filesystem Module - ibm_spectrumscale_filesystem: - op: 'get' - register: module_out - - - name: Dump Filesystem output - debug: - msg: "{{ (module_out.result | from_json).filesystems | map(attribute='deviceName') | list }}" - diff --git a/roles/custom_module/test/node/common/AddNodeStanza.j2 b/roles/custom_module/test/node/common/AddNodeStanza.j2 deleted file mode 100644 index 1d67590a..00000000 --- a/roles/custom_module/test/node/common/AddNodeStanza.j2 +++ /dev/null @@ -1,4 +0,0 @@ -{% for host in groups['test_add_nodes'] | sort %} -{{ hostvars[host].inventory_hostname }}:{{hostvars[host].designation }} -{% endfor %} - diff --git a/roles/custom_module/test/node/playbooks/node-add-test.yaml b/roles/custom_module/test/node/playbooks/node-add-test.yaml deleted file mode 100644 index 9c93ca30..00000000 --- a/roles/custom_module/test/node/playbooks/node-add-test.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -- name: Test adding node to Spectrum Scale cluster - hosts: controller - gather_facts: no - tasks: - - name: Generate Stanza file to add Node(s) - template: - src: ../common/AddNodeStanza.j2 - dest: /tmp/NodeFile - - - name: Add Node(s) to cluster - ibm_spectrumscale_node: - state: "present" - name: "{{ groups['test_add_nodes'][0] }}" - nodefile: "/tmp/NodeFile" - license: "client" - register: add_node_result - - diff --git a/roles/custom_module/test/node/playbooks/node-get-test.yaml b/roles/custom_module/test/node/playbooks/node-get-test.yaml deleted file mode 100644 index 3746916a..00000000 --- a/roles/custom_module/test/node/playbooks/node-get-test.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -- name: Test Spectrum Scale Node Module - hosts: controller - gather_facts: no - tasks: - - name: Query all nodes in the cluster - ibm_spectrumscale_node: - op: "get" - register: nodes_info - - - name: Display all nodes in the cluster - debug: - msg: "{{ nodes_info }}" - - - name: Query a single node in the cluster - ibm_spectrumscale_node: - op: "get" - name: "node1.domain.com" - register: node_1_info - - - name: Display a single node information - debug: - msg: "{{ node_1_info }}" - diff --git a/roles/custom_module/test/node/playbooks/node-remove-test.yaml b/roles/custom_module/test/node/playbooks/node-remove-test.yaml deleted file mode 100644 index f1432b94..00000000 --- a/roles/custom_module/test/node/playbooks/node-remove-test.yaml +++ /dev/null @@ -1,162 +0,0 @@ -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -- name: Test removing node that has been shutdown (Negative Test) - hosts: controller - gather_facts: no - tasks: - - name: Shutdown node - ibm_spectrumscale_node: - op: "stop" - name: "{{ groups['test_remove_nodes'][0] }}" - - - name: Attempt to remove node that is shutdown and ensure it is note removed - ibm_spectrumscale_node: - state: "absent" - name: "{{ groups['test_remove_nodes'][0] }}" - register: result_node_remove - failed_when: > - (result_node_remove.rc == 0) or - ('FAILED' not in result_node_remove.msg) - - - name: Check to ensure node still exists - ibm_spectrumscale_node: - op: "get" - name: "{{ groups['test_remove_nodes'][0] }}" - register: result_node_info - failed_when: > - (((result_node_info.result | from_json).clusterNodes | length) != 1) or - (groups['test_remove_nodes'][0] not in (result_node_info.result | from_json).clusterNodes[0].adminNodeName) - - - name: Startup node - ibm_spectrumscale_node: - op: "start" - name: "{{ groups['test_remove_nodes'][0] }}" - - tags: - - remove_node_down - - -- name: Test removing node that has unhealthy disks (Negative Test) - hosts: controller - gather_facts: no - tasks: - - name: Test removing node that has unhealthy disks - block: - - name: Stop disk(s) - command: /usr/lpp/mmfs/bin/mmchdisk - "{{ hostvars[groups['test_remove_storage_nodes'][0]].filesystem }}" - "stop" - "-d" - "{{hostvars[groups['test_remove_storage_nodes'][0]].nsds}}" - - - name: Attempt to remove node with unhealthy disks and ensure node is not removed - ibm_spectrumscale_node: - state: "absent" - name: "{{ groups['test_remove_storage_nodes'][0] }}" - register: result_node_remove - failed_when: > - (result_node_remove.rc == 0) or - ('FAILED' not in result_node_remove.msg) - - - name: Check to ensure node still exists - ibm_spectrumscale_node: - op: "get" - name: "{{ groups['test_remove_storage_nodes'][0] }}" - register: result_node_info - failed_when: > - (((result_node_info.result | from_json).clusterNodes | length) != 1) or - (groups['test_remove_nodes'][0] not in (result_node_info.result | from_json).clusterNodes[0].adminNodeName) - - - name: Start disk(s) - command: /usr/lpp/mmfs/bin/mmchdisk - "{{ hostvars[groups['test_remove_storage_nodes'][0]].filesystem }}" - "start" - "-d" - "{{ hostvars[groups['test_remove_storage_nodes'][0]].nsds }}" - when: (groups['test_remove_storage_nodes']|length > 0) - tags: - - remove_node_with_unhealthy_disk - - -- name: Test removing a quorum node (Negative Test) - hosts: controller - gather_facts: no - tasks: - - name: Attempt to remove quorum node and ensure node is not removed - ibm_spectrumscale_node: - state: "absent" - name: "{{ groups['quorum_nodes'][0] }}" - register: result_quorum_node_remove - failed_when: > - (result_quorum_node_remove.rc == 0) or - ('FAILED' not in result_quorum_node_remove.msg) - - tags: - - remove_quorum_node - - -- name: Test removing a manager node (Negative Test) - hosts: controller - gather_facts: no - tasks: - - name: Attempt to remove manager node and ensure node is not removed - ibm_spectrumscale_node: - state: "absent" - name: "{{ groups['manager_nodes'][0] }}" - register: result_manager_node_remove - failed_when: > - (result_manager_node_remove.rc == 0) or - ('FAILED' not in result_manager_node_remove.msg) - tags: - - remove_manager_node - - -- name: Test removing a node - hosts: controller - gather_facts: no - tasks: - - name: Remove a single node from the cluster - ibm_spectrumscale_node: - state: "absent" - name: "{{ groups['test_remove_nodes'][0] }}" - register: 
node_remove_result - - - name: Check to ensure node has been deleted - ibm_spectrumscale_node: - op: "get" - name: "{{ groups['test_remove_nodes'][0] }}" - register: result_node_info - failed_when: > - (((result_node_info.result | from_json).clusterNodes | length) != 0) - tags: - - remove_valid_node - - -- name: Test removing a non existing node - hosts: controller - gather_facts: no - tasks: - - name: Remove a non existing node from the cluster - ibm_spectrumscale_node: - state: "absent" - name: "{{ groups['test_remove_nodes'][0] }}" - register: node_remove_result - - tags: - - remove_duplicate_node - diff --git a/roles/custom_module/test/node/playbooks/node-status-test.yaml b/roles/custom_module/test/node/playbooks/node-status-test.yaml deleted file mode 100644 index 9bc90670..00000000 --- a/roles/custom_module/test/node/playbooks/node-status-test.yaml +++ /dev/null @@ -1,23 +0,0 @@ -- name: Test Spectrum Scale Node Module - hosts: controller - gather_facts: no - tasks: - - name: Query all node status in the cluster - ibm_spectrumscale_node: - op: "status" - register: nodes_status_info - - - name: Display all node statuses in the cluster - debug: - msg: "{{ nodes_status_info }}" - - - name: Query a single node status in the cluster - ibm_spectrumscale_node: - op: "status" - name: "node1.domain.com" - register: node_status_1_info - - - name: Display a singles node status information - debug: - msg: "{{ node_status_1_info }}" - diff --git a/roles/custom_module/test/node/python/add-node.json b/roles/custom_module/test/node/python/add-node.json deleted file mode 100644 index 2f5dd04a..00000000 --- a/roles/custom_module/test/node/python/add-node.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "ANSIBLE_MODULE_ARGS": { - "state": "present", - "name": "node1.domain.com,node2.domain.com", - "nodefile": "../common/stanza-add-node", - "license": "client" - } -} diff --git a/roles/custom_module/test/node/python/remove-node.json b/roles/custom_module/test/node/python/remove-node.json deleted file mode 100644 index e243d690..00000000 --- a/roles/custom_module/test/node/python/remove-node.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "ANSIBLE_MODULE_ARGS": { - "state": "absent", - "name": "node1.doamin.com" - } -} From 4b03f90fe63222baa059aa6dbb7140c8a122f67d Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Fri, 29 Oct 2021 19:42:51 +0200 Subject: [PATCH 004/113] Remove role-specific tags in favor of global collection tags Signed-off-by: Achim Christ --- roles/callhome/cluster/meta/main.yml | 6 +----- roles/callhome/node/meta/main.yml | 8 ++------ roles/callhome/postcheck/meta/main.yml | 6 +----- roles/callhome/precheck/meta/main.yml | 6 +----- roles/core/cluster/meta/main.yml | 6 +----- roles/core/common/meta/main.yml | 6 +----- roles/core/node/meta/main.yml | 6 +----- roles/core/postcheck/meta/main.yml | 6 +----- roles/core/precheck/meta/main.yml | 6 +----- roles/core/upgrade/meta/main.yml | 6 +----- roles/gui/cluster/meta/main.yml | 9 +-------- roles/gui/node/meta/main.yml | 9 +-------- roles/gui/postcheck/meta/main.yml | 9 +-------- roles/gui/precheck/meta/main.yml | 9 +-------- roles/gui/upgrade/meta/main.yml | 9 +-------- roles/nfs/cluster/meta/main.yml | 8 ++------ roles/nfs/common/meta/main.yml | 6 +----- roles/nfs/node/meta/main.yml | 9 ++------- roles/nfs/postcheck/meta/main.yml | 7 +------ roles/nfs/precheck/meta/main.yml | 7 +------ roles/nfs/upgrade/meta/main.yml | 6 +----- roles/remote_mount/meta/main.yml | 9 ++------- roles/scale_auth/upgrade/meta/main.yml | 8 ++------ 
roles/scale_ece/cluster/meta/main.yml | 9 +-------- roles/scale_ece/node/meta/main.yml | 9 +-------- roles/scale_ece/precheck/meta/main.yml | 9 +-------- roles/scale_ece/upgrade/meta/main.yml | 9 +-------- roles/scale_fileauditlogging/cluster/meta/main.yml | 9 +-------- roles/scale_fileauditlogging/node/meta/main.yml | 9 +-------- roles/scale_fileauditlogging/postcheck/meta/main.yml | 9 +-------- roles/scale_fileauditlogging/precheck/meta/main.yml | 9 +-------- roles/scale_fileauditlogging/upgrade/meta/main.yml | 9 +-------- roles/scale_hdfs/cluster/meta/main.yml | 6 +----- roles/scale_hdfs/node/meta/main.yml | 7 +------ roles/scale_hdfs/postcheck/meta/main.yml | 6 +----- roles/scale_hdfs/precheck/meta/main.yml | 6 +----- roles/scale_hdfs/upgrade/meta/main.yml | 8 ++------ roles/scale_hpt/node/meta/main.yml | 9 +-------- roles/scale_hpt/postcheck/meta/main.yml | 9 +-------- roles/scale_hpt/precheck/meta/main.yml | 9 +-------- roles/scale_hpt/upgrade/meta/main.yml | 9 +-------- roles/scale_object/cluster/meta/main.yml | 8 ++------ roles/scale_object/node/meta/main.yml | 9 ++------- roles/scale_object/postcheck/meta/main.yml | 7 +------ roles/scale_object/precheck/meta/main.yml | 8 ++------ roles/scale_object/upgrade/meta/main.yml | 8 ++------ roles/smb/cluster/meta/main.yml | 6 +----- roles/smb/node/meta/main.yml | 9 ++------- roles/smb/postcheck/meta/main.yml | 6 +----- roles/smb/precheck/meta/main.yml | 6 +----- roles/smb/upgrade/meta/main.yml | 6 +----- roles/zimon/cluster/meta/main.yml | 10 +--------- roles/zimon/node/meta/main.yml | 9 +-------- roles/zimon/postcheck/meta/main.yml | 9 +-------- roles/zimon/precheck/meta/main.yml | 9 +-------- roles/zimon/upgrade/meta/main.yml | 9 +-------- 56 files changed, 67 insertions(+), 369 deletions(-) diff --git a/roles/callhome/cluster/meta/main.yml b/roles/callhome/cluster/meta/main.yml index 1494a57c..988b7f46 100755 --- a/roles/callhome/cluster/meta/main.yml +++ b/roles/callhome/cluster/meta/main.yml @@ -13,11 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - precheck diff --git a/roles/callhome/node/meta/main.yml b/roles/callhome/node/meta/main.yml index 5332470d..7470ba9e 100755 --- a/roles/callhome/node/meta/main.yml +++ b/roles/callhome/node/meta/main.yml @@ -13,11 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] -dependencies: +dependencies: - core/common diff --git a/roles/callhome/postcheck/meta/main.yml b/roles/callhome/postcheck/meta/main.yml index 97d8df54..24c32ee8 100755 --- a/roles/callhome/postcheck/meta/main.yml +++ b/roles/callhome/postcheck/meta/main.yml @@ -13,11 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] # - common diff --git a/roles/callhome/precheck/meta/main.yml b/roles/callhome/precheck/meta/main.yml index 8e1f498c..3e0ea96e 100755 --- a/roles/callhome/precheck/meta/main.yml +++ b/roles/callhome/precheck/meta/main.yml @@ -13,10 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] diff --git a/roles/core/cluster/meta/main.yml b/roles/core/cluster/meta/main.yml index 6e123370..a5be8607 100644 --- a/roles/core/cluster/meta/main.yml +++ b/roles/core/cluster/meta/main.yml @@ -12,10 +12,6 @@ galaxy_info: versions: - 7 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] diff --git a/roles/core/common/meta/main.yml 
b/roles/core/common/meta/main.yml index 88542913..3f697136 100644 --- a/roles/core/common/meta/main.yml +++ b/roles/core/common/meta/main.yml @@ -12,10 +12,6 @@ galaxy_info: versions: - 7 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] diff --git a/roles/core/node/meta/main.yml b/roles/core/node/meta/main.yml index a3fcd724..64d5b75b 100644 --- a/roles/core/node/meta/main.yml +++ b/roles/core/node/meta/main.yml @@ -12,11 +12,7 @@ galaxy_info: versions: - 7 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - common diff --git a/roles/core/postcheck/meta/main.yml b/roles/core/postcheck/meta/main.yml index f4128a33..465e1d59 100644 --- a/roles/core/postcheck/meta/main.yml +++ b/roles/core/postcheck/meta/main.yml @@ -12,10 +12,6 @@ galaxy_info: versions: - 7 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] diff --git a/roles/core/precheck/meta/main.yml b/roles/core/precheck/meta/main.yml index 65065ffd..0c3e1dd1 100644 --- a/roles/core/precheck/meta/main.yml +++ b/roles/core/precheck/meta/main.yml @@ -12,11 +12,7 @@ galaxy_info: versions: - 7 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - common diff --git a/roles/core/upgrade/meta/main.yml b/roles/core/upgrade/meta/main.yml index a3fcd724..64d5b75b 100644 --- a/roles/core/upgrade/meta/main.yml +++ b/roles/core/upgrade/meta/main.yml @@ -12,11 +12,7 @@ galaxy_info: versions: - 7 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - common diff --git a/roles/gui/cluster/meta/main.yml b/roles/gui/cluster/meta/main.yml index 211a3da2..84b93f03 100644 --- a/roles/gui/cluster/meta/main.yml +++ b/roles/gui/cluster/meta/main.yml @@ -13,14 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - core/common diff --git a/roles/gui/node/meta/main.yml b/roles/gui/node/meta/main.yml index 4b239948..c5f3181d 100644 --- a/roles/gui/node/meta/main.yml +++ b/roles/gui/node/meta/main.yml @@ -13,14 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - core/common diff --git a/roles/gui/postcheck/meta/main.yml b/roles/gui/postcheck/meta/main.yml index 26b463e5..4968a007 100644 --- a/roles/gui/postcheck/meta/main.yml +++ b/roles/gui/postcheck/meta/main.yml @@ -13,13 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/gui/precheck/meta/main.yml b/roles/gui/precheck/meta/main.yml index ed1f3091..ce289486 100644 --- a/roles/gui/precheck/meta/main.yml +++ b/roles/gui/precheck/meta/main.yml @@ -13,14 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - core/common diff --git a/roles/gui/upgrade/meta/main.yml b/roles/gui/upgrade/meta/main.yml index aa0a3dab..5fb65355 100644 --- a/roles/gui/upgrade/meta/main.yml +++ b/roles/gui/upgrade/meta/main.yml @@ -13,14 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - core/common diff --git a/roles/nfs/cluster/meta/main.yml b/roles/nfs/cluster/meta/main.yml index 4e4eeb35..daadbbdd 100644 --- 
a/roles/nfs/cluster/meta/main.yml +++ b/roles/nfs/cluster/meta/main.yml @@ -12,13 +12,9 @@ galaxy_info: versions: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + + galaxy_tags: [] dependencies: - nfs/precheck - nfs/common - diff --git a/roles/nfs/common/meta/main.yml b/roles/nfs/common/meta/main.yml index 7cc89b22..90219f1b 100644 --- a/roles/nfs/common/meta/main.yml +++ b/roles/nfs/common/meta/main.yml @@ -13,11 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - core/common diff --git a/roles/nfs/node/meta/main.yml b/roles/nfs/node/meta/main.yml index c68a66d3..f2be0093 100644 --- a/roles/nfs/node/meta/main.yml +++ b/roles/nfs/node/meta/main.yml @@ -13,13 +13,8 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] -dependencies: +dependencies: - core/common - nfs/precheck - diff --git a/roles/nfs/postcheck/meta/main.yml b/roles/nfs/postcheck/meta/main.yml index 86cfa371..a8993d08 100644 --- a/roles/nfs/postcheck/meta/main.yml +++ b/roles/nfs/postcheck/meta/main.yml @@ -13,11 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] - diff --git a/roles/nfs/precheck/meta/main.yml b/roles/nfs/precheck/meta/main.yml index 44ea4611..2d72c524 100644 --- a/roles/nfs/precheck/meta/main.yml +++ b/roles/nfs/precheck/meta/main.yml @@ -13,11 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] - diff --git a/roles/nfs/upgrade/meta/main.yml b/roles/nfs/upgrade/meta/main.yml index 07ad9f01..bd17de00 100644 --- a/roles/nfs/upgrade/meta/main.yml +++ b/roles/nfs/upgrade/meta/main.yml @@ -13,11 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - core/common diff --git a/roles/remote_mount/meta/main.yml b/roles/remote_mount/meta/main.yml index fdb3fff5..9642e45b 100644 --- a/roles/remote_mount/meta/main.yml +++ b/roles/remote_mount/meta/main.yml @@ -13,11 +13,6 @@ galaxy_info: - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] -dependencies: - [] +dependencies: [] diff --git a/roles/scale_auth/upgrade/meta/main.yml b/roles/scale_auth/upgrade/meta/main.yml index fa7f21fe..86a2defa 100644 --- a/roles/scale_auth/upgrade/meta/main.yml +++ b/roles/scale_auth/upgrade/meta/main.yml @@ -13,11 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] -dependencies: +dependencies: - core/common diff --git a/roles/scale_ece/cluster/meta/main.yml b/roles/scale_ece/cluster/meta/main.yml index d09b13b7..95746452 100644 --- a/roles/scale_ece/cluster/meta/main.yml +++ b/roles/scale_ece/cluster/meta/main.yml @@ -13,13 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_ece/node/meta/main.yml b/roles/scale_ece/node/meta/main.yml index f814fc31..c821e795 100644 --- a/roles/scale_ece/node/meta/main.yml +++ b/roles/scale_ece/node/meta/main.yml @@ -13,14 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - core/common diff --git a/roles/scale_ece/precheck/meta/main.yml b/roles/scale_ece/precheck/meta/main.yml index d09b13b7..95746452 100644 --- a/roles/scale_ece/precheck/meta/main.yml +++ 
b/roles/scale_ece/precheck/meta/main.yml @@ -13,13 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_ece/upgrade/meta/main.yml b/roles/scale_ece/upgrade/meta/main.yml index f814fc31..c821e795 100644 --- a/roles/scale_ece/upgrade/meta/main.yml +++ b/roles/scale_ece/upgrade/meta/main.yml @@ -13,14 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - core/common diff --git a/roles/scale_fileauditlogging/cluster/meta/main.yml b/roles/scale_fileauditlogging/cluster/meta/main.yml index b3e6d5b4..f4617e41 100644 --- a/roles/scale_fileauditlogging/cluster/meta/main.yml +++ b/roles/scale_fileauditlogging/cluster/meta/main.yml @@ -13,13 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_fileauditlogging/node/meta/main.yml b/roles/scale_fileauditlogging/node/meta/main.yml index bfbb1825..e10817dd 100644 --- a/roles/scale_fileauditlogging/node/meta/main.yml +++ b/roles/scale_fileauditlogging/node/meta/main.yml @@ -13,14 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - core/common diff --git a/roles/scale_fileauditlogging/postcheck/meta/main.yml b/roles/scale_fileauditlogging/postcheck/meta/main.yml index b3e6d5b4..f4617e41 100644 --- a/roles/scale_fileauditlogging/postcheck/meta/main.yml +++ b/roles/scale_fileauditlogging/postcheck/meta/main.yml @@ -13,13 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_fileauditlogging/precheck/meta/main.yml b/roles/scale_fileauditlogging/precheck/meta/main.yml index b3e6d5b4..f4617e41 100644 --- a/roles/scale_fileauditlogging/precheck/meta/main.yml +++ b/roles/scale_fileauditlogging/precheck/meta/main.yml @@ -13,13 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_fileauditlogging/upgrade/meta/main.yml b/roles/scale_fileauditlogging/upgrade/meta/main.yml index bfbb1825..e10817dd 100644 --- a/roles/scale_fileauditlogging/upgrade/meta/main.yml +++ b/roles/scale_fileauditlogging/upgrade/meta/main.yml @@ -13,14 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - core/common diff --git a/roles/scale_hdfs/cluster/meta/main.yml b/roles/scale_hdfs/cluster/meta/main.yml index b6cf2dcb..e289b968 100644 --- a/roles/scale_hdfs/cluster/meta/main.yml +++ b/roles/scale_hdfs/cluster/meta/main.yml @@ -13,11 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - nfs/common diff --git a/roles/scale_hdfs/node/meta/main.yml b/roles/scale_hdfs/node/meta/main.yml index 480edff7..3d8a8bc4 100644 --- a/roles/scale_hdfs/node/meta/main.yml +++ b/roles/scale_hdfs/node/meta/main.yml @@ -13,13 +13,8 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - core/common - scale_hdfs/precheck - diff --git a/roles/scale_hdfs/postcheck/meta/main.yml 
b/roles/scale_hdfs/postcheck/meta/main.yml index 5d862b46..174cca00 100644 --- a/roles/scale_hdfs/postcheck/meta/main.yml +++ b/roles/scale_hdfs/postcheck/meta/main.yml @@ -13,10 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_hdfs/precheck/meta/main.yml b/roles/scale_hdfs/precheck/meta/main.yml index 5d862b46..174cca00 100644 --- a/roles/scale_hdfs/precheck/meta/main.yml +++ b/roles/scale_hdfs/precheck/meta/main.yml @@ -13,10 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_hdfs/upgrade/meta/main.yml b/roles/scale_hdfs/upgrade/meta/main.yml index 477ef8a5..58f6f40f 100644 --- a/roles/scale_hdfs/upgrade/meta/main.yml +++ b/roles/scale_hdfs/upgrade/meta/main.yml @@ -13,11 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - - core/common \ No newline at end of file + - core/common diff --git a/roles/scale_hpt/node/meta/main.yml b/roles/scale_hpt/node/meta/main.yml index 51f61bcc..eb421d65 100644 --- a/roles/scale_hpt/node/meta/main.yml +++ b/roles/scale_hpt/node/meta/main.yml @@ -13,14 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - core/common diff --git a/roles/scale_hpt/postcheck/meta/main.yml b/roles/scale_hpt/postcheck/meta/main.yml index 12cc5d17..6dd669dc 100644 --- a/roles/scale_hpt/postcheck/meta/main.yml +++ b/roles/scale_hpt/postcheck/meta/main.yml @@ -13,13 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_hpt/precheck/meta/main.yml b/roles/scale_hpt/precheck/meta/main.yml index 496df107..1ed07c51 100644 --- a/roles/scale_hpt/precheck/meta/main.yml +++ b/roles/scale_hpt/precheck/meta/main.yml @@ -13,13 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_hpt/upgrade/meta/main.yml b/roles/scale_hpt/upgrade/meta/main.yml index 51f61bcc..eb421d65 100644 --- a/roles/scale_hpt/upgrade/meta/main.yml +++ b/roles/scale_hpt/upgrade/meta/main.yml @@ -13,14 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - core/common diff --git a/roles/scale_object/cluster/meta/main.yml b/roles/scale_object/cluster/meta/main.yml index 621ddd1c..cea0daa9 100644 --- a/roles/scale_object/cluster/meta/main.yml +++ b/roles/scale_object/cluster/meta/main.yml @@ -12,13 +12,9 @@ galaxy_info: versions: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + + galaxy_tags: [] dependencies: - scale_object/precheck - nfs/common - diff --git a/roles/scale_object/node/meta/main.yml b/roles/scale_object/node/meta/main.yml index d1d3bda5..1e8976c1 100644 --- a/roles/scale_object/node/meta/main.yml +++ b/roles/scale_object/node/meta/main.yml @@ -12,13 +12,8 @@ galaxy_info: versions: - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] -dependencies: +dependencies: - scale_object/precheck - nfs/common - diff --git a/roles/scale_object/postcheck/meta/main.yml b/roles/scale_object/postcheck/meta/main.yml index 69417584..d7a19a88 100644 --- 
a/roles/scale_object/postcheck/meta/main.yml +++ b/roles/scale_object/postcheck/meta/main.yml @@ -13,11 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] - diff --git a/roles/scale_object/precheck/meta/main.yml b/roles/scale_object/precheck/meta/main.yml index 828b184c..cf2ae6d1 100644 --- a/roles/scale_object/precheck/meta/main.yml +++ b/roles/scale_object/precheck/meta/main.yml @@ -12,11 +12,7 @@ galaxy_info: versions: - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] -dependencies: +dependencies: - core/common diff --git a/roles/scale_object/upgrade/meta/main.yml b/roles/scale_object/upgrade/meta/main.yml index 289c1eb1..27c7e378 100644 --- a/roles/scale_object/upgrade/meta/main.yml +++ b/roles/scale_object/upgrade/meta/main.yml @@ -12,12 +12,8 @@ galaxy_info: versions: - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] -dependencies: +dependencies: - core/common - nfs/common diff --git a/roles/smb/cluster/meta/main.yml b/roles/smb/cluster/meta/main.yml index 295fb1dd..59363275 100644 --- a/roles/smb/cluster/meta/main.yml +++ b/roles/smb/cluster/meta/main.yml @@ -13,11 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - nfs/common diff --git a/roles/smb/node/meta/main.yml b/roles/smb/node/meta/main.yml index ec26b7e2..0b51de54 100644 --- a/roles/smb/node/meta/main.yml +++ b/roles/smb/node/meta/main.yml @@ -13,13 +13,8 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - - core/common + - core/common - smb/precheck - diff --git a/roles/smb/postcheck/meta/main.yml b/roles/smb/postcheck/meta/main.yml index 16770b46..54c862c4 100644 --- a/roles/smb/postcheck/meta/main.yml +++ b/roles/smb/postcheck/meta/main.yml @@ -13,10 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] diff --git a/roles/smb/precheck/meta/main.yml b/roles/smb/precheck/meta/main.yml index 3770ad52..6a211648 100644 --- a/roles/smb/precheck/meta/main.yml +++ b/roles/smb/precheck/meta/main.yml @@ -13,10 +13,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] diff --git a/roles/smb/upgrade/meta/main.yml b/roles/smb/upgrade/meta/main.yml index 07ad9f01..bd17de00 100644 --- a/roles/smb/upgrade/meta/main.yml +++ b/roles/smb/upgrade/meta/main.yml @@ -13,11 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - core/common diff --git a/roles/zimon/cluster/meta/main.yml b/roles/zimon/cluster/meta/main.yml index b57e4c95..ebecad84 100644 --- a/roles/zimon/cluster/meta/main.yml +++ b/roles/zimon/cluster/meta/main.yml @@ -13,15 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui - - zimon + galaxy_tags: [] dependencies: - core/common diff --git a/roles/zimon/node/meta/main.yml b/roles/zimon/node/meta/main.yml index bc8ff453..cbcb835d 100644 --- a/roles/zimon/node/meta/main.yml +++ b/roles/zimon/node/meta/main.yml @@ -13,14 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - core/common diff --git a/roles/zimon/postcheck/meta/main.yml b/roles/zimon/postcheck/meta/main.yml index 3e1c6229..a86d7f48 100644 --- 
a/roles/zimon/postcheck/meta/main.yml +++ b/roles/zimon/postcheck/meta/main.yml @@ -12,13 +12,6 @@ galaxy_info: versions: - 7 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/zimon/precheck/meta/main.yml b/roles/zimon/precheck/meta/main.yml index b75780c9..0ad414e2 100644 --- a/roles/zimon/precheck/meta/main.yml +++ b/roles/zimon/precheck/meta/main.yml @@ -13,14 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - core/common diff --git a/roles/zimon/upgrade/meta/main.yml b/roles/zimon/upgrade/meta/main.yml index bc8ff453..cbcb835d 100644 --- a/roles/zimon/upgrade/meta/main.yml +++ b/roles/zimon/upgrade/meta/main.yml @@ -13,14 +13,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - core/common From 9a3bc4eef590914c2718992a689712aaa19f5ac4 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Fri, 29 Oct 2021 20:04:37 +0200 Subject: [PATCH 005/113] Remove role name from role metadata (ignored for collections) Signed-off-by: Achim Christ --- roles/callhome/cluster/meta/main.yml | 1 - roles/callhome/node/meta/main.yml | 1 - roles/callhome/postcheck/meta/main.yml | 1 - roles/callhome/precheck/meta/main.yml | 1 - roles/core/cluster/meta/main.yml | 1 - roles/core/common/meta/main.yml | 1 - roles/core/node/meta/main.yml | 1 - roles/core/postcheck/meta/main.yml | 1 - roles/core/precheck/meta/main.yml | 1 - roles/core/upgrade/meta/main.yml | 1 - roles/gui/cluster/meta/main.yml | 1 - roles/gui/node/meta/main.yml | 1 - roles/gui/postcheck/meta/main.yml | 1 - roles/gui/precheck/meta/main.yml | 1 - roles/gui/upgrade/meta/main.yml | 1 - roles/nfs/cluster/meta/main.yml | 1 - roles/nfs/common/meta/main.yml | 1 - roles/nfs/node/meta/main.yml | 1 - roles/nfs/postcheck/meta/main.yml | 1 - roles/nfs/precheck/meta/main.yml | 1 - roles/nfs/upgrade/meta/main.yml | 1 - roles/remote_mount/meta/main.yml | 1 - roles/scale_auth/upgrade/meta/main.yml | 1 - roles/scale_ece/cluster/meta/main.yml | 1 - roles/scale_ece/node/meta/main.yml | 1 - roles/scale_ece/precheck/meta/main.yml | 1 - roles/scale_ece/upgrade/meta/main.yml | 1 - roles/scale_fileauditlogging/cluster/meta/main.yml | 1 - roles/scale_fileauditlogging/node/meta/main.yml | 1 - roles/scale_fileauditlogging/postcheck/meta/main.yml | 1 - roles/scale_fileauditlogging/precheck/meta/main.yml | 1 - roles/scale_fileauditlogging/upgrade/meta/main.yml | 1 - roles/scale_hdfs/cluster/meta/main.yml | 1 - roles/scale_hdfs/node/meta/main.yml | 1 - roles/scale_hdfs/postcheck/meta/main.yml | 1 - roles/scale_hdfs/precheck/meta/main.yml | 1 - roles/scale_hdfs/upgrade/meta/main.yml | 1 - roles/scale_hpt/node/meta/main.yml | 1 - roles/scale_hpt/postcheck/meta/main.yml | 1 - roles/scale_hpt/precheck/meta/main.yml | 1 - roles/scale_hpt/upgrade/meta/main.yml | 1 - roles/scale_object/cluster/meta/main.yml | 1 - roles/scale_object/node/meta/main.yml | 1 - roles/scale_object/postcheck/meta/main.yml | 1 - roles/scale_object/precheck/meta/main.yml | 1 - roles/scale_object/upgrade/meta/main.yml | 1 - roles/smb/cluster/meta/main.yml | 1 - roles/smb/node/meta/main.yml | 1 - roles/smb/postcheck/meta/main.yml | 1 - roles/smb/precheck/meta/main.yml | 1 - roles/smb/upgrade/meta/main.yml | 1 - roles/zimon/cluster/meta/main.yml | 1 - roles/zimon/node/meta/main.yml | 1 - roles/zimon/postcheck/meta/main.yml | 1 
- roles/zimon/precheck/meta/main.yml | 1 - roles/zimon/upgrade/meta/main.yml | 1 - 56 files changed, 56 deletions(-) diff --git a/roles/callhome/cluster/meta/main.yml b/roles/callhome/cluster/meta/main.yml index 988b7f46..5962d6cb 100755 --- a/roles/callhome/cluster/meta/main.yml +++ b/roles/callhome/cluster/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: callhome_cluster author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/callhome/node/meta/main.yml b/roles/callhome/node/meta/main.yml index 7470ba9e..7e4a4cd4 100755 --- a/roles/callhome/node/meta/main.yml +++ b/roles/callhome/node/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: callhome_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/callhome/postcheck/meta/main.yml b/roles/callhome/postcheck/meta/main.yml index 24c32ee8..a7943cab 100755 --- a/roles/callhome/postcheck/meta/main.yml +++ b/roles/callhome/postcheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: callhome_postcheck author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/callhome/precheck/meta/main.yml b/roles/callhome/precheck/meta/main.yml index 3e0ea96e..2a7d556b 100755 --- a/roles/callhome/precheck/meta/main.yml +++ b/roles/callhome/precheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: callhome_precheck author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/core/cluster/meta/main.yml b/roles/core/cluster/meta/main.yml index a5be8607..0ce97c21 100644 --- a/roles/core/cluster/meta/main.yml +++ b/roles/core/cluster/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: core_cluster author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/core/common/meta/main.yml b/roles/core/common/meta/main.yml index 3f697136..0ce97c21 100644 --- a/roles/core/common/meta/main.yml +++ b/roles/core/common/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: core_common author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/core/node/meta/main.yml b/roles/core/node/meta/main.yml index 64d5b75b..f6b31236 100644 --- a/roles/core/node/meta/main.yml +++ b/roles/core/node/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: core_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/core/postcheck/meta/main.yml b/roles/core/postcheck/meta/main.yml index 465e1d59..0ce97c21 100644 --- a/roles/core/postcheck/meta/main.yml +++ b/roles/core/postcheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: core_postcheck author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/core/precheck/meta/main.yml b/roles/core/precheck/meta/main.yml index 0c3e1dd1..f6b31236 100644 --- a/roles/core/precheck/meta/main.yml +++ b/roles/core/precheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: 
core_precheck author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/core/upgrade/meta/main.yml b/roles/core/upgrade/meta/main.yml index 64d5b75b..f6b31236 100644 --- a/roles/core/upgrade/meta/main.yml +++ b/roles/core/upgrade/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: core_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/gui/cluster/meta/main.yml b/roles/gui/cluster/meta/main.yml index 84b93f03..e1f87c04 100644 --- a/roles/gui/cluster/meta/main.yml +++ b/roles/gui/cluster/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: gui_cluster author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM diff --git a/roles/gui/node/meta/main.yml b/roles/gui/node/meta/main.yml index c5f3181d..5b8d2b6a 100644 --- a/roles/gui/node/meta/main.yml +++ b/roles/gui/node/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: gui_node author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM diff --git a/roles/gui/postcheck/meta/main.yml b/roles/gui/postcheck/meta/main.yml index 4968a007..d88028c1 100644 --- a/roles/gui/postcheck/meta/main.yml +++ b/roles/gui/postcheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: gui_postcheck author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM diff --git a/roles/gui/precheck/meta/main.yml b/roles/gui/precheck/meta/main.yml index ce289486..80c11a9a 100644 --- a/roles/gui/precheck/meta/main.yml +++ b/roles/gui/precheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: gui_precheck author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM diff --git a/roles/gui/upgrade/meta/main.yml b/roles/gui/upgrade/meta/main.yml index 5fb65355..80c11a9a 100644 --- a/roles/gui/upgrade/meta/main.yml +++ b/roles/gui/upgrade/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: gui_node author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM diff --git a/roles/nfs/cluster/meta/main.yml b/roles/nfs/cluster/meta/main.yml index daadbbdd..82cb14a4 100644 --- a/roles/nfs/cluster/meta/main.yml +++ b/roles/nfs/cluster/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: nfs_cluster author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/nfs/common/meta/main.yml b/roles/nfs/common/meta/main.yml index 90219f1b..59137533 100644 --- a/roles/nfs/common/meta/main.yml +++ b/roles/nfs/common/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: ces_common author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/nfs/node/meta/main.yml b/roles/nfs/node/meta/main.yml index f2be0093..4de5378e 100644 --- a/roles/nfs/node/meta/main.yml +++ b/roles/nfs/node/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: nfs_node author: IBM Corporation description: Highly-customizable Ansible role for 
installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/nfs/postcheck/meta/main.yml b/roles/nfs/postcheck/meta/main.yml index a8993d08..2a7d556b 100644 --- a/roles/nfs/postcheck/meta/main.yml +++ b/roles/nfs/postcheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: nfs_postcheck author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/nfs/precheck/meta/main.yml b/roles/nfs/precheck/meta/main.yml index 2d72c524..2a7d556b 100644 --- a/roles/nfs/precheck/meta/main.yml +++ b/roles/nfs/precheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: nfs_precheck author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/nfs/upgrade/meta/main.yml b/roles/nfs/upgrade/meta/main.yml index bd17de00..ebe71671 100644 --- a/roles/nfs/upgrade/meta/main.yml +++ b/roles/nfs/upgrade/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: nfs_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/remote_mount/meta/main.yml b/roles/remote_mount/meta/main.yml index 9642e45b..f9909e7e 100644 --- a/roles/remote_mount/meta/main.yml +++ b/roles/remote_mount/meta/main.yml @@ -1,5 +1,4 @@ galaxy_info: - role_name: remote_mount author: IBM Corporation description: IBM Spectrum Scale (GPFS) ansible role to configure remote mount company: IBM diff --git a/roles/scale_auth/upgrade/meta/main.yml b/roles/scale_auth/upgrade/meta/main.yml index 86a2defa..ebe71671 100644 --- a/roles/scale_auth/upgrade/meta/main.yml +++ b/roles/scale_auth/upgrade/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: scale_auth author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/scale_ece/cluster/meta/main.yml b/roles/scale_ece/cluster/meta/main.yml index 95746452..1ae3f767 100644 --- a/roles/scale_ece/cluster/meta/main.yml +++ b/roles/scale_ece/cluster/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: scale_ece author: IBM Corporation description: Role for configuring IBM Spectrum Scale (GPFS) Erasure Code Edition configuration role company: IBM diff --git a/roles/scale_ece/node/meta/main.yml b/roles/scale_ece/node/meta/main.yml index c821e795..b45dc4b4 100644 --- a/roles/scale_ece/node/meta/main.yml +++ b/roles/scale_ece/node/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: scale_ece author: IBM Corporation description: Role for configuring IBM Spectrum Scale (GPFS) Erasure Code Edition configuration role company: IBM diff --git a/roles/scale_ece/precheck/meta/main.yml b/roles/scale_ece/precheck/meta/main.yml index 95746452..1ae3f767 100644 --- a/roles/scale_ece/precheck/meta/main.yml +++ b/roles/scale_ece/precheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: scale_ece author: IBM Corporation description: Role for configuring IBM Spectrum Scale (GPFS) Erasure Code Edition configuration role company: IBM diff --git a/roles/scale_ece/upgrade/meta/main.yml b/roles/scale_ece/upgrade/meta/main.yml index c821e795..b45dc4b4 100644 --- a/roles/scale_ece/upgrade/meta/main.yml +++ b/roles/scale_ece/upgrade/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: scale_ece author: IBM Corporation description: Role for configuring IBM Spectrum Scale 
(GPFS) Erasure Code Edition configuration role company: IBM diff --git a/roles/scale_fileauditlogging/cluster/meta/main.yml b/roles/scale_fileauditlogging/cluster/meta/main.yml index f4617e41..2a7f49a9 100644 --- a/roles/scale_fileauditlogging/cluster/meta/main.yml +++ b/roles/scale_fileauditlogging/cluster/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: fileauditlogging author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM diff --git a/roles/scale_fileauditlogging/node/meta/main.yml b/roles/scale_fileauditlogging/node/meta/main.yml index e10817dd..8c18242e 100644 --- a/roles/scale_fileauditlogging/node/meta/main.yml +++ b/roles/scale_fileauditlogging/node/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: fileauditlogging author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM diff --git a/roles/scale_fileauditlogging/postcheck/meta/main.yml b/roles/scale_fileauditlogging/postcheck/meta/main.yml index f4617e41..2a7f49a9 100644 --- a/roles/scale_fileauditlogging/postcheck/meta/main.yml +++ b/roles/scale_fileauditlogging/postcheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: fileauditlogging author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM diff --git a/roles/scale_fileauditlogging/precheck/meta/main.yml b/roles/scale_fileauditlogging/precheck/meta/main.yml index f4617e41..2a7f49a9 100644 --- a/roles/scale_fileauditlogging/precheck/meta/main.yml +++ b/roles/scale_fileauditlogging/precheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: fileauditlogging author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM diff --git a/roles/scale_fileauditlogging/upgrade/meta/main.yml b/roles/scale_fileauditlogging/upgrade/meta/main.yml index e10817dd..8c18242e 100644 --- a/roles/scale_fileauditlogging/upgrade/meta/main.yml +++ b/roles/scale_fileauditlogging/upgrade/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: fileauditlogging author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM diff --git a/roles/scale_hdfs/cluster/meta/main.yml b/roles/scale_hdfs/cluster/meta/main.yml index e289b968..e5a842f3 100644 --- a/roles/scale_hdfs/cluster/meta/main.yml +++ b/roles/scale_hdfs/cluster/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: scale_hdfs author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/scale_hdfs/node/meta/main.yml b/roles/scale_hdfs/node/meta/main.yml index 3d8a8bc4..64229bd5 100644 --- a/roles/scale_hdfs/node/meta/main.yml +++ b/roles/scale_hdfs/node/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: scale_hdfs author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/scale_hdfs/postcheck/meta/main.yml b/roles/scale_hdfs/postcheck/meta/main.yml index 174cca00..2a7d556b 100644 --- a/roles/scale_hdfs/postcheck/meta/main.yml +++ b/roles/scale_hdfs/postcheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: scale_hdfs author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) 
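# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the patch series and not applied
# by any hunk): judging only from the removals shown in the hunks above, after
# the galaxy_tags cleanup and role_name removal a typical role meta/main.yml
# in this collection is assumed to reduce to roughly the shape below. Key
# names and values are taken from the surrounding diffs; exact indentation is
# an assumption. Later patches in this same series additionally add a license
# field, bump min_ansible_version, and normalize dependency indentation.
---
galaxy_info:
  author: IBM Corporation
  description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS)
  company: IBM

  min_ansible_version: 2.4

  platforms:
    - name: EL
      versions:
        - 7
        - 8

  galaxy_tags: []

dependencies:
  - core/common
# ---------------------------------------------------------------------------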
company: IBM diff --git a/roles/scale_hdfs/precheck/meta/main.yml b/roles/scale_hdfs/precheck/meta/main.yml index 174cca00..2a7d556b 100644 --- a/roles/scale_hdfs/precheck/meta/main.yml +++ b/roles/scale_hdfs/precheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: scale_hdfs author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/scale_hdfs/upgrade/meta/main.yml b/roles/scale_hdfs/upgrade/meta/main.yml index 58f6f40f..ebe71671 100644 --- a/roles/scale_hdfs/upgrade/meta/main.yml +++ b/roles/scale_hdfs/upgrade/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: scale_hdfs author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/scale_hpt/node/meta/main.yml b/roles/scale_hpt/node/meta/main.yml index eb421d65..729731da 100644 --- a/roles/scale_hpt/node/meta/main.yml +++ b/roles/scale_hpt/node/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: scale_hpt author: IBM Corporation description: Role for installing IBM Spectrum Scale (GPFS) AFM COS package company: IBM diff --git a/roles/scale_hpt/postcheck/meta/main.yml b/roles/scale_hpt/postcheck/meta/main.yml index 6dd669dc..1a05efc9 100644 --- a/roles/scale_hpt/postcheck/meta/main.yml +++ b/roles/scale_hpt/postcheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: hpt_postcheck author: IBM Corporation description: Role for installing IBM Spectrum Scale (GPFS) AFM COS (HPT) company: IBM diff --git a/roles/scale_hpt/precheck/meta/main.yml b/roles/scale_hpt/precheck/meta/main.yml index 1ed07c51..1a05efc9 100644 --- a/roles/scale_hpt/precheck/meta/main.yml +++ b/roles/scale_hpt/precheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: hpt_preheck author: IBM Corporation description: Role for installing IBM Spectrum Scale (GPFS) AFM COS (HPT) company: IBM diff --git a/roles/scale_hpt/upgrade/meta/main.yml b/roles/scale_hpt/upgrade/meta/main.yml index eb421d65..729731da 100644 --- a/roles/scale_hpt/upgrade/meta/main.yml +++ b/roles/scale_hpt/upgrade/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: scale_hpt author: IBM Corporation description: Role for installing IBM Spectrum Scale (GPFS) AFM COS package company: IBM diff --git a/roles/scale_object/cluster/meta/main.yml b/roles/scale_object/cluster/meta/main.yml index cea0daa9..504740a2 100644 --- a/roles/scale_object/cluster/meta/main.yml +++ b/roles/scale_object/cluster/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: obj_protocol author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/scale_object/node/meta/main.yml b/roles/scale_object/node/meta/main.yml index 1e8976c1..d1977a7a 100644 --- a/roles/scale_object/node/meta/main.yml +++ b/roles/scale_object/node/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: obj_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/scale_object/postcheck/meta/main.yml b/roles/scale_object/postcheck/meta/main.yml index d7a19a88..2a7d556b 100644 --- a/roles/scale_object/postcheck/meta/main.yml +++ b/roles/scale_object/postcheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: obj_postcheck author: IBM Corporation description: Highly-customizable Ansible role for 
installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/scale_object/precheck/meta/main.yml b/roles/scale_object/precheck/meta/main.yml index cf2ae6d1..a62a1517 100644 --- a/roles/scale_object/precheck/meta/main.yml +++ b/roles/scale_object/precheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: obj_precheck author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/scale_object/upgrade/meta/main.yml b/roles/scale_object/upgrade/meta/main.yml index 27c7e378..ddd3acff 100644 --- a/roles/scale_object/upgrade/meta/main.yml +++ b/roles/scale_object/upgrade/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: obj_upgrade author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/smb/cluster/meta/main.yml b/roles/smb/cluster/meta/main.yml index 59363275..e5a842f3 100644 --- a/roles/smb/cluster/meta/main.yml +++ b/roles/smb/cluster/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: nfs_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/smb/node/meta/main.yml b/roles/smb/node/meta/main.yml index 0b51de54..3fdc4f97 100644 --- a/roles/smb/node/meta/main.yml +++ b/roles/smb/node/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: nfs_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/smb/postcheck/meta/main.yml b/roles/smb/postcheck/meta/main.yml index 54c862c4..2a7d556b 100644 --- a/roles/smb/postcheck/meta/main.yml +++ b/roles/smb/postcheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: nfs_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/smb/precheck/meta/main.yml b/roles/smb/precheck/meta/main.yml index 6a211648..2a7d556b 100644 --- a/roles/smb/precheck/meta/main.yml +++ b/roles/smb/precheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: smb author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/smb/upgrade/meta/main.yml b/roles/smb/upgrade/meta/main.yml index bd17de00..ebe71671 100644 --- a/roles/smb/upgrade/meta/main.yml +++ b/roles/smb/upgrade/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: nfs_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM diff --git a/roles/zimon/cluster/meta/main.yml b/roles/zimon/cluster/meta/main.yml index ebecad84..03dd0794 100644 --- a/roles/zimon/cluster/meta/main.yml +++ b/roles/zimon/cluster/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: zimon_cluster author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Zimon company: IBM diff --git a/roles/zimon/node/meta/main.yml b/roles/zimon/node/meta/main.yml index cbcb835d..03dd0794 100644 --- a/roles/zimon/node/meta/main.yml +++ b/roles/zimon/node/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: zimon_node author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Zimon company: IBM diff 
--git a/roles/zimon/postcheck/meta/main.yml b/roles/zimon/postcheck/meta/main.yml index a86d7f48..da9f40d2 100644 --- a/roles/zimon/postcheck/meta/main.yml +++ b/roles/zimon/postcheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: zimon_postcheck author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM diff --git a/roles/zimon/precheck/meta/main.yml b/roles/zimon/precheck/meta/main.yml index 0ad414e2..03dd0794 100644 --- a/roles/zimon/precheck/meta/main.yml +++ b/roles/zimon/precheck/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: zimon_precheck author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Zimon company: IBM diff --git a/roles/zimon/upgrade/meta/main.yml b/roles/zimon/upgrade/meta/main.yml index cbcb835d..03dd0794 100644 --- a/roles/zimon/upgrade/meta/main.yml +++ b/roles/zimon/upgrade/meta/main.yml @@ -1,6 +1,5 @@ --- galaxy_info: - role_name: zimon_node author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Zimon company: IBM From f715be078e50b3e798b57dbba80ecc8db2633725 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Fri, 29 Oct 2021 20:46:14 +0200 Subject: [PATCH 006/113] Bump min_ansible_version to 2.9, consistent with README Signed-off-by: Achim Christ --- roles/callhome/cluster/meta/main.yml | 4 +++- roles/callhome/node/meta/main.yml | 4 +++- roles/callhome/postcheck/meta/main.yml | 4 +++- roles/callhome/precheck/meta/main.yml | 4 +++- roles/core/cluster/meta/main.yml | 4 +++- roles/core/common/meta/main.yml | 4 +++- roles/core/node/meta/main.yml | 4 +++- roles/core/postcheck/meta/main.yml | 4 +++- roles/core/precheck/meta/main.yml | 4 +++- roles/core/upgrade/meta/main.yml | 4 +++- roles/gui/cluster/meta/main.yml | 4 +++- roles/gui/node/meta/main.yml | 4 +++- roles/gui/postcheck/meta/main.yml | 4 +++- roles/gui/precheck/meta/main.yml | 4 +++- roles/gui/upgrade/meta/main.yml | 4 +++- roles/nfs/cluster/meta/main.yml | 4 +++- roles/nfs/common/meta/main.yml | 4 +++- roles/nfs/node/meta/main.yml | 4 +++- roles/nfs/postcheck/meta/main.yml | 4 +++- roles/nfs/precheck/meta/main.yml | 4 +++- roles/nfs/upgrade/meta/main.yml | 4 +++- roles/remote_mount/meta/main.yml | 2 ++ roles/scale_auth/upgrade/meta/main.yml | 4 +++- roles/scale_ece/cluster/meta/main.yml | 4 +++- roles/scale_ece/node/meta/main.yml | 4 +++- roles/scale_ece/precheck/meta/main.yml | 4 +++- roles/scale_ece/upgrade/meta/main.yml | 4 +++- roles/scale_fileauditlogging/cluster/meta/main.yml | 4 +++- roles/scale_fileauditlogging/node/meta/main.yml | 4 +++- roles/scale_fileauditlogging/postcheck/meta/main.yml | 4 +++- roles/scale_fileauditlogging/precheck/meta/main.yml | 4 +++- roles/scale_fileauditlogging/upgrade/meta/main.yml | 4 +++- roles/scale_hdfs/cluster/meta/main.yml | 4 +++- roles/scale_hdfs/node/meta/main.yml | 4 +++- roles/scale_hdfs/postcheck/meta/main.yml | 4 +++- roles/scale_hdfs/precheck/meta/main.yml | 4 +++- roles/scale_hdfs/upgrade/meta/main.yml | 4 +++- roles/scale_hpt/node/meta/main.yml | 2 ++ roles/scale_hpt/postcheck/meta/main.yml | 2 ++ roles/scale_hpt/precheck/meta/main.yml | 2 ++ roles/scale_hpt/upgrade/meta/main.yml | 2 ++ roles/scale_object/cluster/meta/main.yml | 4 +++- roles/scale_object/node/meta/main.yml | 4 +++- roles/scale_object/postcheck/meta/main.yml | 4 +++- roles/scale_object/precheck/meta/main.yml | 4 +++- roles/scale_object/upgrade/meta/main.yml | 4 +++- roles/smb/cluster/meta/main.yml | 
4 +++- roles/smb/node/meta/main.yml | 4 +++- roles/smb/postcheck/meta/main.yml | 4 +++- roles/smb/precheck/meta/main.yml | 4 +++- roles/smb/upgrade/meta/main.yml | 4 +++- roles/zimon/cluster/meta/main.yml | 4 +++- roles/zimon/node/meta/main.yml | 4 +++- roles/zimon/postcheck/meta/main.yml | 4 +++- roles/zimon/precheck/meta/main.yml | 4 +++- roles/zimon/upgrade/meta/main.yml | 4 +++- 56 files changed, 163 insertions(+), 51 deletions(-) diff --git a/roles/callhome/cluster/meta/main.yml b/roles/callhome/cluster/meta/main.yml index 5962d6cb..04ac82ac 100755 --- a/roles/callhome/cluster/meta/main.yml +++ b/roles/callhome/cluster/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/callhome/node/meta/main.yml b/roles/callhome/node/meta/main.yml index 7e4a4cd4..21f2356f 100755 --- a/roles/callhome/node/meta/main.yml +++ b/roles/callhome/node/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/callhome/postcheck/meta/main.yml b/roles/callhome/postcheck/meta/main.yml index a7943cab..d91e0cff 100755 --- a/roles/callhome/postcheck/meta/main.yml +++ b/roles/callhome/postcheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/callhome/precheck/meta/main.yml b/roles/callhome/precheck/meta/main.yml index 2a7d556b..dab8063f 100755 --- a/roles/callhome/precheck/meta/main.yml +++ b/roles/callhome/precheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/core/cluster/meta/main.yml b/roles/core/cluster/meta/main.yml index 0ce97c21..9cd697a4 100644 --- a/roles/core/cluster/meta/main.yml +++ b/roles/core/cluster/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/core/common/meta/main.yml b/roles/core/common/meta/main.yml index 0ce97c21..9cd697a4 100644 --- a/roles/core/common/meta/main.yml +++ b/roles/core/common/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/core/node/meta/main.yml b/roles/core/node/meta/main.yml index f6b31236..3cdd5230 100644 --- a/roles/core/node/meta/main.yml +++ b/roles/core/node/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for 
installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/core/postcheck/meta/main.yml b/roles/core/postcheck/meta/main.yml index 0ce97c21..9cd697a4 100644 --- a/roles/core/postcheck/meta/main.yml +++ b/roles/core/postcheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/core/precheck/meta/main.yml b/roles/core/precheck/meta/main.yml index f6b31236..3cdd5230 100644 --- a/roles/core/precheck/meta/main.yml +++ b/roles/core/precheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/core/upgrade/meta/main.yml b/roles/core/upgrade/meta/main.yml index f6b31236..3cdd5230 100644 --- a/roles/core/upgrade/meta/main.yml +++ b/roles/core/upgrade/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/gui/cluster/meta/main.yml b/roles/gui/cluster/meta/main.yml index e1f87c04..4c56928f 100644 --- a/roles/gui/cluster/meta/main.yml +++ b/roles/gui/cluster/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/gui/node/meta/main.yml b/roles/gui/node/meta/main.yml index 5b8d2b6a..0e9ee42d 100644 --- a/roles/gui/node/meta/main.yml +++ b/roles/gui/node/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/gui/postcheck/meta/main.yml b/roles/gui/postcheck/meta/main.yml index d88028c1..9012eb68 100644 --- a/roles/gui/postcheck/meta/main.yml +++ b/roles/gui/postcheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/gui/precheck/meta/main.yml b/roles/gui/precheck/meta/main.yml index 80c11a9a..115fdc7e 100644 --- a/roles/gui/precheck/meta/main.yml +++ b/roles/gui/precheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/gui/upgrade/meta/main.yml b/roles/gui/upgrade/meta/main.yml index 80c11a9a..115fdc7e 100644 --- a/roles/gui/upgrade/meta/main.yml +++ 
b/roles/gui/upgrade/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/nfs/cluster/meta/main.yml b/roles/nfs/cluster/meta/main.yml index 82cb14a4..a7182ff2 100644 --- a/roles/nfs/cluster/meta/main.yml +++ b/roles/nfs/cluster/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/nfs/common/meta/main.yml b/roles/nfs/common/meta/main.yml index 59137533..9e6f1f9c 100644 --- a/roles/nfs/common/meta/main.yml +++ b/roles/nfs/common/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/nfs/node/meta/main.yml b/roles/nfs/node/meta/main.yml index 4de5378e..a6818ed3 100644 --- a/roles/nfs/node/meta/main.yml +++ b/roles/nfs/node/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/nfs/postcheck/meta/main.yml b/roles/nfs/postcheck/meta/main.yml index 2a7d556b..dab8063f 100644 --- a/roles/nfs/postcheck/meta/main.yml +++ b/roles/nfs/postcheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/nfs/precheck/meta/main.yml b/roles/nfs/precheck/meta/main.yml index 2a7d556b..dab8063f 100644 --- a/roles/nfs/precheck/meta/main.yml +++ b/roles/nfs/precheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/nfs/upgrade/meta/main.yml b/roles/nfs/upgrade/meta/main.yml index ebe71671..9722f0d4 100644 --- a/roles/nfs/upgrade/meta/main.yml +++ b/roles/nfs/upgrade/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/remote_mount/meta/main.yml b/roles/remote_mount/meta/main.yml index f9909e7e..cfd80fb5 100644 --- a/roles/remote_mount/meta/main.yml +++ b/roles/remote_mount/meta/main.yml @@ -2,7 +2,9 @@ galaxy_info: author: IBM Corporation description: IBM Spectrum Scale (GPFS) ansible role to configure remote mount company: IBM + license: Apache-2.0 + min_ansible_version: 2.9 platforms: diff --git a/roles/scale_auth/upgrade/meta/main.yml b/roles/scale_auth/upgrade/meta/main.yml index 
ebe71671..9722f0d4 100644 --- a/roles/scale_auth/upgrade/meta/main.yml +++ b/roles/scale_auth/upgrade/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_ece/cluster/meta/main.yml b/roles/scale_ece/cluster/meta/main.yml index 1ae3f767..fb8c5b02 100644 --- a/roles/scale_ece/cluster/meta/main.yml +++ b/roles/scale_ece/cluster/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for configuring IBM Spectrum Scale (GPFS) Erasure Code Edition configuration role company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_ece/node/meta/main.yml b/roles/scale_ece/node/meta/main.yml index b45dc4b4..db7ecfa4 100644 --- a/roles/scale_ece/node/meta/main.yml +++ b/roles/scale_ece/node/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for configuring IBM Spectrum Scale (GPFS) Erasure Code Edition configuration role company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_ece/precheck/meta/main.yml b/roles/scale_ece/precheck/meta/main.yml index 1ae3f767..fb8c5b02 100644 --- a/roles/scale_ece/precheck/meta/main.yml +++ b/roles/scale_ece/precheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for configuring IBM Spectrum Scale (GPFS) Erasure Code Edition configuration role company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_ece/upgrade/meta/main.yml b/roles/scale_ece/upgrade/meta/main.yml index b45dc4b4..db7ecfa4 100644 --- a/roles/scale_ece/upgrade/meta/main.yml +++ b/roles/scale_ece/upgrade/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for configuring IBM Spectrum Scale (GPFS) Erasure Code Edition configuration role company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_fileauditlogging/cluster/meta/main.yml b/roles/scale_fileauditlogging/cluster/meta/main.yml index 2a7f49a9..171c6720 100644 --- a/roles/scale_fileauditlogging/cluster/meta/main.yml +++ b/roles/scale_fileauditlogging/cluster/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_fileauditlogging/node/meta/main.yml b/roles/scale_fileauditlogging/node/meta/main.yml index 8c18242e..ae6f91b6 100644 --- a/roles/scale_fileauditlogging/node/meta/main.yml +++ b/roles/scale_fileauditlogging/node/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_fileauditlogging/postcheck/meta/main.yml b/roles/scale_fileauditlogging/postcheck/meta/main.yml index 2a7f49a9..171c6720 100644 --- a/roles/scale_fileauditlogging/postcheck/meta/main.yml +++ 
b/roles/scale_fileauditlogging/postcheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_fileauditlogging/precheck/meta/main.yml b/roles/scale_fileauditlogging/precheck/meta/main.yml index 2a7f49a9..171c6720 100644 --- a/roles/scale_fileauditlogging/precheck/meta/main.yml +++ b/roles/scale_fileauditlogging/precheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_fileauditlogging/upgrade/meta/main.yml b/roles/scale_fileauditlogging/upgrade/meta/main.yml index 8c18242e..ae6f91b6 100644 --- a/roles/scale_fileauditlogging/upgrade/meta/main.yml +++ b/roles/scale_fileauditlogging/upgrade/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_hdfs/cluster/meta/main.yml b/roles/scale_hdfs/cluster/meta/main.yml index e5a842f3..1155d7a8 100644 --- a/roles/scale_hdfs/cluster/meta/main.yml +++ b/roles/scale_hdfs/cluster/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_hdfs/node/meta/main.yml b/roles/scale_hdfs/node/meta/main.yml index 64229bd5..aa552c36 100644 --- a/roles/scale_hdfs/node/meta/main.yml +++ b/roles/scale_hdfs/node/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_hdfs/postcheck/meta/main.yml b/roles/scale_hdfs/postcheck/meta/main.yml index 2a7d556b..dab8063f 100644 --- a/roles/scale_hdfs/postcheck/meta/main.yml +++ b/roles/scale_hdfs/postcheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_hdfs/precheck/meta/main.yml b/roles/scale_hdfs/precheck/meta/main.yml index 2a7d556b..dab8063f 100644 --- a/roles/scale_hdfs/precheck/meta/main.yml +++ b/roles/scale_hdfs/precheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_hdfs/upgrade/meta/main.yml b/roles/scale_hdfs/upgrade/meta/main.yml index ebe71671..9722f0d4 100644 --- a/roles/scale_hdfs/upgrade/meta/main.yml +++ b/roles/scale_hdfs/upgrade/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: 
IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_hpt/node/meta/main.yml b/roles/scale_hpt/node/meta/main.yml index 729731da..f79ac190 100644 --- a/roles/scale_hpt/node/meta/main.yml +++ b/roles/scale_hpt/node/meta/main.yml @@ -3,7 +3,9 @@ galaxy_info: author: IBM Corporation description: Role for installing IBM Spectrum Scale (GPFS) AFM COS package company: IBM + license: Apache-2.0 + min_ansible_version: 2.9 platforms: diff --git a/roles/scale_hpt/postcheck/meta/main.yml b/roles/scale_hpt/postcheck/meta/main.yml index 1a05efc9..cd2e0509 100644 --- a/roles/scale_hpt/postcheck/meta/main.yml +++ b/roles/scale_hpt/postcheck/meta/main.yml @@ -3,7 +3,9 @@ galaxy_info: author: IBM Corporation description: Role for installing IBM Spectrum Scale (GPFS) AFM COS (HPT) company: IBM + license: Apache-2.0 + min_ansible_version: 2.9 platforms: diff --git a/roles/scale_hpt/precheck/meta/main.yml b/roles/scale_hpt/precheck/meta/main.yml index 1a05efc9..cd2e0509 100644 --- a/roles/scale_hpt/precheck/meta/main.yml +++ b/roles/scale_hpt/precheck/meta/main.yml @@ -3,7 +3,9 @@ galaxy_info: author: IBM Corporation description: Role for installing IBM Spectrum Scale (GPFS) AFM COS (HPT) company: IBM + license: Apache-2.0 + min_ansible_version: 2.9 platforms: diff --git a/roles/scale_hpt/upgrade/meta/main.yml b/roles/scale_hpt/upgrade/meta/main.yml index 729731da..f79ac190 100644 --- a/roles/scale_hpt/upgrade/meta/main.yml +++ b/roles/scale_hpt/upgrade/meta/main.yml @@ -3,7 +3,9 @@ galaxy_info: author: IBM Corporation description: Role for installing IBM Spectrum Scale (GPFS) AFM COS package company: IBM + license: Apache-2.0 + min_ansible_version: 2.9 platforms: diff --git a/roles/scale_object/cluster/meta/main.yml b/roles/scale_object/cluster/meta/main.yml index 504740a2..7908f877 100644 --- a/roles/scale_object/cluster/meta/main.yml +++ b/roles/scale_object/cluster/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_object/node/meta/main.yml b/roles/scale_object/node/meta/main.yml index d1977a7a..eb950c1c 100644 --- a/roles/scale_object/node/meta/main.yml +++ b/roles/scale_object/node/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_object/postcheck/meta/main.yml b/roles/scale_object/postcheck/meta/main.yml index 2a7d556b..dab8063f 100644 --- a/roles/scale_object/postcheck/meta/main.yml +++ b/roles/scale_object/postcheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_object/precheck/meta/main.yml b/roles/scale_object/precheck/meta/main.yml index a62a1517..30bfbf63 100644 --- a/roles/scale_object/precheck/meta/main.yml +++ 
b/roles/scale_object/precheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/scale_object/upgrade/meta/main.yml b/roles/scale_object/upgrade/meta/main.yml index ddd3acff..01fa1d1f 100644 --- a/roles/scale_object/upgrade/meta/main.yml +++ b/roles/scale_object/upgrade/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/smb/cluster/meta/main.yml b/roles/smb/cluster/meta/main.yml index e5a842f3..1155d7a8 100644 --- a/roles/smb/cluster/meta/main.yml +++ b/roles/smb/cluster/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/smb/node/meta/main.yml b/roles/smb/node/meta/main.yml index 3fdc4f97..f72ea666 100644 --- a/roles/smb/node/meta/main.yml +++ b/roles/smb/node/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/smb/postcheck/meta/main.yml b/roles/smb/postcheck/meta/main.yml index 2a7d556b..dab8063f 100644 --- a/roles/smb/postcheck/meta/main.yml +++ b/roles/smb/postcheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/smb/precheck/meta/main.yml b/roles/smb/precheck/meta/main.yml index 2a7d556b..dab8063f 100644 --- a/roles/smb/precheck/meta/main.yml +++ b/roles/smb/precheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/smb/upgrade/meta/main.yml b/roles/smb/upgrade/meta/main.yml index ebe71671..9722f0d4 100644 --- a/roles/smb/upgrade/meta/main.yml +++ b/roles/smb/upgrade/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/zimon/cluster/meta/main.yml b/roles/zimon/cluster/meta/main.yml index 03dd0794..e6e01961 100644 --- a/roles/zimon/cluster/meta/main.yml +++ b/roles/zimon/cluster/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Zimon company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git 
a/roles/zimon/node/meta/main.yml b/roles/zimon/node/meta/main.yml index 03dd0794..e6e01961 100644 --- a/roles/zimon/node/meta/main.yml +++ b/roles/zimon/node/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Zimon company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/zimon/postcheck/meta/main.yml b/roles/zimon/postcheck/meta/main.yml index da9f40d2..3f570dcd 100644 --- a/roles/zimon/postcheck/meta/main.yml +++ b/roles/zimon/postcheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/zimon/precheck/meta/main.yml b/roles/zimon/precheck/meta/main.yml index 03dd0794..e6e01961 100644 --- a/roles/zimon/precheck/meta/main.yml +++ b/roles/zimon/precheck/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Zimon company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL diff --git a/roles/zimon/upgrade/meta/main.yml b/roles/zimon/upgrade/meta/main.yml index 03dd0794..e6e01961 100644 --- a/roles/zimon/upgrade/meta/main.yml +++ b/roles/zimon/upgrade/meta/main.yml @@ -3,8 +3,10 @@ galaxy_info: author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Zimon company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL From 42449f34fcfbfd027b9ed3ddc4030fbfc826ab15 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Sun, 31 Oct 2021 20:18:44 +0100 Subject: [PATCH 007/113] Fix indentation Signed-off-by: Achim Christ --- roles/callhome/node/meta/main.yml | 2 +- roles/callhome/postcheck/meta/main.yml | 2 +- roles/core/node/meta/main.yml | 2 +- roles/core/precheck/meta/main.yml | 2 +- roles/core/upgrade/meta/main.yml | 2 +- roles/nfs/common/meta/main.yml | 2 +- roles/nfs/node/meta/main.yml | 4 ++-- roles/nfs/upgrade/meta/main.yml | 2 +- roles/scale_auth/upgrade/meta/main.yml | 2 +- roles/scale_ece/node/meta/main.yml | 2 +- roles/scale_ece/upgrade/meta/main.yml | 2 +- roles/scale_hdfs/cluster/meta/main.yml | 2 +- roles/scale_hdfs/node/meta/main.yml | 4 ++-- roles/scale_hdfs/upgrade/meta/main.yml | 2 +- roles/scale_object/node/meta/main.yml | 4 ++-- roles/scale_object/precheck/meta/main.yml | 2 +- roles/scale_object/upgrade/meta/main.yml | 4 ++-- roles/smb/cluster/meta/main.yml | 2 +- roles/smb/node/meta/main.yml | 4 ++-- roles/smb/upgrade/meta/main.yml | 2 +- 20 files changed, 25 insertions(+), 25 deletions(-) diff --git a/roles/callhome/node/meta/main.yml b/roles/callhome/node/meta/main.yml index 21f2356f..13b86030 100755 --- a/roles/callhome/node/meta/main.yml +++ b/roles/callhome/node/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core/common diff --git a/roles/callhome/postcheck/meta/main.yml b/roles/callhome/postcheck/meta/main.yml index d91e0cff..73f72377 100755 --- a/roles/callhome/postcheck/meta/main.yml +++ b/roles/callhome/postcheck/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: [] -# - common +# - common diff --git a/roles/core/node/meta/main.yml 
b/roles/core/node/meta/main.yml index 3cdd5230..6ca5131a 100644 --- a/roles/core/node/meta/main.yml +++ b/roles/core/node/meta/main.yml @@ -16,4 +16,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - common + - common diff --git a/roles/core/precheck/meta/main.yml b/roles/core/precheck/meta/main.yml index 3cdd5230..6ca5131a 100644 --- a/roles/core/precheck/meta/main.yml +++ b/roles/core/precheck/meta/main.yml @@ -16,4 +16,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - common + - common diff --git a/roles/core/upgrade/meta/main.yml b/roles/core/upgrade/meta/main.yml index 3cdd5230..6ca5131a 100644 --- a/roles/core/upgrade/meta/main.yml +++ b/roles/core/upgrade/meta/main.yml @@ -16,4 +16,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - common + - common diff --git a/roles/nfs/common/meta/main.yml b/roles/nfs/common/meta/main.yml index 9e6f1f9c..13b86030 100644 --- a/roles/nfs/common/meta/main.yml +++ b/roles/nfs/common/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: -- core/common + - core/common diff --git a/roles/nfs/node/meta/main.yml b/roles/nfs/node/meta/main.yml index a6818ed3..3cec205a 100644 --- a/roles/nfs/node/meta/main.yml +++ b/roles/nfs/node/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common - - nfs/precheck + - core/common + - nfs/precheck diff --git a/roles/nfs/upgrade/meta/main.yml b/roles/nfs/upgrade/meta/main.yml index 9722f0d4..13b86030 100644 --- a/roles/nfs/upgrade/meta/main.yml +++ b/roles/nfs/upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core/common diff --git a/roles/scale_auth/upgrade/meta/main.yml b/roles/scale_auth/upgrade/meta/main.yml index 9722f0d4..13b86030 100644 --- a/roles/scale_auth/upgrade/meta/main.yml +++ b/roles/scale_auth/upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core/common diff --git a/roles/scale_ece/node/meta/main.yml b/roles/scale_ece/node/meta/main.yml index db7ecfa4..1dca38dd 100644 --- a/roles/scale_ece/node/meta/main.yml +++ b/roles/scale_ece/node/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core/common diff --git a/roles/scale_ece/upgrade/meta/main.yml b/roles/scale_ece/upgrade/meta/main.yml index db7ecfa4..1dca38dd 100644 --- a/roles/scale_ece/upgrade/meta/main.yml +++ b/roles/scale_ece/upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core/common diff --git a/roles/scale_hdfs/cluster/meta/main.yml b/roles/scale_hdfs/cluster/meta/main.yml index 1155d7a8..2a32d5d7 100644 --- a/roles/scale_hdfs/cluster/meta/main.yml +++ b/roles/scale_hdfs/cluster/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - nfs/common + - nfs/common diff --git a/roles/scale_hdfs/node/meta/main.yml b/roles/scale_hdfs/node/meta/main.yml index aa552c36..0918df39 100644 --- a/roles/scale_hdfs/node/meta/main.yml +++ b/roles/scale_hdfs/node/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common - - scale_hdfs/precheck + - core/common + - scale_hdfs/precheck diff --git a/roles/scale_hdfs/upgrade/meta/main.yml b/roles/scale_hdfs/upgrade/meta/main.yml index 9722f0d4..13b86030 100644 --- a/roles/scale_hdfs/upgrade/meta/main.yml +++ b/roles/scale_hdfs/upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core/common diff --git a/roles/scale_object/node/meta/main.yml 
b/roles/scale_object/node/meta/main.yml index eb950c1c..b85b4c6a 100644 --- a/roles/scale_object/node/meta/main.yml +++ b/roles/scale_object/node/meta/main.yml @@ -16,5 +16,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - scale_object/precheck - - nfs/common + - scale_object/precheck + - nfs/common diff --git a/roles/scale_object/precheck/meta/main.yml b/roles/scale_object/precheck/meta/main.yml index 30bfbf63..0c05ab16 100644 --- a/roles/scale_object/precheck/meta/main.yml +++ b/roles/scale_object/precheck/meta/main.yml @@ -16,4 +16,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core/common diff --git a/roles/scale_object/upgrade/meta/main.yml b/roles/scale_object/upgrade/meta/main.yml index 01fa1d1f..3f62aba0 100644 --- a/roles/scale_object/upgrade/meta/main.yml +++ b/roles/scale_object/upgrade/meta/main.yml @@ -16,5 +16,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common - - nfs/common + - core/common + - nfs/common diff --git a/roles/smb/cluster/meta/main.yml b/roles/smb/cluster/meta/main.yml index 1155d7a8..2a32d5d7 100644 --- a/roles/smb/cluster/meta/main.yml +++ b/roles/smb/cluster/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - nfs/common + - nfs/common diff --git a/roles/smb/node/meta/main.yml b/roles/smb/node/meta/main.yml index f72ea666..e05eaf05 100644 --- a/roles/smb/node/meta/main.yml +++ b/roles/smb/node/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common - - smb/precheck + - core/common + - smb/precheck diff --git a/roles/smb/upgrade/meta/main.yml b/roles/smb/upgrade/meta/main.yml index 9722f0d4..13b86030 100644 --- a/roles/smb/upgrade/meta/main.yml +++ b/roles/smb/upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core/common From 015e337cc6b293a31e9264e0cc0737738e731a90 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Sun, 31 Oct 2021 20:27:12 +0100 Subject: [PATCH 008/113] Rename all roles (breaking) Signed-off-by: Achim Christ --- roles/{scale_hpt/node => afm_cos_install}/defaults/main.yml | 0 roles/{scale_hpt/node => afm_cos_install}/meta/main.yml | 0 roles/{scale_hpt/node => afm_cos_install}/tasks/apt/install.yml | 0 roles/{scale_hpt/node => afm_cos_install}/tasks/install.yml | 0 .../{scale_hpt/node => afm_cos_install}/tasks/install_dir_pkg.yml | 0 .../node => afm_cos_install}/tasks/install_local_pkg.yml | 0 .../node => afm_cos_install}/tasks/install_repository.yml | 0 roles/{scale_hpt/node => afm_cos_install}/tasks/main.yml | 0 roles/{scale_hpt/node => afm_cos_install}/tasks/yum/install.yml | 0 .../{scale_hpt/node => afm_cos_install}/tasks/zypper/install.yml | 0 roles/{scale_hpt/node => afm_cos_install}/vars/main.yml | 0 roles/{scale_hpt/postcheck => afm_cos_prepare}/defaults/main.yml | 0 roles/{scale_hpt/postcheck => afm_cos_prepare}/meta/main.yml | 0 roles/{scale_hpt/postcheck => afm_cos_prepare}/tasks/main.yml | 0 roles/{scale_hpt/upgrade => afm_cos_upgrade}/defaults/main.yml | 0 roles/{scale_hpt/upgrade => afm_cos_upgrade}/meta/main.yml | 0 .../{scale_hpt/upgrade => afm_cos_upgrade}/tasks/apt/install.yml | 0 roles/{scale_hpt/upgrade => afm_cos_upgrade}/tasks/install.yml | 0 .../upgrade => afm_cos_upgrade}/tasks/install_dir_pkg.yml | 0 .../upgrade => afm_cos_upgrade}/tasks/install_local_pkg.yml | 0 .../upgrade => afm_cos_upgrade}/tasks/install_repository.yml | 0 roles/{scale_hpt/upgrade => afm_cos_upgrade}/tasks/main.yml | 0 .../{scale_hpt/upgrade => afm_cos_upgrade}/tasks/yum/install.yml | 0 .../upgrade => 
afm_cos_upgrade}/tasks/zypper/install.yml | 0 roles/{scale_hpt/upgrade => afm_cos_upgrade}/vars/main.yml | 0 roles/{scale_hpt/precheck => afm_cos_verify}/defaults/main.yml | 0 roles/{scale_hpt/precheck => afm_cos_verify}/meta/main.yml | 0 roles/{scale_hpt/precheck => afm_cos_verify}/tasks/main.yml | 0 roles/{scale_auth/upgrade => auth_upgrade}/defaults/main.yml | 0 roles/{callhome/node => auth_upgrade}/meta/main.yml | 0 roles/{scale_auth/upgrade => auth_upgrade}/tasks/auth.yml | 0 roles/{scale_auth/upgrade => auth_upgrade}/tasks/main.yml | 0 roles/{scale_auth/upgrade => auth_upgrade}/tasks/parseFile.yml | 0 roles/{scale_auth/upgrade => auth_upgrade}/vars/main.yml | 0 roles/{callhome/cluster => callhome_configure}/defaults/main.yml | 0 roles/{callhome/cluster => callhome_configure}/handlers/main.yml | 0 roles/{callhome/cluster => callhome_configure}/meta/main.yml | 0 .../{callhome/cluster => callhome_configure}/tasks/configure.yml | 0 roles/{callhome/cluster => callhome_configure}/tasks/main.yml | 0 roles/{callhome/cluster => callhome_configure}/vars/main.yml | 0 roles/{callhome/node => callhome_install}/defaults/main.yml | 0 roles/{callhome/node => callhome_install}/handlers/main.yml | 0 roles/{nfs/common => callhome_install}/meta/main.yml | 0 roles/{callhome/node => callhome_install}/tasks/apt/install.yml | 0 roles/{callhome/node => callhome_install}/tasks/install.yml | 0 .../node => callhome_install}/tasks/install_local_pkg.yml | 0 .../node => callhome_install}/tasks/install_remote_pkg.yml | 0 .../node => callhome_install}/tasks/install_repository.yml | 0 roles/{callhome/node => callhome_install}/tasks/main.yml | 0 roles/{callhome/node => callhome_install}/tasks/yum/install.yml | 0 .../{callhome/node => callhome_install}/tasks/zypper/install.yml | 0 roles/{callhome/node => callhome_install}/vars/main.yml | 0 roles/{callhome/precheck => callhome_prepare}/defaults/main.yml | 0 roles/{callhome/precheck => callhome_prepare}/handlers/main.yml | 0 roles/{callhome/precheck => callhome_prepare}/meta/main.yml | 0 roles/{callhome/precheck => callhome_prepare}/tasks/check.yml | 0 roles/{callhome/precheck => callhome_prepare}/tasks/main.yml | 0 roles/{callhome/postcheck => callhome_prepare}/vars/main.yml | 0 roles/{callhome/postcheck => callhome_verify}/defaults/main.yml | 0 roles/{callhome/postcheck => callhome_verify}/handlers/main.yml | 0 roles/{callhome/postcheck => callhome_verify}/meta/main.yml | 0 roles/{callhome/postcheck => callhome_verify}/tasks/check.yml | 0 roles/{callhome/postcheck => callhome_verify}/tasks/main.yml | 0 roles/{callhome/precheck => callhome_verify}/vars/main.yml | 0 roles/{nfs/common => ces_common}/defaults/main.yml | 0 roles/{nfs/upgrade => ces_common}/meta/main.yml | 0 roles/{nfs/common => ces_common}/tasks/check.yml | 0 roles/{nfs/common => ces_common}/tasks/configure.yml | 0 roles/{nfs/common => ces_common}/tasks/main.yml | 0 roles/{nfs/common => ces_common}/vars/main.yml | 0 roles/{core/common => core_common}/defaults/main.yml | 0 roles/{core/common => core_common}/handlers/main.yml | 0 roles/{core/cluster => core_common}/meta/main.yml | 0 roles/{core/common => core_common}/tasks/apt/set_vars.yml | 0 roles/{core/common => core_common}/tasks/check.yml | 0 roles/{core/common => core_common}/tasks/main.yml | 0 roles/{core/common => core_common}/tasks/yum/set_vars.yml | 0 roles/{core/common => core_common}/tasks/zypper/set_vars.yml | 0 roles/{core/cluster => core_common}/tests/inventory | 0 roles/{core/cluster => core_common}/tests/test.yml | 0 roles/{core/common => 
core_common}/vars/main.yml | 0 roles/{core/cluster => core_configure}/defaults/main.yml | 0 roles/{core/cluster => core_configure}/handlers/main.yml | 0 roles/{core/common => core_configure}/meta/main.yml | 0 roles/{core/cluster => core_configure}/tasks/check.yml | 0 roles/{core/cluster => core_configure}/tasks/cluster.yml | 0 roles/{core/cluster => core_configure}/tasks/cluster_start.yml | 0 roles/{core/cluster => core_configure}/tasks/config.yml | 0 roles/{core/cluster => core_configure}/tasks/finalize.yml | 0 roles/{core/cluster => core_configure}/tasks/install_gplbin.yml | 0 roles/{core/cluster => core_configure}/tasks/main.yml | 0 roles/{core/cluster => core_configure}/tasks/removenode.yml | 0 roles/{core/cluster => core_configure}/tasks/storage.yml | 0 roles/{core/cluster => core_configure}/tasks/storage_disk.yml | 0 roles/{core/cluster => core_configure}/tasks/storage_fs.yml | 0 roles/{core/cluster => core_configure}/templates/AddNodeFile.j2 | 0 roles/{core/cluster => core_configure}/templates/ChangeFile.j2 | 0 roles/{core/cluster => core_configure}/templates/NewNodeFile.j2 | 0 roles/{core/cluster => core_configure}/templates/NodeClass.j2 | 0 roles/{core/cluster => core_configure}/templates/StanzaFile.j2 | 0 roles/{core/cluster => core_configure}/templates/StanzaFile_fs.j2 | 0 .../{core/cluster => core_configure}/templates/StanzaFile_nsd.j2 | 0 roles/{core/common => core_configure}/tests/inventory | 0 roles/{core/common => core_configure}/tests/test.yml | 0 roles/{core/cluster => core_configure}/vars/main.yml | 0 roles/{core/node => core_install}/defaults/main.yml | 0 roles/{core/node => core_install}/handlers/main.yml | 0 roles/{core/node => core_install}/meta/main.yml | 0 roles/{core/node => core_install}/tasks/apt/install.yml | 0 roles/{core/node => core_install}/tasks/build.yml | 0 roles/{core/node => core_install}/tasks/finalize.yml | 0 roles/{core/node => core_install}/tasks/install.yml | 0 roles/{core/node => core_install}/tasks/install_dir_pkg.yml | 0 roles/{core/node => core_install}/tasks/install_gplbin.yml | 0 roles/{core/node => core_install}/tasks/install_license_pkg.yml | 0 .../node => core_install}/tasks/install_license_repository.yml | 0 roles/{core/node => core_install}/tasks/install_local_pkg.yml | 0 roles/{core/node => core_install}/tasks/install_remote_pkg.yml | 0 roles/{core/node => core_install}/tasks/install_repository.yml | 0 roles/{core/node => core_install}/tasks/main.yml | 0 roles/{core/node => core_install}/tasks/update.yml | 0 roles/{core/node => core_install}/tasks/upgrade.yml | 0 roles/{core/node => core_install}/tasks/yum/install.yml | 0 roles/{core/node => core_install}/tasks/zypper/install.yml | 0 roles/{core/node => core_install}/templates/AddNodeFile.j2 | 0 roles/{core/node => core_install}/templates/ChangeFile.j2 | 0 roles/{core/node => core_install}/templates/NewNodeFile.j2 | 0 roles/{core/node => core_install}/templates/NodeClass.j2 | 0 roles/{core/node => core_install}/templates/StanzaFile.j2 | 0 roles/{core/node => core_install}/tests/inventory | 0 roles/{core/node => core_install}/tests/test.yml | 0 roles/{core/node => core_install}/vars/main.yml | 0 roles/{core/precheck => core_prepare}/defaults/main.yml | 0 roles/{core/precheck => core_prepare}/handlers/main.yml | 0 roles/{core/precheck => core_prepare}/meta/main.yml | 0 roles/{core/precheck => core_prepare}/tasks/main.yml | 0 roles/{core/precheck => core_prepare}/tasks/prepare.yml | 0 roles/{core/postcheck => core_prepare}/tests/inventory | 0 roles/{core/postcheck => 
core_prepare}/tests/test.yml | 0 roles/{core/precheck => core_prepare}/vars/main.yml | 0 roles/{core/upgrade => core_upgrade}/defaults/main.yml | 0 roles/{core/upgrade => core_upgrade}/handlers/main.yml | 0 roles/{core/upgrade => core_upgrade}/meta/main.yml | 0 roles/{core/upgrade => core_upgrade}/tasks/apt/install.yml | 0 roles/{core/upgrade => core_upgrade}/tasks/build.yml | 0 roles/{core/upgrade => core_upgrade}/tasks/finalize.yml | 0 roles/{core/upgrade => core_upgrade}/tasks/install.yml | 0 roles/{core/upgrade => core_upgrade}/tasks/install_dir_pkg.yml | 0 roles/{core/upgrade => core_upgrade}/tasks/install_gplbin.yml | 0 .../{core/upgrade => core_upgrade}/tasks/install_license_pkg.yml | 0 .../upgrade => core_upgrade}/tasks/install_license_repository.yml | 0 roles/{core/upgrade => core_upgrade}/tasks/install_local_pkg.yml | 0 roles/{core/upgrade => core_upgrade}/tasks/install_remote_pkg.yml | 0 roles/{core/upgrade => core_upgrade}/tasks/install_repository.yml | 0 roles/{core/upgrade => core_upgrade}/tasks/main.yml | 0 roles/{core/upgrade => core_upgrade}/tasks/yum/install.yml | 0 roles/{core/upgrade => core_upgrade}/tasks/zypper/install.yml | 0 roles/{core/precheck => core_upgrade}/tests/inventory | 0 roles/{core/precheck => core_upgrade}/tests/test.yml | 0 roles/{core/upgrade => core_upgrade}/vars/main.yml | 0 roles/{core/postcheck => core_verify}/defaults/main.yml | 0 roles/{core/postcheck => core_verify}/handlers/main.yml | 0 roles/{core/postcheck => core_verify}/meta/main.yml | 0 roles/{core/postcheck => core_verify}/tasks/main.yml | 0 roles/{core/upgrade => core_verify}/tests/inventory | 0 roles/{core/upgrade => core_verify}/tests/test.yml | 0 roles/{core/postcheck => core_verify}/vars/main.yml | 0 roles/{scale_ece/cluster => ece_configure}/defaults/main.yml | 0 roles/{scale_ece/cluster => ece_configure}/handlers/main.yml | 0 roles/{scale_ece/cluster => ece_configure}/meta/main.yml | 0 .../cluster => ece_configure}/tasks/create_filesystem.yml | 0 .../cluster => ece_configure}/tasks/create_recoverygroup.yml | 0 roles/{scale_ece/cluster => ece_configure}/tasks/create_vdisk.yml | 0 roles/{scale_ece/cluster => ece_configure}/tasks/main.yml | 0 roles/{remote_mount => ece_configure}/tests/inventory | 0 roles/{scale_ece/cluster => ece_configure}/tests/test.yml | 0 roles/{scale_ece/cluster => ece_configure}/vars/main.yml | 0 roles/{scale_ece/node => ece_install}/defaults/main.yml | 0 roles/{nfs/node => ece_install}/handlers/main.yml | 0 roles/{scale_ece/node => ece_install}/meta/main.yml | 0 roles/{scale_ece/node => ece_install}/tasks/install.yml | 0 roles/{scale_ece/node => ece_install}/tasks/install_dir_pkg.yml | 0 roles/{scale_ece/node => ece_install}/tasks/install_local_pkg.yml | 0 .../{scale_ece/node => ece_install}/tasks/install_remote_pkg.yml | 0 .../{scale_ece/node => ece_install}/tasks/install_repository.yml | 0 roles/{scale_ece/node => ece_install}/tasks/main.yml | 0 roles/{scale_ece/node => ece_install}/tasks/yum/install.yml | 0 roles/{scale_ece/node => ece_install}/vars/main.yml | 0 roles/{scale_ece/precheck => ece_prepare}/meta/main.yml | 0 roles/{scale_ece/precheck => ece_prepare}/tasks/check.yml | 0 roles/{scale_ece/precheck => ece_prepare}/tasks/main.yml | 0 roles/{scale_ece/upgrade => ece_upgrade}/defaults/main.yml | 0 roles/{nfs/upgrade => ece_upgrade}/handlers/main.yml | 0 roles/{scale_ece/upgrade => ece_upgrade}/meta/main.yml | 0 roles/{scale_ece/upgrade => ece_upgrade}/tasks/install.yml | 0 .../{scale_ece/upgrade => ece_upgrade}/tasks/install_dir_pkg.yml | 0 
.../upgrade => ece_upgrade}/tasks/install_local_pkg.yml | 0 .../upgrade => ece_upgrade}/tasks/install_remote_pkg.yml | 0 .../upgrade => ece_upgrade}/tasks/install_repository.yml | 0 roles/{scale_ece/upgrade => ece_upgrade}/tasks/main.yml | 0 roles/{scale_ece/upgrade => ece_upgrade}/tasks/yum/install.yml | 0 roles/{scale_ece/upgrade => ece_upgrade}/vars/main.yml | 0 .../cluster => fal_configure}/defaults/main.yml | 0 .../cluster => fal_configure}/handlers/main.yml | 0 .../cluster => fal_configure}/meta/main.yml | 0 .../cluster => fal_configure}/tasks/configure.yml | 0 .../cluster => fal_configure}/tasks/configure_fal.yml | 0 .../cluster => fal_configure}/tasks/main.yml | 0 roles/{scale_ece/cluster => fal_configure}/tests/inventory | 0 .../cluster => fal_configure}/tests/test.yml | 0 .../cluster => fal_configure}/vars/main.yml | 0 .../node => fal_install}/defaults/main.yml | 0 roles/{scale_fileauditlogging/node => fal_install}/meta/main.yml | 0 .../node => fal_install}/tasks/apt/install.yml | 0 .../node => fal_install}/tasks/install.yml | 0 .../node => fal_install}/tasks/install_dir_pkg.yml | 0 .../node => fal_install}/tasks/install_local_pkg.yml | 0 .../node => fal_install}/tasks/install_remote_pkg.yml | 0 .../node => fal_install}/tasks/install_repository.yml | 0 roles/{scale_fileauditlogging/node => fal_install}/tasks/main.yml | 0 .../node => fal_install}/tasks/yum/install.yml | 0 .../node => fal_install}/tasks/zypper/install.yml | 0 .../cluster => fal_install}/tests/inventory | 0 roles/{scale_fileauditlogging/node => fal_install}/tests/test.yml | 0 roles/{scale_fileauditlogging/node => fal_install}/vars/main.yml | 0 .../precheck => fal_prepare}/defaults/main.yml | 0 .../precheck => fal_prepare}/handlers/main.yml | 0 .../postcheck => fal_prepare}/meta/main.yml | 0 .../precheck => fal_prepare}/tasks/main.yml | 0 .../{scale_fileauditlogging/node => fal_prepare}/tests/inventory | 0 .../precheck => fal_prepare}/tests/test.yml | 0 .../precheck => fal_prepare}/vars/main.yml | 0 .../upgrade => fal_upgrade}/defaults/main.yml | 0 .../upgrade => fal_upgrade}/handlers/main.yml | 0 .../{scale_fileauditlogging/upgrade => fal_upgrade}/meta/main.yml | 0 .../upgrade => fal_upgrade}/tasks/apt/install.yml | 0 .../upgrade => fal_upgrade}/tasks/install.yml | 0 .../upgrade => fal_upgrade}/tasks/install_dir_pkg.yml | 0 .../upgrade => fal_upgrade}/tasks/install_local_pkg.yml | 0 .../upgrade => fal_upgrade}/tasks/install_remote_pkg.yml | 0 .../upgrade => fal_upgrade}/tasks/install_repository.yml | 0 .../upgrade => fal_upgrade}/tasks/main.yml | 0 .../upgrade => fal_upgrade}/tasks/yum/install.yml | 0 .../upgrade => fal_upgrade}/tasks/zypper/install.yml | 0 .../postcheck => fal_upgrade}/tests/inventory | 0 .../upgrade => fal_upgrade}/tests/test.yml | 0 .../{scale_fileauditlogging/upgrade => fal_upgrade}/vars/main.yml | 0 .../postcheck => fal_verify}/defaults/main.yml | 0 .../postcheck => fal_verify}/handlers/main.yml | 0 .../{scale_fileauditlogging/precheck => fal_verify}/meta/main.yml | 0 .../postcheck => fal_verify}/tasks/check.yml | 0 .../postcheck => fal_verify}/tasks/main.yml | 0 .../precheck => fal_verify}/tests/inventory | 0 .../postcheck => fal_verify}/tests/test.yml | 0 .../postcheck => fal_verify}/vars/main.yml | 0 roles/{gui/cluster => gui_configure}/defaults/main.yml | 0 roles/{gui/cluster => gui_configure}/meta/main.yml | 0 roles/{gui/cluster => gui_configure}/tasks/chpasswdpolicy.yml | 0 roles/{gui/cluster => gui_configure}/tasks/configure.yml | 0 roles/{gui/cluster => gui_configure}/tasks/email.yml | 
0 .../cluster => gui_configure}/tasks/hasi_vault_certificate.yml | 0 roles/{gui/cluster => gui_configure}/tasks/hasi_vault_user.yml | 0 roles/{gui/cluster => gui_configure}/tasks/ldap.yml | 0 roles/{gui/cluster => gui_configure}/tasks/main.yml | 0 roles/{gui/cluster => gui_configure}/tasks/snmp.yml | 0 roles/{gui/cluster => gui_configure}/tasks/users.yml | 0 roles/{gui/node => gui_install}/defaults/main.yml | 0 roles/{gui/node => gui_install}/meta/main.yml | 0 roles/{gui/node => gui_install}/tasks/apt/install.yml | 0 roles/{gui/node => gui_install}/tasks/install.yml | 0 roles/{gui/node => gui_install}/tasks/install_dir_pkg.yml | 0 roles/{gui/node => gui_install}/tasks/install_local_pkg.yml | 0 roles/{gui/node => gui_install}/tasks/install_remote_pkg.yml | 0 roles/{gui/node => gui_install}/tasks/install_repository.yml | 0 roles/{gui/node => gui_install}/tasks/main.yml | 0 roles/{gui/node => gui_install}/tasks/yum/install.yml | 0 roles/{gui/node => gui_install}/tasks/zypper/install.yml | 0 roles/{gui/node => gui_install}/vars/main.yml | 0 roles/{gui/precheck => gui_prepare}/defaults/main.yml | 0 roles/{gui/precheck => gui_prepare}/meta/main.yml | 0 roles/{gui/precheck => gui_prepare}/tasks/inventory_check.yml | 0 roles/{gui/precheck => gui_prepare}/tasks/main.yml | 0 roles/{gui/upgrade => gui_upgrade}/defaults/main.yml | 0 roles/{gui/upgrade => gui_upgrade}/meta/main.yml | 0 roles/{gui/upgrade => gui_upgrade}/tasks/apt/install.yml | 0 roles/{gui/upgrade => gui_upgrade}/tasks/install.yml | 0 roles/{gui/upgrade => gui_upgrade}/tasks/install_dir_pkg.yml | 0 roles/{gui/upgrade => gui_upgrade}/tasks/install_local_pkg.yml | 0 roles/{gui/upgrade => gui_upgrade}/tasks/install_remote_pkg.yml | 0 roles/{gui/upgrade => gui_upgrade}/tasks/install_repository.yml | 0 roles/{gui/upgrade => gui_upgrade}/tasks/main.yml | 0 roles/{gui/upgrade => gui_upgrade}/tasks/yum/install.yml | 0 roles/{gui/upgrade => gui_upgrade}/tasks/zypper/install.yml | 0 roles/{gui/upgrade => gui_upgrade}/vars/main.yml | 0 roles/{gui/postcheck => gui_verify}/defaults/main.yml | 0 roles/{gui/postcheck => gui_verify}/meta/main.yml | 0 roles/{gui/postcheck => gui_verify}/tasks/main.yml | 0 roles/{scale_hdfs/cluster => hdfs_configure}/.travis.yml | 0 roles/{scale_hdfs/cluster => hdfs_configure}/defaults/main.yml | 0 roles/{scale_hdfs/cluster => hdfs_configure}/meta/main.yml | 0 .../{scale_hdfs/cluster => hdfs_configure}/tasks/append_dict.yml | 0 roles/{scale_hdfs/cluster => hdfs_configure}/tasks/configure.yml | 0 roles/{scale_hdfs/cluster => hdfs_configure}/tasks/env_setup.yml | 0 roles/{scale_hdfs/cluster => hdfs_configure}/tasks/main.yml | 0 roles/{scale_hdfs/cluster => hdfs_configure}/vars/main.yml | 0 roles/{scale_hdfs/node => hdfs_install}/.travis.yml | 0 roles/{scale_hdfs/node => hdfs_install}/defaults/main.yml | 0 roles/{scale_ece/node => hdfs_install}/handlers/main.yml | 0 roles/{scale_hdfs/node => hdfs_install}/meta/main.yml | 0 roles/{scale_hdfs/node => hdfs_install}/tasks/install.yml | 0 roles/{scale_hdfs/node => hdfs_install}/tasks/install_dir_pkg.yml | 0 .../{scale_hdfs/node => hdfs_install}/tasks/install_local_pkg.yml | 0 .../node => hdfs_install}/tasks/install_remote_pkg.yml | 0 .../node => hdfs_install}/tasks/install_repository.yml | 0 roles/{scale_hdfs/node => hdfs_install}/tasks/main.yml | 0 roles/{scale_hdfs/node => hdfs_install}/tasks/prepare_env.yml | 0 roles/{scale_hdfs/node => hdfs_install}/tasks/yum/install.yml | 0 roles/{scale_hdfs/node => hdfs_install}/vars/main.yml | 0 roles/{scale_hdfs/postcheck => 
hdfs_prepare}/.travis.yml | 0 roles/{scale_hdfs/precheck => hdfs_prepare}/defaults/main.yml | 0 roles/{nfs/postcheck => hdfs_prepare}/meta/main.yml | 0 roles/{scale_hdfs/precheck => hdfs_prepare}/tasks/check.yml | 0 roles/{scale_hdfs/precheck => hdfs_prepare}/tasks/java_home.yml | 0 roles/{scale_hdfs/precheck => hdfs_prepare}/tasks/main.yml | 0 roles/{scale_hdfs/precheck => hdfs_prepare}/tasks/prepare_env.yml | 0 roles/{scale_hdfs/precheck => hdfs_prepare}/vars/main.yml | 0 roles/{scale_hdfs/upgrade => hdfs_upgrade}/defaults/main.yml | 0 roles/{scale_hdfs/upgrade => hdfs_upgrade}/handlers/mail.yml | 0 roles/{scale_auth/upgrade => hdfs_upgrade}/meta/main.yml | 0 roles/{scale_hdfs/upgrade => hdfs_upgrade}/tasks/main.yml | 0 roles/{scale_hdfs/upgrade => hdfs_upgrade}/tasks/prepare_env.yml | 0 roles/{scale_hdfs/upgrade => hdfs_upgrade}/tasks/upgrade.yml | 0 .../upgrade => hdfs_upgrade}/tasks/upgrade_dir_pkg.yml | 0 .../upgrade => hdfs_upgrade}/tasks/upgrade_local_pkg.yml | 0 .../upgrade => hdfs_upgrade}/tasks/upgrade_remote_pkg.yml | 0 .../upgrade => hdfs_upgrade}/tasks/upgrade_repository.yml | 0 roles/{scale_hdfs/upgrade => hdfs_upgrade}/tasks/yum/upgrade.yml | 0 roles/{scale_hdfs/upgrade => hdfs_upgrade}/vars/main.yml | 0 roles/{scale_hdfs/precheck => hdfs_verify}/.travis.yml | 0 roles/{scale_hdfs/postcheck => hdfs_verify}/defaults/main.yml | 0 roles/{nfs/precheck => hdfs_verify}/meta/main.yml | 0 roles/{scale_hdfs/postcheck => hdfs_verify}/tasks/check.yml | 0 roles/{scale_hdfs/postcheck => hdfs_verify}/tasks/main.yml | 0 roles/{scale_hdfs/postcheck => hdfs_verify}/vars/main.yml | 0 roles/{nfs/cluster => nfs_configure}/defaults/main.yml | 0 roles/{nfs/cluster => nfs_configure}/meta/main.yml | 0 roles/{nfs/cluster => nfs_configure}/tasks/configure.yml | 0 roles/{nfs/cluster => nfs_configure}/tasks/main.yml | 0 roles/{nfs/cluster => nfs_configure}/vars/main.yml | 0 roles/{nfs/node => nfs_install}/defaults/main.yml | 0 roles/{scale_ece/upgrade => nfs_install}/handlers/main.yml | 0 roles/{nfs/node => nfs_install}/meta/main.yml | 0 roles/{nfs/node => nfs_install}/tasks/apt/install.yml | 0 roles/{nfs/node => nfs_install}/tasks/install.yml | 0 roles/{nfs/node => nfs_install}/tasks/install_dir_pkg.yml | 0 roles/{nfs/node => nfs_install}/tasks/install_local_pkg.yml | 0 roles/{nfs/node => nfs_install}/tasks/install_remote_pkg.yml | 0 roles/{nfs/node => nfs_install}/tasks/install_repository.yml | 0 roles/{nfs/node => nfs_install}/tasks/main.yml | 0 roles/{nfs/node => nfs_install}/tasks/yum/install.yml | 0 roles/{nfs/node => nfs_install}/tasks/zypper/install.yml | 0 roles/{nfs/node => nfs_install}/vars/main.yml | 0 roles/{scale_hdfs/postcheck => nfs_prepare}/meta/main.yml | 0 roles/{nfs/precheck => nfs_prepare}/tasks/check.yml | 0 roles/{nfs/precheck => nfs_prepare}/tasks/main.yml | 0 roles/{nfs/upgrade => nfs_upgrade}/defaults/main.yml | 0 roles/{scale_hdfs/node => nfs_upgrade}/handlers/main.yml | 0 roles/{scale_hdfs/upgrade => nfs_upgrade}/meta/main.yml | 0 roles/{nfs/upgrade => nfs_upgrade}/tasks/apt/install.yml | 0 roles/{nfs/upgrade => nfs_upgrade}/tasks/install.yml | 0 roles/{nfs/upgrade => nfs_upgrade}/tasks/install_dir_pkg.yml | 0 roles/{nfs/upgrade => nfs_upgrade}/tasks/install_local_pkg.yml | 0 roles/{nfs/upgrade => nfs_upgrade}/tasks/install_remote_pkg.yml | 0 roles/{nfs/upgrade => nfs_upgrade}/tasks/install_repository.yml | 0 roles/{nfs/upgrade => nfs_upgrade}/tasks/main.yml | 0 roles/{nfs/upgrade => nfs_upgrade}/tasks/yum/install.yml | 0 roles/{nfs/upgrade => 
nfs_upgrade}/tasks/zypper/install.yml | 0 roles/{nfs/upgrade => nfs_upgrade}/vars/main.yml | 0 roles/{nfs/postcheck => nfs_verify}/defaults/main.yml | 0 roles/{scale_hdfs/precheck => nfs_verify}/meta/main.yml | 0 roles/{nfs/postcheck => nfs_verify}/tasks/check.yml | 0 roles/{nfs/postcheck => nfs_verify}/tasks/main.yml | 0 roles/{nfs/postcheck => nfs_verify}/vars/main.yml | 0 roles/{scale_object/cluster => obj_configure}/defaults/main.yml | 0 roles/{scale_object/cluster => obj_configure}/meta/main.yml | 0 roles/{scale_object/cluster => obj_configure}/tasks/configure.yml | 0 .../cluster => obj_configure}/tasks/configure_pmswift.yml | 0 roles/{scale_object/cluster => obj_configure}/tasks/main.yml | 0 .../cluster => obj_configure}/templates/obj_passwd.j2 | 0 roles/{scale_object/cluster => obj_configure}/vars/main.yml | 0 roles/{scale_object/node => obj_install}/defaults/main.yml | 0 roles/{scale_object/node => obj_install}/handlers/main.yml | 0 roles/{scale_object/node => obj_install}/meta/main.yml | 0 roles/{scale_object/node => obj_install}/tasks/install.yml | 0 .../{scale_object/node => obj_install}/tasks/install_dir_pkg.yml | 0 .../node => obj_install}/tasks/install_local_pkg.yml | 0 .../{scale_object/node => obj_install}/tasks/install_pmswift.yml | 0 .../node => obj_install}/tasks/install_remote_pkg.yml | 0 .../node => obj_install}/tasks/install_repository.yml | 0 roles/{scale_object/node => obj_install}/tasks/main.yml | 0 roles/{scale_object/node => obj_install}/tasks/yum/install.yml | 0 roles/{scale_object/node => obj_install}/vars/main.yml | 0 roles/{scale_object/precheck => obj_prepare}/default/main.yml | 0 roles/{scale_object/precheck => obj_prepare}/meta/main.yml | 0 roles/{scale_object/precheck => obj_prepare}/tasks/check.yml | 0 .../precheck => obj_prepare}/tasks/inventory_check.yml | 0 roles/{scale_object/precheck => obj_prepare}/tasks/main.yml | 0 roles/{scale_object/upgrade => obj_upgrade}/defaults/main.yml | 0 roles/{scale_object/upgrade => obj_upgrade}/handlers/main.yml | 0 roles/{scale_object/upgrade => obj_upgrade}/meta/main.yml | 0 roles/{scale_object/upgrade => obj_upgrade}/tasks/install.yml | 0 .../upgrade => obj_upgrade}/tasks/install_dir_pkg.yml | 0 .../upgrade => obj_upgrade}/tasks/install_local_pkg.yml | 0 .../upgrade => obj_upgrade}/tasks/install_pmswift.yml | 0 .../upgrade => obj_upgrade}/tasks/install_remote_pkg.yml | 0 .../upgrade => obj_upgrade}/tasks/install_repository.yml | 0 roles/{scale_object/upgrade => obj_upgrade}/tasks/main.yml | 0 roles/{scale_object/upgrade => obj_upgrade}/tasks/yum/install.yml | 0 roles/{scale_object/upgrade => obj_upgrade}/vars/main.yml | 0 roles/{scale_object/postcheck => obj_verify}/meta/main.yml | 0 roles/{scale_object/postcheck => obj_verify}/tasks/check.yml | 0 roles/{scale_object/postcheck => obj_verify}/tasks/main.yml | 0 roles/{scale_object/postcheck => obj_verify}/vars/main.yml | 0 roles/{zimon/cluster => perfmon_configure}/defaults/main.yml | 0 roles/{zimon/cluster => perfmon_configure}/meta/main.yml | 0 roles/{zimon/cluster => perfmon_configure}/tasks/configure.yml | 0 roles/{zimon/cluster => perfmon_configure}/tasks/main.yml | 0 roles/{zimon/node => perfmon_install}/defaults/main.yml | 0 roles/{zimon/node => perfmon_install}/meta/main.yml | 0 roles/{zimon/node => perfmon_install}/tasks/apt/install.yml | 0 roles/{zimon/node => perfmon_install}/tasks/install.yml | 0 roles/{zimon/node => perfmon_install}/tasks/install_dir_pkg.yml | 0 roles/{zimon/node => perfmon_install}/tasks/install_local_pkg.yml | 0 .../{zimon/node => 
perfmon_install}/tasks/install_remote_pkg.yml | 0 .../{zimon/node => perfmon_install}/tasks/install_repository.yml | 0 roles/{zimon/node => perfmon_install}/tasks/main.yml | 0 roles/{zimon/node => perfmon_install}/tasks/yum/install.yml | 0 roles/{zimon/node => perfmon_install}/tasks/zypper/install.yml | 0 roles/{zimon/node => perfmon_install}/vars/main.yml | 0 roles/{zimon/precheck => perfmon_prepare}/defaults/main.yml | 0 roles/{zimon/precheck => perfmon_prepare}/meta/main.yml | 0 roles/{zimon/precheck => perfmon_prepare}/tasks/main.yml | 0 roles/{zimon/precheck => perfmon_prepare}/vars/main.yml | 0 roles/{zimon/upgrade => perfmon_upgrade}/defaults/main.yml | 0 roles/{zimon/upgrade => perfmon_upgrade}/meta/main.yml | 0 roles/{zimon/upgrade => perfmon_upgrade}/tasks/apt/install.yml | 0 roles/{zimon/upgrade => perfmon_upgrade}/tasks/install.yml | 0 .../{zimon/upgrade => perfmon_upgrade}/tasks/install_dir_pkg.yml | 0 .../upgrade => perfmon_upgrade}/tasks/install_local_pkg.yml | 0 .../upgrade => perfmon_upgrade}/tasks/install_remote_pkg.yml | 0 .../upgrade => perfmon_upgrade}/tasks/install_repository.yml | 0 roles/{zimon/upgrade => perfmon_upgrade}/tasks/main.yml | 0 roles/{zimon/upgrade => perfmon_upgrade}/tasks/yum/install.yml | 0 roles/{zimon/upgrade => perfmon_upgrade}/tasks/zypper/install.yml | 0 roles/{zimon/upgrade => perfmon_upgrade}/vars/main.yml | 0 roles/{zimon/postcheck => perfmon_verify}/defaults/main.yml | 0 roles/{zimon/postcheck => perfmon_verify}/meta/main.yml | 0 roles/{zimon/postcheck => perfmon_verify}/tasks/main.yml | 0 roles/{remote_mount => remotemount_configure}/.yamllint | 0 roles/{remote_mount => remotemount_configure}/README.md | 0 roles/{remote_mount => remotemount_configure}/defaults/main.yml | 0 roles/{remote_mount => remotemount_configure}/handlers/main.yml | 0 roles/{remote_mount => remotemount_configure}/meta/main.yml | 0 .../molecule/default/INSTALL.rst | 0 .../molecule/default/converge.yml | 0 .../molecule/default/molecule.yml | 0 .../molecule/default/verify.yml | 0 .../tasks/cleanup_filesystem_api_cli.yml | 0 .../tasks/cleanup_filesystems.yml | 0 .../tasks/cleanup_remote_mount.yml | 0 .../tasks/cleanup_remote_mount_api_cli.yml | 0 .../tasks/delete_remote_cluster.yml | 0 roles/{remote_mount => remotemount_configure}/tasks/main.yml | 0 .../tasks/mount_filesystem_api_cli.yml | 0 .../tasks/mount_filesystems.yml | 0 roles/{remote_mount => remotemount_configure}/tasks/precheck.yml | 0 .../tasks/remotecluster.yml | 0 .../tasks/remotecluster_api_cli.yml | 0 .../upgrade => remotemount_configure}/tests/inventory | 0 roles/{remote_mount => remotemount_configure}/tests/test.yml | 0 roles/{remote_mount => remotemount_configure}/vars/main.yml | 0 roles/{smb/cluster => smb_configure}/defaults/main.yml | 0 roles/{smb/cluster => smb_configure}/meta/main.yml | 0 roles/{smb/cluster => smb_configure}/tasks/configure.yml | 0 roles/{smb/cluster => smb_configure}/tasks/main.yml | 0 roles/{smb/cluster => smb_configure}/vars/main.yml | 0 roles/{smb/node => smb_install}/defaults/main.yml | 0 roles/{smb/node => smb_install}/handlers/main.yml | 0 roles/{smb/node => smb_install}/meta/main.yml | 0 roles/{smb/node => smb_install}/tasks/apt/install.yml | 0 roles/{smb/node => smb_install}/tasks/install.yml | 0 roles/{smb/node => smb_install}/tasks/install_dir_pkg.yml | 0 roles/{smb/node => smb_install}/tasks/install_local_pkg.yml | 0 roles/{smb/node => smb_install}/tasks/install_remote_pkg.yml | 0 roles/{smb/node => smb_install}/tasks/install_repository.yml | 0 roles/{smb/node => 
smb_install}/tasks/main.yml | 0 roles/{smb/node => smb_install}/tasks/yum/install.yml | 0 roles/{smb/node => smb_install}/tasks/zypper/install.yml | 0 roles/{smb/node => smb_install}/vars/main.yml | 0 roles/{smb/postcheck => smb_prepare}/meta/main.yml | 0 roles/{smb/precheck => smb_prepare}/tasks/check.yml | 0 roles/{smb/precheck => smb_prepare}/tasks/main.yml | 0 roles/{smb/upgrade => smb_upgrade}/defaults/main.yml | 0 roles/{smb/upgrade => smb_upgrade}/handlers/main.yml | 0 roles/{smb/upgrade => smb_upgrade}/meta/main.yml | 0 roles/{smb/upgrade => smb_upgrade}/tasks/apt/install.yml | 0 roles/{smb/upgrade => smb_upgrade}/tasks/install.yml | 0 roles/{smb/upgrade => smb_upgrade}/tasks/install_dir_pkg.yml | 0 roles/{smb/upgrade => smb_upgrade}/tasks/install_local_pkg.yml | 0 roles/{smb/upgrade => smb_upgrade}/tasks/install_remote_pkg.yml | 0 roles/{smb/upgrade => smb_upgrade}/tasks/install_repository.yml | 0 roles/{smb/upgrade => smb_upgrade}/tasks/main.yml | 0 roles/{smb/upgrade => smb_upgrade}/tasks/yum/install.yml | 0 roles/{smb/upgrade => smb_upgrade}/tasks/zypper/install.yml | 0 roles/{smb/upgrade => smb_upgrade}/vars/main.yml | 0 roles/{smb/postcheck => smb_verify}/defaults/main.yml | 0 roles/{smb/precheck => smb_verify}/meta/main.yml | 0 roles/{smb/postcheck => smb_verify}/tasks/check.yml | 0 roles/{smb/postcheck => smb_verify}/tasks/main.yml | 0 roles/{smb/postcheck => smb_verify}/vars/main.yml | 0 520 files changed, 0 insertions(+), 0 deletions(-) rename roles/{scale_hpt/node => afm_cos_install}/defaults/main.yml (100%) rename roles/{scale_hpt/node => afm_cos_install}/meta/main.yml (100%) rename roles/{scale_hpt/node => afm_cos_install}/tasks/apt/install.yml (100%) rename roles/{scale_hpt/node => afm_cos_install}/tasks/install.yml (100%) rename roles/{scale_hpt/node => afm_cos_install}/tasks/install_dir_pkg.yml (100%) rename roles/{scale_hpt/node => afm_cos_install}/tasks/install_local_pkg.yml (100%) rename roles/{scale_hpt/node => afm_cos_install}/tasks/install_repository.yml (100%) rename roles/{scale_hpt/node => afm_cos_install}/tasks/main.yml (100%) rename roles/{scale_hpt/node => afm_cos_install}/tasks/yum/install.yml (100%) rename roles/{scale_hpt/node => afm_cos_install}/tasks/zypper/install.yml (100%) rename roles/{scale_hpt/node => afm_cos_install}/vars/main.yml (100%) rename roles/{scale_hpt/postcheck => afm_cos_prepare}/defaults/main.yml (100%) rename roles/{scale_hpt/postcheck => afm_cos_prepare}/meta/main.yml (100%) rename roles/{scale_hpt/postcheck => afm_cos_prepare}/tasks/main.yml (100%) rename roles/{scale_hpt/upgrade => afm_cos_upgrade}/defaults/main.yml (100%) rename roles/{scale_hpt/upgrade => afm_cos_upgrade}/meta/main.yml (100%) rename roles/{scale_hpt/upgrade => afm_cos_upgrade}/tasks/apt/install.yml (100%) rename roles/{scale_hpt/upgrade => afm_cos_upgrade}/tasks/install.yml (100%) rename roles/{scale_hpt/upgrade => afm_cos_upgrade}/tasks/install_dir_pkg.yml (100%) rename roles/{scale_hpt/upgrade => afm_cos_upgrade}/tasks/install_local_pkg.yml (100%) rename roles/{scale_hpt/upgrade => afm_cos_upgrade}/tasks/install_repository.yml (100%) rename roles/{scale_hpt/upgrade => afm_cos_upgrade}/tasks/main.yml (100%) rename roles/{scale_hpt/upgrade => afm_cos_upgrade}/tasks/yum/install.yml (100%) rename roles/{scale_hpt/upgrade => afm_cos_upgrade}/tasks/zypper/install.yml (100%) rename roles/{scale_hpt/upgrade => afm_cos_upgrade}/vars/main.yml (100%) rename roles/{scale_hpt/precheck => afm_cos_verify}/defaults/main.yml (100%) rename roles/{scale_hpt/precheck => 
afm_cos_verify}/meta/main.yml (100%) rename roles/{scale_hpt/precheck => afm_cos_verify}/tasks/main.yml (100%) rename roles/{scale_auth/upgrade => auth_upgrade}/defaults/main.yml (100%) rename roles/{callhome/node => auth_upgrade}/meta/main.yml (100%) mode change 100755 => 100644 rename roles/{scale_auth/upgrade => auth_upgrade}/tasks/auth.yml (100%) rename roles/{scale_auth/upgrade => auth_upgrade}/tasks/main.yml (100%) rename roles/{scale_auth/upgrade => auth_upgrade}/tasks/parseFile.yml (100%) rename roles/{scale_auth/upgrade => auth_upgrade}/vars/main.yml (100%) rename roles/{callhome/cluster => callhome_configure}/defaults/main.yml (100%) rename roles/{callhome/cluster => callhome_configure}/handlers/main.yml (100%) rename roles/{callhome/cluster => callhome_configure}/meta/main.yml (100%) rename roles/{callhome/cluster => callhome_configure}/tasks/configure.yml (100%) rename roles/{callhome/cluster => callhome_configure}/tasks/main.yml (100%) rename roles/{callhome/cluster => callhome_configure}/vars/main.yml (100%) rename roles/{callhome/node => callhome_install}/defaults/main.yml (100%) rename roles/{callhome/node => callhome_install}/handlers/main.yml (100%) rename roles/{nfs/common => callhome_install}/meta/main.yml (100%) mode change 100644 => 100755 rename roles/{callhome/node => callhome_install}/tasks/apt/install.yml (100%) rename roles/{callhome/node => callhome_install}/tasks/install.yml (100%) rename roles/{callhome/node => callhome_install}/tasks/install_local_pkg.yml (100%) rename roles/{callhome/node => callhome_install}/tasks/install_remote_pkg.yml (100%) rename roles/{callhome/node => callhome_install}/tasks/install_repository.yml (100%) rename roles/{callhome/node => callhome_install}/tasks/main.yml (100%) rename roles/{callhome/node => callhome_install}/tasks/yum/install.yml (100%) rename roles/{callhome/node => callhome_install}/tasks/zypper/install.yml (100%) rename roles/{callhome/node => callhome_install}/vars/main.yml (100%) rename roles/{callhome/precheck => callhome_prepare}/defaults/main.yml (100%) rename roles/{callhome/precheck => callhome_prepare}/handlers/main.yml (100%) rename roles/{callhome/precheck => callhome_prepare}/meta/main.yml (100%) rename roles/{callhome/precheck => callhome_prepare}/tasks/check.yml (100%) rename roles/{callhome/precheck => callhome_prepare}/tasks/main.yml (100%) rename roles/{callhome/postcheck => callhome_prepare}/vars/main.yml (100%) rename roles/{callhome/postcheck => callhome_verify}/defaults/main.yml (100%) rename roles/{callhome/postcheck => callhome_verify}/handlers/main.yml (100%) rename roles/{callhome/postcheck => callhome_verify}/meta/main.yml (100%) rename roles/{callhome/postcheck => callhome_verify}/tasks/check.yml (100%) rename roles/{callhome/postcheck => callhome_verify}/tasks/main.yml (100%) rename roles/{callhome/precheck => callhome_verify}/vars/main.yml (100%) rename roles/{nfs/common => ces_common}/defaults/main.yml (100%) rename roles/{nfs/upgrade => ces_common}/meta/main.yml (100%) rename roles/{nfs/common => ces_common}/tasks/check.yml (100%) rename roles/{nfs/common => ces_common}/tasks/configure.yml (100%) rename roles/{nfs/common => ces_common}/tasks/main.yml (100%) rename roles/{nfs/common => ces_common}/vars/main.yml (100%) rename roles/{core/common => core_common}/defaults/main.yml (100%) rename roles/{core/common => core_common}/handlers/main.yml (100%) rename roles/{core/cluster => core_common}/meta/main.yml (100%) rename roles/{core/common => core_common}/tasks/apt/set_vars.yml (100%) rename 
roles/{core/common => core_common}/tasks/check.yml (100%) rename roles/{core/common => core_common}/tasks/main.yml (100%) rename roles/{core/common => core_common}/tasks/yum/set_vars.yml (100%) rename roles/{core/common => core_common}/tasks/zypper/set_vars.yml (100%) rename roles/{core/cluster => core_common}/tests/inventory (100%) rename roles/{core/cluster => core_common}/tests/test.yml (100%) rename roles/{core/common => core_common}/vars/main.yml (100%) rename roles/{core/cluster => core_configure}/defaults/main.yml (100%) rename roles/{core/cluster => core_configure}/handlers/main.yml (100%) rename roles/{core/common => core_configure}/meta/main.yml (100%) rename roles/{core/cluster => core_configure}/tasks/check.yml (100%) rename roles/{core/cluster => core_configure}/tasks/cluster.yml (100%) rename roles/{core/cluster => core_configure}/tasks/cluster_start.yml (100%) rename roles/{core/cluster => core_configure}/tasks/config.yml (100%) rename roles/{core/cluster => core_configure}/tasks/finalize.yml (100%) rename roles/{core/cluster => core_configure}/tasks/install_gplbin.yml (100%) rename roles/{core/cluster => core_configure}/tasks/main.yml (100%) rename roles/{core/cluster => core_configure}/tasks/removenode.yml (100%) rename roles/{core/cluster => core_configure}/tasks/storage.yml (100%) rename roles/{core/cluster => core_configure}/tasks/storage_disk.yml (100%) rename roles/{core/cluster => core_configure}/tasks/storage_fs.yml (100%) rename roles/{core/cluster => core_configure}/templates/AddNodeFile.j2 (100%) rename roles/{core/cluster => core_configure}/templates/ChangeFile.j2 (100%) rename roles/{core/cluster => core_configure}/templates/NewNodeFile.j2 (100%) rename roles/{core/cluster => core_configure}/templates/NodeClass.j2 (100%) rename roles/{core/cluster => core_configure}/templates/StanzaFile.j2 (100%) rename roles/{core/cluster => core_configure}/templates/StanzaFile_fs.j2 (100%) rename roles/{core/cluster => core_configure}/templates/StanzaFile_nsd.j2 (100%) rename roles/{core/common => core_configure}/tests/inventory (100%) rename roles/{core/common => core_configure}/tests/test.yml (100%) rename roles/{core/cluster => core_configure}/vars/main.yml (100%) rename roles/{core/node => core_install}/defaults/main.yml (100%) rename roles/{core/node => core_install}/handlers/main.yml (100%) rename roles/{core/node => core_install}/meta/main.yml (100%) rename roles/{core/node => core_install}/tasks/apt/install.yml (100%) rename roles/{core/node => core_install}/tasks/build.yml (100%) rename roles/{core/node => core_install}/tasks/finalize.yml (100%) rename roles/{core/node => core_install}/tasks/install.yml (100%) rename roles/{core/node => core_install}/tasks/install_dir_pkg.yml (100%) rename roles/{core/node => core_install}/tasks/install_gplbin.yml (100%) rename roles/{core/node => core_install}/tasks/install_license_pkg.yml (100%) rename roles/{core/node => core_install}/tasks/install_license_repository.yml (100%) rename roles/{core/node => core_install}/tasks/install_local_pkg.yml (100%) rename roles/{core/node => core_install}/tasks/install_remote_pkg.yml (100%) rename roles/{core/node => core_install}/tasks/install_repository.yml (100%) rename roles/{core/node => core_install}/tasks/main.yml (100%) rename roles/{core/node => core_install}/tasks/update.yml (100%) rename roles/{core/node => core_install}/tasks/upgrade.yml (100%) rename roles/{core/node => core_install}/tasks/yum/install.yml (100%) rename roles/{core/node => core_install}/tasks/zypper/install.yml 
(100%) rename roles/{core/node => core_install}/templates/AddNodeFile.j2 (100%) rename roles/{core/node => core_install}/templates/ChangeFile.j2 (100%) rename roles/{core/node => core_install}/templates/NewNodeFile.j2 (100%) rename roles/{core/node => core_install}/templates/NodeClass.j2 (100%) rename roles/{core/node => core_install}/templates/StanzaFile.j2 (100%) rename roles/{core/node => core_install}/tests/inventory (100%) rename roles/{core/node => core_install}/tests/test.yml (100%) rename roles/{core/node => core_install}/vars/main.yml (100%) rename roles/{core/precheck => core_prepare}/defaults/main.yml (100%) rename roles/{core/precheck => core_prepare}/handlers/main.yml (100%) rename roles/{core/precheck => core_prepare}/meta/main.yml (100%) rename roles/{core/precheck => core_prepare}/tasks/main.yml (100%) rename roles/{core/precheck => core_prepare}/tasks/prepare.yml (100%) rename roles/{core/postcheck => core_prepare}/tests/inventory (100%) rename roles/{core/postcheck => core_prepare}/tests/test.yml (100%) rename roles/{core/precheck => core_prepare}/vars/main.yml (100%) rename roles/{core/upgrade => core_upgrade}/defaults/main.yml (100%) rename roles/{core/upgrade => core_upgrade}/handlers/main.yml (100%) rename roles/{core/upgrade => core_upgrade}/meta/main.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/apt/install.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/build.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/finalize.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/install.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/install_dir_pkg.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/install_gplbin.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/install_license_pkg.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/install_license_repository.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/install_local_pkg.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/install_remote_pkg.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/install_repository.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/main.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/yum/install.yml (100%) rename roles/{core/upgrade => core_upgrade}/tasks/zypper/install.yml (100%) rename roles/{core/precheck => core_upgrade}/tests/inventory (100%) rename roles/{core/precheck => core_upgrade}/tests/test.yml (100%) rename roles/{core/upgrade => core_upgrade}/vars/main.yml (100%) rename roles/{core/postcheck => core_verify}/defaults/main.yml (100%) rename roles/{core/postcheck => core_verify}/handlers/main.yml (100%) rename roles/{core/postcheck => core_verify}/meta/main.yml (100%) rename roles/{core/postcheck => core_verify}/tasks/main.yml (100%) rename roles/{core/upgrade => core_verify}/tests/inventory (100%) rename roles/{core/upgrade => core_verify}/tests/test.yml (100%) rename roles/{core/postcheck => core_verify}/vars/main.yml (100%) rename roles/{scale_ece/cluster => ece_configure}/defaults/main.yml (100%) rename roles/{scale_ece/cluster => ece_configure}/handlers/main.yml (100%) rename roles/{scale_ece/cluster => ece_configure}/meta/main.yml (100%) rename roles/{scale_ece/cluster => ece_configure}/tasks/create_filesystem.yml (100%) rename roles/{scale_ece/cluster => ece_configure}/tasks/create_recoverygroup.yml (100%) rename roles/{scale_ece/cluster => ece_configure}/tasks/create_vdisk.yml (100%) rename roles/{scale_ece/cluster => 
ece_configure}/tasks/main.yml (100%) rename roles/{remote_mount => ece_configure}/tests/inventory (100%) rename roles/{scale_ece/cluster => ece_configure}/tests/test.yml (100%) rename roles/{scale_ece/cluster => ece_configure}/vars/main.yml (100%) rename roles/{scale_ece/node => ece_install}/defaults/main.yml (100%) rename roles/{nfs/node => ece_install}/handlers/main.yml (100%) rename roles/{scale_ece/node => ece_install}/meta/main.yml (100%) rename roles/{scale_ece/node => ece_install}/tasks/install.yml (100%) rename roles/{scale_ece/node => ece_install}/tasks/install_dir_pkg.yml (100%) rename roles/{scale_ece/node => ece_install}/tasks/install_local_pkg.yml (100%) rename roles/{scale_ece/node => ece_install}/tasks/install_remote_pkg.yml (100%) rename roles/{scale_ece/node => ece_install}/tasks/install_repository.yml (100%) rename roles/{scale_ece/node => ece_install}/tasks/main.yml (100%) rename roles/{scale_ece/node => ece_install}/tasks/yum/install.yml (100%) rename roles/{scale_ece/node => ece_install}/vars/main.yml (100%) rename roles/{scale_ece/precheck => ece_prepare}/meta/main.yml (100%) rename roles/{scale_ece/precheck => ece_prepare}/tasks/check.yml (100%) rename roles/{scale_ece/precheck => ece_prepare}/tasks/main.yml (100%) rename roles/{scale_ece/upgrade => ece_upgrade}/defaults/main.yml (100%) rename roles/{nfs/upgrade => ece_upgrade}/handlers/main.yml (100%) rename roles/{scale_ece/upgrade => ece_upgrade}/meta/main.yml (100%) rename roles/{scale_ece/upgrade => ece_upgrade}/tasks/install.yml (100%) rename roles/{scale_ece/upgrade => ece_upgrade}/tasks/install_dir_pkg.yml (100%) rename roles/{scale_ece/upgrade => ece_upgrade}/tasks/install_local_pkg.yml (100%) rename roles/{scale_ece/upgrade => ece_upgrade}/tasks/install_remote_pkg.yml (100%) rename roles/{scale_ece/upgrade => ece_upgrade}/tasks/install_repository.yml (100%) rename roles/{scale_ece/upgrade => ece_upgrade}/tasks/main.yml (100%) rename roles/{scale_ece/upgrade => ece_upgrade}/tasks/yum/install.yml (100%) rename roles/{scale_ece/upgrade => ece_upgrade}/vars/main.yml (100%) rename roles/{scale_fileauditlogging/cluster => fal_configure}/defaults/main.yml (100%) rename roles/{scale_fileauditlogging/cluster => fal_configure}/handlers/main.yml (100%) rename roles/{scale_fileauditlogging/cluster => fal_configure}/meta/main.yml (100%) rename roles/{scale_fileauditlogging/cluster => fal_configure}/tasks/configure.yml (100%) rename roles/{scale_fileauditlogging/cluster => fal_configure}/tasks/configure_fal.yml (100%) rename roles/{scale_fileauditlogging/cluster => fal_configure}/tasks/main.yml (100%) rename roles/{scale_ece/cluster => fal_configure}/tests/inventory (100%) rename roles/{scale_fileauditlogging/cluster => fal_configure}/tests/test.yml (100%) rename roles/{scale_fileauditlogging/cluster => fal_configure}/vars/main.yml (100%) rename roles/{scale_fileauditlogging/node => fal_install}/defaults/main.yml (100%) rename roles/{scale_fileauditlogging/node => fal_install}/meta/main.yml (100%) rename roles/{scale_fileauditlogging/node => fal_install}/tasks/apt/install.yml (100%) rename roles/{scale_fileauditlogging/node => fal_install}/tasks/install.yml (100%) rename roles/{scale_fileauditlogging/node => fal_install}/tasks/install_dir_pkg.yml (100%) rename roles/{scale_fileauditlogging/node => fal_install}/tasks/install_local_pkg.yml (100%) rename roles/{scale_fileauditlogging/node => fal_install}/tasks/install_remote_pkg.yml (100%) rename roles/{scale_fileauditlogging/node => 
fal_install}/tasks/install_repository.yml (100%) rename roles/{scale_fileauditlogging/node => fal_install}/tasks/main.yml (100%) rename roles/{scale_fileauditlogging/node => fal_install}/tasks/yum/install.yml (100%) rename roles/{scale_fileauditlogging/node => fal_install}/tasks/zypper/install.yml (100%) rename roles/{scale_fileauditlogging/cluster => fal_install}/tests/inventory (100%) rename roles/{scale_fileauditlogging/node => fal_install}/tests/test.yml (100%) rename roles/{scale_fileauditlogging/node => fal_install}/vars/main.yml (100%) rename roles/{scale_fileauditlogging/precheck => fal_prepare}/defaults/main.yml (100%) rename roles/{scale_fileauditlogging/precheck => fal_prepare}/handlers/main.yml (100%) rename roles/{scale_fileauditlogging/postcheck => fal_prepare}/meta/main.yml (100%) rename roles/{scale_fileauditlogging/precheck => fal_prepare}/tasks/main.yml (100%) rename roles/{scale_fileauditlogging/node => fal_prepare}/tests/inventory (100%) rename roles/{scale_fileauditlogging/precheck => fal_prepare}/tests/test.yml (100%) rename roles/{scale_fileauditlogging/precheck => fal_prepare}/vars/main.yml (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/defaults/main.yml (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/handlers/main.yml (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/meta/main.yml (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/tasks/apt/install.yml (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/tasks/install.yml (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/tasks/install_dir_pkg.yml (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/tasks/install_local_pkg.yml (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/tasks/install_remote_pkg.yml (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/tasks/install_repository.yml (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/tasks/main.yml (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/tasks/yum/install.yml (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/tasks/zypper/install.yml (100%) rename roles/{scale_fileauditlogging/postcheck => fal_upgrade}/tests/inventory (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/tests/test.yml (100%) rename roles/{scale_fileauditlogging/upgrade => fal_upgrade}/vars/main.yml (100%) rename roles/{scale_fileauditlogging/postcheck => fal_verify}/defaults/main.yml (100%) rename roles/{scale_fileauditlogging/postcheck => fal_verify}/handlers/main.yml (100%) rename roles/{scale_fileauditlogging/precheck => fal_verify}/meta/main.yml (100%) rename roles/{scale_fileauditlogging/postcheck => fal_verify}/tasks/check.yml (100%) rename roles/{scale_fileauditlogging/postcheck => fal_verify}/tasks/main.yml (100%) rename roles/{scale_fileauditlogging/precheck => fal_verify}/tests/inventory (100%) rename roles/{scale_fileauditlogging/postcheck => fal_verify}/tests/test.yml (100%) rename roles/{scale_fileauditlogging/postcheck => fal_verify}/vars/main.yml (100%) rename roles/{gui/cluster => gui_configure}/defaults/main.yml (100%) rename roles/{gui/cluster => gui_configure}/meta/main.yml (100%) rename roles/{gui/cluster => gui_configure}/tasks/chpasswdpolicy.yml (100%) rename roles/{gui/cluster => gui_configure}/tasks/configure.yml (100%) rename roles/{gui/cluster => gui_configure}/tasks/email.yml (100%) rename roles/{gui/cluster => 
gui_configure}/tasks/hasi_vault_certificate.yml (100%) rename roles/{gui/cluster => gui_configure}/tasks/hasi_vault_user.yml (100%) rename roles/{gui/cluster => gui_configure}/tasks/ldap.yml (100%) rename roles/{gui/cluster => gui_configure}/tasks/main.yml (100%) rename roles/{gui/cluster => gui_configure}/tasks/snmp.yml (100%) rename roles/{gui/cluster => gui_configure}/tasks/users.yml (100%) rename roles/{gui/node => gui_install}/defaults/main.yml (100%) rename roles/{gui/node => gui_install}/meta/main.yml (100%) rename roles/{gui/node => gui_install}/tasks/apt/install.yml (100%) rename roles/{gui/node => gui_install}/tasks/install.yml (100%) rename roles/{gui/node => gui_install}/tasks/install_dir_pkg.yml (100%) rename roles/{gui/node => gui_install}/tasks/install_local_pkg.yml (100%) rename roles/{gui/node => gui_install}/tasks/install_remote_pkg.yml (100%) rename roles/{gui/node => gui_install}/tasks/install_repository.yml (100%) rename roles/{gui/node => gui_install}/tasks/main.yml (100%) rename roles/{gui/node => gui_install}/tasks/yum/install.yml (100%) rename roles/{gui/node => gui_install}/tasks/zypper/install.yml (100%) rename roles/{gui/node => gui_install}/vars/main.yml (100%) rename roles/{gui/precheck => gui_prepare}/defaults/main.yml (100%) rename roles/{gui/precheck => gui_prepare}/meta/main.yml (100%) rename roles/{gui/precheck => gui_prepare}/tasks/inventory_check.yml (100%) rename roles/{gui/precheck => gui_prepare}/tasks/main.yml (100%) rename roles/{gui/upgrade => gui_upgrade}/defaults/main.yml (100%) rename roles/{gui/upgrade => gui_upgrade}/meta/main.yml (100%) rename roles/{gui/upgrade => gui_upgrade}/tasks/apt/install.yml (100%) rename roles/{gui/upgrade => gui_upgrade}/tasks/install.yml (100%) rename roles/{gui/upgrade => gui_upgrade}/tasks/install_dir_pkg.yml (100%) rename roles/{gui/upgrade => gui_upgrade}/tasks/install_local_pkg.yml (100%) rename roles/{gui/upgrade => gui_upgrade}/tasks/install_remote_pkg.yml (100%) rename roles/{gui/upgrade => gui_upgrade}/tasks/install_repository.yml (100%) rename roles/{gui/upgrade => gui_upgrade}/tasks/main.yml (100%) rename roles/{gui/upgrade => gui_upgrade}/tasks/yum/install.yml (100%) rename roles/{gui/upgrade => gui_upgrade}/tasks/zypper/install.yml (100%) rename roles/{gui/upgrade => gui_upgrade}/vars/main.yml (100%) rename roles/{gui/postcheck => gui_verify}/defaults/main.yml (100%) rename roles/{gui/postcheck => gui_verify}/meta/main.yml (100%) rename roles/{gui/postcheck => gui_verify}/tasks/main.yml (100%) rename roles/{scale_hdfs/cluster => hdfs_configure}/.travis.yml (100%) rename roles/{scale_hdfs/cluster => hdfs_configure}/defaults/main.yml (100%) rename roles/{scale_hdfs/cluster => hdfs_configure}/meta/main.yml (100%) rename roles/{scale_hdfs/cluster => hdfs_configure}/tasks/append_dict.yml (100%) rename roles/{scale_hdfs/cluster => hdfs_configure}/tasks/configure.yml (100%) rename roles/{scale_hdfs/cluster => hdfs_configure}/tasks/env_setup.yml (100%) rename roles/{scale_hdfs/cluster => hdfs_configure}/tasks/main.yml (100%) rename roles/{scale_hdfs/cluster => hdfs_configure}/vars/main.yml (100%) rename roles/{scale_hdfs/node => hdfs_install}/.travis.yml (100%) rename roles/{scale_hdfs/node => hdfs_install}/defaults/main.yml (100%) rename roles/{scale_ece/node => hdfs_install}/handlers/main.yml (100%) rename roles/{scale_hdfs/node => hdfs_install}/meta/main.yml (100%) rename roles/{scale_hdfs/node => hdfs_install}/tasks/install.yml (100%) rename roles/{scale_hdfs/node => 
hdfs_install}/tasks/install_dir_pkg.yml (100%) rename roles/{scale_hdfs/node => hdfs_install}/tasks/install_local_pkg.yml (100%) rename roles/{scale_hdfs/node => hdfs_install}/tasks/install_remote_pkg.yml (100%) rename roles/{scale_hdfs/node => hdfs_install}/tasks/install_repository.yml (100%) rename roles/{scale_hdfs/node => hdfs_install}/tasks/main.yml (100%) rename roles/{scale_hdfs/node => hdfs_install}/tasks/prepare_env.yml (100%) rename roles/{scale_hdfs/node => hdfs_install}/tasks/yum/install.yml (100%) rename roles/{scale_hdfs/node => hdfs_install}/vars/main.yml (100%) rename roles/{scale_hdfs/postcheck => hdfs_prepare}/.travis.yml (100%) rename roles/{scale_hdfs/precheck => hdfs_prepare}/defaults/main.yml (100%) rename roles/{nfs/postcheck => hdfs_prepare}/meta/main.yml (100%) rename roles/{scale_hdfs/precheck => hdfs_prepare}/tasks/check.yml (100%) rename roles/{scale_hdfs/precheck => hdfs_prepare}/tasks/java_home.yml (100%) rename roles/{scale_hdfs/precheck => hdfs_prepare}/tasks/main.yml (100%) rename roles/{scale_hdfs/precheck => hdfs_prepare}/tasks/prepare_env.yml (100%) rename roles/{scale_hdfs/precheck => hdfs_prepare}/vars/main.yml (100%) rename roles/{scale_hdfs/upgrade => hdfs_upgrade}/defaults/main.yml (100%) rename roles/{scale_hdfs/upgrade => hdfs_upgrade}/handlers/mail.yml (100%) rename roles/{scale_auth/upgrade => hdfs_upgrade}/meta/main.yml (100%) rename roles/{scale_hdfs/upgrade => hdfs_upgrade}/tasks/main.yml (100%) rename roles/{scale_hdfs/upgrade => hdfs_upgrade}/tasks/prepare_env.yml (100%) rename roles/{scale_hdfs/upgrade => hdfs_upgrade}/tasks/upgrade.yml (100%) rename roles/{scale_hdfs/upgrade => hdfs_upgrade}/tasks/upgrade_dir_pkg.yml (100%) rename roles/{scale_hdfs/upgrade => hdfs_upgrade}/tasks/upgrade_local_pkg.yml (100%) rename roles/{scale_hdfs/upgrade => hdfs_upgrade}/tasks/upgrade_remote_pkg.yml (100%) rename roles/{scale_hdfs/upgrade => hdfs_upgrade}/tasks/upgrade_repository.yml (100%) rename roles/{scale_hdfs/upgrade => hdfs_upgrade}/tasks/yum/upgrade.yml (100%) rename roles/{scale_hdfs/upgrade => hdfs_upgrade}/vars/main.yml (100%) rename roles/{scale_hdfs/precheck => hdfs_verify}/.travis.yml (100%) rename roles/{scale_hdfs/postcheck => hdfs_verify}/defaults/main.yml (100%) rename roles/{nfs/precheck => hdfs_verify}/meta/main.yml (100%) rename roles/{scale_hdfs/postcheck => hdfs_verify}/tasks/check.yml (100%) rename roles/{scale_hdfs/postcheck => hdfs_verify}/tasks/main.yml (100%) rename roles/{scale_hdfs/postcheck => hdfs_verify}/vars/main.yml (100%) rename roles/{nfs/cluster => nfs_configure}/defaults/main.yml (100%) rename roles/{nfs/cluster => nfs_configure}/meta/main.yml (100%) rename roles/{nfs/cluster => nfs_configure}/tasks/configure.yml (100%) rename roles/{nfs/cluster => nfs_configure}/tasks/main.yml (100%) rename roles/{nfs/cluster => nfs_configure}/vars/main.yml (100%) rename roles/{nfs/node => nfs_install}/defaults/main.yml (100%) rename roles/{scale_ece/upgrade => nfs_install}/handlers/main.yml (100%) rename roles/{nfs/node => nfs_install}/meta/main.yml (100%) rename roles/{nfs/node => nfs_install}/tasks/apt/install.yml (100%) rename roles/{nfs/node => nfs_install}/tasks/install.yml (100%) rename roles/{nfs/node => nfs_install}/tasks/install_dir_pkg.yml (100%) rename roles/{nfs/node => nfs_install}/tasks/install_local_pkg.yml (100%) rename roles/{nfs/node => nfs_install}/tasks/install_remote_pkg.yml (100%) rename roles/{nfs/node => nfs_install}/tasks/install_repository.yml (100%) rename roles/{nfs/node => nfs_install}/tasks/main.yml 
(100%) rename roles/{nfs/node => nfs_install}/tasks/yum/install.yml (100%) rename roles/{nfs/node => nfs_install}/tasks/zypper/install.yml (100%) rename roles/{nfs/node => nfs_install}/vars/main.yml (100%) rename roles/{scale_hdfs/postcheck => nfs_prepare}/meta/main.yml (100%) rename roles/{nfs/precheck => nfs_prepare}/tasks/check.yml (100%) rename roles/{nfs/precheck => nfs_prepare}/tasks/main.yml (100%) rename roles/{nfs/upgrade => nfs_upgrade}/defaults/main.yml (100%) rename roles/{scale_hdfs/node => nfs_upgrade}/handlers/main.yml (100%) rename roles/{scale_hdfs/upgrade => nfs_upgrade}/meta/main.yml (100%) rename roles/{nfs/upgrade => nfs_upgrade}/tasks/apt/install.yml (100%) rename roles/{nfs/upgrade => nfs_upgrade}/tasks/install.yml (100%) rename roles/{nfs/upgrade => nfs_upgrade}/tasks/install_dir_pkg.yml (100%) rename roles/{nfs/upgrade => nfs_upgrade}/tasks/install_local_pkg.yml (100%) rename roles/{nfs/upgrade => nfs_upgrade}/tasks/install_remote_pkg.yml (100%) rename roles/{nfs/upgrade => nfs_upgrade}/tasks/install_repository.yml (100%) rename roles/{nfs/upgrade => nfs_upgrade}/tasks/main.yml (100%) rename roles/{nfs/upgrade => nfs_upgrade}/tasks/yum/install.yml (100%) rename roles/{nfs/upgrade => nfs_upgrade}/tasks/zypper/install.yml (100%) rename roles/{nfs/upgrade => nfs_upgrade}/vars/main.yml (100%) rename roles/{nfs/postcheck => nfs_verify}/defaults/main.yml (100%) rename roles/{scale_hdfs/precheck => nfs_verify}/meta/main.yml (100%) rename roles/{nfs/postcheck => nfs_verify}/tasks/check.yml (100%) rename roles/{nfs/postcheck => nfs_verify}/tasks/main.yml (100%) rename roles/{nfs/postcheck => nfs_verify}/vars/main.yml (100%) rename roles/{scale_object/cluster => obj_configure}/defaults/main.yml (100%) rename roles/{scale_object/cluster => obj_configure}/meta/main.yml (100%) rename roles/{scale_object/cluster => obj_configure}/tasks/configure.yml (100%) rename roles/{scale_object/cluster => obj_configure}/tasks/configure_pmswift.yml (100%) rename roles/{scale_object/cluster => obj_configure}/tasks/main.yml (100%) rename roles/{scale_object/cluster => obj_configure}/templates/obj_passwd.j2 (100%) rename roles/{scale_object/cluster => obj_configure}/vars/main.yml (100%) rename roles/{scale_object/node => obj_install}/defaults/main.yml (100%) rename roles/{scale_object/node => obj_install}/handlers/main.yml (100%) rename roles/{scale_object/node => obj_install}/meta/main.yml (100%) rename roles/{scale_object/node => obj_install}/tasks/install.yml (100%) rename roles/{scale_object/node => obj_install}/tasks/install_dir_pkg.yml (100%) rename roles/{scale_object/node => obj_install}/tasks/install_local_pkg.yml (100%) rename roles/{scale_object/node => obj_install}/tasks/install_pmswift.yml (100%) rename roles/{scale_object/node => obj_install}/tasks/install_remote_pkg.yml (100%) rename roles/{scale_object/node => obj_install}/tasks/install_repository.yml (100%) rename roles/{scale_object/node => obj_install}/tasks/main.yml (100%) rename roles/{scale_object/node => obj_install}/tasks/yum/install.yml (100%) rename roles/{scale_object/node => obj_install}/vars/main.yml (100%) rename roles/{scale_object/precheck => obj_prepare}/default/main.yml (100%) rename roles/{scale_object/precheck => obj_prepare}/meta/main.yml (100%) rename roles/{scale_object/precheck => obj_prepare}/tasks/check.yml (100%) rename roles/{scale_object/precheck => obj_prepare}/tasks/inventory_check.yml (100%) rename roles/{scale_object/precheck => obj_prepare}/tasks/main.yml (100%) rename 
roles/{scale_object/upgrade => obj_upgrade}/defaults/main.yml (100%) rename roles/{scale_object/upgrade => obj_upgrade}/handlers/main.yml (100%) rename roles/{scale_object/upgrade => obj_upgrade}/meta/main.yml (100%) rename roles/{scale_object/upgrade => obj_upgrade}/tasks/install.yml (100%) rename roles/{scale_object/upgrade => obj_upgrade}/tasks/install_dir_pkg.yml (100%) rename roles/{scale_object/upgrade => obj_upgrade}/tasks/install_local_pkg.yml (100%) rename roles/{scale_object/upgrade => obj_upgrade}/tasks/install_pmswift.yml (100%) rename roles/{scale_object/upgrade => obj_upgrade}/tasks/install_remote_pkg.yml (100%) rename roles/{scale_object/upgrade => obj_upgrade}/tasks/install_repository.yml (100%) rename roles/{scale_object/upgrade => obj_upgrade}/tasks/main.yml (100%) rename roles/{scale_object/upgrade => obj_upgrade}/tasks/yum/install.yml (100%) rename roles/{scale_object/upgrade => obj_upgrade}/vars/main.yml (100%) rename roles/{scale_object/postcheck => obj_verify}/meta/main.yml (100%) rename roles/{scale_object/postcheck => obj_verify}/tasks/check.yml (100%) rename roles/{scale_object/postcheck => obj_verify}/tasks/main.yml (100%) rename roles/{scale_object/postcheck => obj_verify}/vars/main.yml (100%) rename roles/{zimon/cluster => perfmon_configure}/defaults/main.yml (100%) rename roles/{zimon/cluster => perfmon_configure}/meta/main.yml (100%) rename roles/{zimon/cluster => perfmon_configure}/tasks/configure.yml (100%) rename roles/{zimon/cluster => perfmon_configure}/tasks/main.yml (100%) rename roles/{zimon/node => perfmon_install}/defaults/main.yml (100%) rename roles/{zimon/node => perfmon_install}/meta/main.yml (100%) rename roles/{zimon/node => perfmon_install}/tasks/apt/install.yml (100%) rename roles/{zimon/node => perfmon_install}/tasks/install.yml (100%) rename roles/{zimon/node => perfmon_install}/tasks/install_dir_pkg.yml (100%) rename roles/{zimon/node => perfmon_install}/tasks/install_local_pkg.yml (100%) rename roles/{zimon/node => perfmon_install}/tasks/install_remote_pkg.yml (100%) rename roles/{zimon/node => perfmon_install}/tasks/install_repository.yml (100%) rename roles/{zimon/node => perfmon_install}/tasks/main.yml (100%) rename roles/{zimon/node => perfmon_install}/tasks/yum/install.yml (100%) rename roles/{zimon/node => perfmon_install}/tasks/zypper/install.yml (100%) rename roles/{zimon/node => perfmon_install}/vars/main.yml (100%) rename roles/{zimon/precheck => perfmon_prepare}/defaults/main.yml (100%) rename roles/{zimon/precheck => perfmon_prepare}/meta/main.yml (100%) rename roles/{zimon/precheck => perfmon_prepare}/tasks/main.yml (100%) rename roles/{zimon/precheck => perfmon_prepare}/vars/main.yml (100%) rename roles/{zimon/upgrade => perfmon_upgrade}/defaults/main.yml (100%) rename roles/{zimon/upgrade => perfmon_upgrade}/meta/main.yml (100%) rename roles/{zimon/upgrade => perfmon_upgrade}/tasks/apt/install.yml (100%) rename roles/{zimon/upgrade => perfmon_upgrade}/tasks/install.yml (100%) rename roles/{zimon/upgrade => perfmon_upgrade}/tasks/install_dir_pkg.yml (100%) rename roles/{zimon/upgrade => perfmon_upgrade}/tasks/install_local_pkg.yml (100%) rename roles/{zimon/upgrade => perfmon_upgrade}/tasks/install_remote_pkg.yml (100%) rename roles/{zimon/upgrade => perfmon_upgrade}/tasks/install_repository.yml (100%) rename roles/{zimon/upgrade => perfmon_upgrade}/tasks/main.yml (100%) rename roles/{zimon/upgrade => perfmon_upgrade}/tasks/yum/install.yml (100%) rename roles/{zimon/upgrade => perfmon_upgrade}/tasks/zypper/install.yml 
(100%) rename roles/{zimon/upgrade => perfmon_upgrade}/vars/main.yml (100%) rename roles/{zimon/postcheck => perfmon_verify}/defaults/main.yml (100%) rename roles/{zimon/postcheck => perfmon_verify}/meta/main.yml (100%) rename roles/{zimon/postcheck => perfmon_verify}/tasks/main.yml (100%) rename roles/{remote_mount => remotemount_configure}/.yamllint (100%) rename roles/{remote_mount => remotemount_configure}/README.md (100%) rename roles/{remote_mount => remotemount_configure}/defaults/main.yml (100%) rename roles/{remote_mount => remotemount_configure}/handlers/main.yml (100%) rename roles/{remote_mount => remotemount_configure}/meta/main.yml (100%) rename roles/{remote_mount => remotemount_configure}/molecule/default/INSTALL.rst (100%) rename roles/{remote_mount => remotemount_configure}/molecule/default/converge.yml (100%) rename roles/{remote_mount => remotemount_configure}/molecule/default/molecule.yml (100%) rename roles/{remote_mount => remotemount_configure}/molecule/default/verify.yml (100%) rename roles/{remote_mount => remotemount_configure}/tasks/cleanup_filesystem_api_cli.yml (100%) rename roles/{remote_mount => remotemount_configure}/tasks/cleanup_filesystems.yml (100%) rename roles/{remote_mount => remotemount_configure}/tasks/cleanup_remote_mount.yml (100%) rename roles/{remote_mount => remotemount_configure}/tasks/cleanup_remote_mount_api_cli.yml (100%) rename roles/{remote_mount => remotemount_configure}/tasks/delete_remote_cluster.yml (100%) rename roles/{remote_mount => remotemount_configure}/tasks/main.yml (100%) rename roles/{remote_mount => remotemount_configure}/tasks/mount_filesystem_api_cli.yml (100%) rename roles/{remote_mount => remotemount_configure}/tasks/mount_filesystems.yml (100%) rename roles/{remote_mount => remotemount_configure}/tasks/precheck.yml (100%) rename roles/{remote_mount => remotemount_configure}/tasks/remotecluster.yml (100%) rename roles/{remote_mount => remotemount_configure}/tasks/remotecluster_api_cli.yml (100%) rename roles/{scale_fileauditlogging/upgrade => remotemount_configure}/tests/inventory (100%) rename roles/{remote_mount => remotemount_configure}/tests/test.yml (100%) rename roles/{remote_mount => remotemount_configure}/vars/main.yml (100%) rename roles/{smb/cluster => smb_configure}/defaults/main.yml (100%) rename roles/{smb/cluster => smb_configure}/meta/main.yml (100%) rename roles/{smb/cluster => smb_configure}/tasks/configure.yml (100%) rename roles/{smb/cluster => smb_configure}/tasks/main.yml (100%) rename roles/{smb/cluster => smb_configure}/vars/main.yml (100%) rename roles/{smb/node => smb_install}/defaults/main.yml (100%) rename roles/{smb/node => smb_install}/handlers/main.yml (100%) rename roles/{smb/node => smb_install}/meta/main.yml (100%) rename roles/{smb/node => smb_install}/tasks/apt/install.yml (100%) rename roles/{smb/node => smb_install}/tasks/install.yml (100%) rename roles/{smb/node => smb_install}/tasks/install_dir_pkg.yml (100%) rename roles/{smb/node => smb_install}/tasks/install_local_pkg.yml (100%) rename roles/{smb/node => smb_install}/tasks/install_remote_pkg.yml (100%) rename roles/{smb/node => smb_install}/tasks/install_repository.yml (100%) rename roles/{smb/node => smb_install}/tasks/main.yml (100%) rename roles/{smb/node => smb_install}/tasks/yum/install.yml (100%) rename roles/{smb/node => smb_install}/tasks/zypper/install.yml (100%) rename roles/{smb/node => smb_install}/vars/main.yml (100%) rename roles/{smb/postcheck => smb_prepare}/meta/main.yml (100%) rename roles/{smb/precheck => 
smb_prepare}/tasks/check.yml (100%) rename roles/{smb/precheck => smb_prepare}/tasks/main.yml (100%) rename roles/{smb/upgrade => smb_upgrade}/defaults/main.yml (100%) rename roles/{smb/upgrade => smb_upgrade}/handlers/main.yml (100%) rename roles/{smb/upgrade => smb_upgrade}/meta/main.yml (100%) rename roles/{smb/upgrade => smb_upgrade}/tasks/apt/install.yml (100%) rename roles/{smb/upgrade => smb_upgrade}/tasks/install.yml (100%) rename roles/{smb/upgrade => smb_upgrade}/tasks/install_dir_pkg.yml (100%) rename roles/{smb/upgrade => smb_upgrade}/tasks/install_local_pkg.yml (100%) rename roles/{smb/upgrade => smb_upgrade}/tasks/install_remote_pkg.yml (100%) rename roles/{smb/upgrade => smb_upgrade}/tasks/install_repository.yml (100%) rename roles/{smb/upgrade => smb_upgrade}/tasks/main.yml (100%) rename roles/{smb/upgrade => smb_upgrade}/tasks/yum/install.yml (100%) rename roles/{smb/upgrade => smb_upgrade}/tasks/zypper/install.yml (100%) rename roles/{smb/upgrade => smb_upgrade}/vars/main.yml (100%) rename roles/{smb/postcheck => smb_verify}/defaults/main.yml (100%) rename roles/{smb/precheck => smb_verify}/meta/main.yml (100%) rename roles/{smb/postcheck => smb_verify}/tasks/check.yml (100%) rename roles/{smb/postcheck => smb_verify}/tasks/main.yml (100%) rename roles/{smb/postcheck => smb_verify}/vars/main.yml (100%) diff --git a/roles/scale_hpt/node/defaults/main.yml b/roles/afm_cos_install/defaults/main.yml similarity index 100% rename from roles/scale_hpt/node/defaults/main.yml rename to roles/afm_cos_install/defaults/main.yml diff --git a/roles/scale_hpt/node/meta/main.yml b/roles/afm_cos_install/meta/main.yml similarity index 100% rename from roles/scale_hpt/node/meta/main.yml rename to roles/afm_cos_install/meta/main.yml diff --git a/roles/scale_hpt/node/tasks/apt/install.yml b/roles/afm_cos_install/tasks/apt/install.yml similarity index 100% rename from roles/scale_hpt/node/tasks/apt/install.yml rename to roles/afm_cos_install/tasks/apt/install.yml diff --git a/roles/scale_hpt/node/tasks/install.yml b/roles/afm_cos_install/tasks/install.yml similarity index 100% rename from roles/scale_hpt/node/tasks/install.yml rename to roles/afm_cos_install/tasks/install.yml diff --git a/roles/scale_hpt/node/tasks/install_dir_pkg.yml b/roles/afm_cos_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/scale_hpt/node/tasks/install_dir_pkg.yml rename to roles/afm_cos_install/tasks/install_dir_pkg.yml diff --git a/roles/scale_hpt/node/tasks/install_local_pkg.yml b/roles/afm_cos_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/scale_hpt/node/tasks/install_local_pkg.yml rename to roles/afm_cos_install/tasks/install_local_pkg.yml diff --git a/roles/scale_hpt/node/tasks/install_repository.yml b/roles/afm_cos_install/tasks/install_repository.yml similarity index 100% rename from roles/scale_hpt/node/tasks/install_repository.yml rename to roles/afm_cos_install/tasks/install_repository.yml diff --git a/roles/scale_hpt/node/tasks/main.yml b/roles/afm_cos_install/tasks/main.yml similarity index 100% rename from roles/scale_hpt/node/tasks/main.yml rename to roles/afm_cos_install/tasks/main.yml diff --git a/roles/scale_hpt/node/tasks/yum/install.yml b/roles/afm_cos_install/tasks/yum/install.yml similarity index 100% rename from roles/scale_hpt/node/tasks/yum/install.yml rename to roles/afm_cos_install/tasks/yum/install.yml diff --git a/roles/scale_hpt/node/tasks/zypper/install.yml b/roles/afm_cos_install/tasks/zypper/install.yml similarity index 100% rename 
from roles/scale_hpt/node/tasks/zypper/install.yml rename to roles/afm_cos_install/tasks/zypper/install.yml diff --git a/roles/scale_hpt/node/vars/main.yml b/roles/afm_cos_install/vars/main.yml similarity index 100% rename from roles/scale_hpt/node/vars/main.yml rename to roles/afm_cos_install/vars/main.yml diff --git a/roles/scale_hpt/postcheck/defaults/main.yml b/roles/afm_cos_prepare/defaults/main.yml similarity index 100% rename from roles/scale_hpt/postcheck/defaults/main.yml rename to roles/afm_cos_prepare/defaults/main.yml diff --git a/roles/scale_hpt/postcheck/meta/main.yml b/roles/afm_cos_prepare/meta/main.yml similarity index 100% rename from roles/scale_hpt/postcheck/meta/main.yml rename to roles/afm_cos_prepare/meta/main.yml diff --git a/roles/scale_hpt/postcheck/tasks/main.yml b/roles/afm_cos_prepare/tasks/main.yml similarity index 100% rename from roles/scale_hpt/postcheck/tasks/main.yml rename to roles/afm_cos_prepare/tasks/main.yml diff --git a/roles/scale_hpt/upgrade/defaults/main.yml b/roles/afm_cos_upgrade/defaults/main.yml similarity index 100% rename from roles/scale_hpt/upgrade/defaults/main.yml rename to roles/afm_cos_upgrade/defaults/main.yml diff --git a/roles/scale_hpt/upgrade/meta/main.yml b/roles/afm_cos_upgrade/meta/main.yml similarity index 100% rename from roles/scale_hpt/upgrade/meta/main.yml rename to roles/afm_cos_upgrade/meta/main.yml diff --git a/roles/scale_hpt/upgrade/tasks/apt/install.yml b/roles/afm_cos_upgrade/tasks/apt/install.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/apt/install.yml rename to roles/afm_cos_upgrade/tasks/apt/install.yml diff --git a/roles/scale_hpt/upgrade/tasks/install.yml b/roles/afm_cos_upgrade/tasks/install.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/install.yml rename to roles/afm_cos_upgrade/tasks/install.yml diff --git a/roles/scale_hpt/upgrade/tasks/install_dir_pkg.yml b/roles/afm_cos_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/install_dir_pkg.yml rename to roles/afm_cos_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/scale_hpt/upgrade/tasks/install_local_pkg.yml b/roles/afm_cos_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/install_local_pkg.yml rename to roles/afm_cos_upgrade/tasks/install_local_pkg.yml diff --git a/roles/scale_hpt/upgrade/tasks/install_repository.yml b/roles/afm_cos_upgrade/tasks/install_repository.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/install_repository.yml rename to roles/afm_cos_upgrade/tasks/install_repository.yml diff --git a/roles/scale_hpt/upgrade/tasks/main.yml b/roles/afm_cos_upgrade/tasks/main.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/main.yml rename to roles/afm_cos_upgrade/tasks/main.yml diff --git a/roles/scale_hpt/upgrade/tasks/yum/install.yml b/roles/afm_cos_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/yum/install.yml rename to roles/afm_cos_upgrade/tasks/yum/install.yml diff --git a/roles/scale_hpt/upgrade/tasks/zypper/install.yml b/roles/afm_cos_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/zypper/install.yml rename to roles/afm_cos_upgrade/tasks/zypper/install.yml diff --git a/roles/scale_hpt/upgrade/vars/main.yml b/roles/afm_cos_upgrade/vars/main.yml similarity index 100% rename from roles/scale_hpt/upgrade/vars/main.yml rename to roles/afm_cos_upgrade/vars/main.yml diff 
--git a/roles/scale_hpt/precheck/defaults/main.yml b/roles/afm_cos_verify/defaults/main.yml similarity index 100% rename from roles/scale_hpt/precheck/defaults/main.yml rename to roles/afm_cos_verify/defaults/main.yml diff --git a/roles/scale_hpt/precheck/meta/main.yml b/roles/afm_cos_verify/meta/main.yml similarity index 100% rename from roles/scale_hpt/precheck/meta/main.yml rename to roles/afm_cos_verify/meta/main.yml diff --git a/roles/scale_hpt/precheck/tasks/main.yml b/roles/afm_cos_verify/tasks/main.yml similarity index 100% rename from roles/scale_hpt/precheck/tasks/main.yml rename to roles/afm_cos_verify/tasks/main.yml diff --git a/roles/scale_auth/upgrade/defaults/main.yml b/roles/auth_upgrade/defaults/main.yml similarity index 100% rename from roles/scale_auth/upgrade/defaults/main.yml rename to roles/auth_upgrade/defaults/main.yml diff --git a/roles/callhome/node/meta/main.yml b/roles/auth_upgrade/meta/main.yml old mode 100755 new mode 100644 similarity index 100% rename from roles/callhome/node/meta/main.yml rename to roles/auth_upgrade/meta/main.yml diff --git a/roles/scale_auth/upgrade/tasks/auth.yml b/roles/auth_upgrade/tasks/auth.yml similarity index 100% rename from roles/scale_auth/upgrade/tasks/auth.yml rename to roles/auth_upgrade/tasks/auth.yml diff --git a/roles/scale_auth/upgrade/tasks/main.yml b/roles/auth_upgrade/tasks/main.yml similarity index 100% rename from roles/scale_auth/upgrade/tasks/main.yml rename to roles/auth_upgrade/tasks/main.yml diff --git a/roles/scale_auth/upgrade/tasks/parseFile.yml b/roles/auth_upgrade/tasks/parseFile.yml similarity index 100% rename from roles/scale_auth/upgrade/tasks/parseFile.yml rename to roles/auth_upgrade/tasks/parseFile.yml diff --git a/roles/scale_auth/upgrade/vars/main.yml b/roles/auth_upgrade/vars/main.yml similarity index 100% rename from roles/scale_auth/upgrade/vars/main.yml rename to roles/auth_upgrade/vars/main.yml diff --git a/roles/callhome/cluster/defaults/main.yml b/roles/callhome_configure/defaults/main.yml similarity index 100% rename from roles/callhome/cluster/defaults/main.yml rename to roles/callhome_configure/defaults/main.yml diff --git a/roles/callhome/cluster/handlers/main.yml b/roles/callhome_configure/handlers/main.yml similarity index 100% rename from roles/callhome/cluster/handlers/main.yml rename to roles/callhome_configure/handlers/main.yml diff --git a/roles/callhome/cluster/meta/main.yml b/roles/callhome_configure/meta/main.yml similarity index 100% rename from roles/callhome/cluster/meta/main.yml rename to roles/callhome_configure/meta/main.yml diff --git a/roles/callhome/cluster/tasks/configure.yml b/roles/callhome_configure/tasks/configure.yml similarity index 100% rename from roles/callhome/cluster/tasks/configure.yml rename to roles/callhome_configure/tasks/configure.yml diff --git a/roles/callhome/cluster/tasks/main.yml b/roles/callhome_configure/tasks/main.yml similarity index 100% rename from roles/callhome/cluster/tasks/main.yml rename to roles/callhome_configure/tasks/main.yml diff --git a/roles/callhome/cluster/vars/main.yml b/roles/callhome_configure/vars/main.yml similarity index 100% rename from roles/callhome/cluster/vars/main.yml rename to roles/callhome_configure/vars/main.yml diff --git a/roles/callhome/node/defaults/main.yml b/roles/callhome_install/defaults/main.yml similarity index 100% rename from roles/callhome/node/defaults/main.yml rename to roles/callhome_install/defaults/main.yml diff --git a/roles/callhome/node/handlers/main.yml 
b/roles/callhome_install/handlers/main.yml similarity index 100% rename from roles/callhome/node/handlers/main.yml rename to roles/callhome_install/handlers/main.yml diff --git a/roles/nfs/common/meta/main.yml b/roles/callhome_install/meta/main.yml old mode 100644 new mode 100755 similarity index 100% rename from roles/nfs/common/meta/main.yml rename to roles/callhome_install/meta/main.yml diff --git a/roles/callhome/node/tasks/apt/install.yml b/roles/callhome_install/tasks/apt/install.yml similarity index 100% rename from roles/callhome/node/tasks/apt/install.yml rename to roles/callhome_install/tasks/apt/install.yml diff --git a/roles/callhome/node/tasks/install.yml b/roles/callhome_install/tasks/install.yml similarity index 100% rename from roles/callhome/node/tasks/install.yml rename to roles/callhome_install/tasks/install.yml diff --git a/roles/callhome/node/tasks/install_local_pkg.yml b/roles/callhome_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/callhome/node/tasks/install_local_pkg.yml rename to roles/callhome_install/tasks/install_local_pkg.yml diff --git a/roles/callhome/node/tasks/install_remote_pkg.yml b/roles/callhome_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/callhome/node/tasks/install_remote_pkg.yml rename to roles/callhome_install/tasks/install_remote_pkg.yml diff --git a/roles/callhome/node/tasks/install_repository.yml b/roles/callhome_install/tasks/install_repository.yml similarity index 100% rename from roles/callhome/node/tasks/install_repository.yml rename to roles/callhome_install/tasks/install_repository.yml diff --git a/roles/callhome/node/tasks/main.yml b/roles/callhome_install/tasks/main.yml similarity index 100% rename from roles/callhome/node/tasks/main.yml rename to roles/callhome_install/tasks/main.yml diff --git a/roles/callhome/node/tasks/yum/install.yml b/roles/callhome_install/tasks/yum/install.yml similarity index 100% rename from roles/callhome/node/tasks/yum/install.yml rename to roles/callhome_install/tasks/yum/install.yml diff --git a/roles/callhome/node/tasks/zypper/install.yml b/roles/callhome_install/tasks/zypper/install.yml similarity index 100% rename from roles/callhome/node/tasks/zypper/install.yml rename to roles/callhome_install/tasks/zypper/install.yml diff --git a/roles/callhome/node/vars/main.yml b/roles/callhome_install/vars/main.yml similarity index 100% rename from roles/callhome/node/vars/main.yml rename to roles/callhome_install/vars/main.yml diff --git a/roles/callhome/precheck/defaults/main.yml b/roles/callhome_prepare/defaults/main.yml similarity index 100% rename from roles/callhome/precheck/defaults/main.yml rename to roles/callhome_prepare/defaults/main.yml diff --git a/roles/callhome/precheck/handlers/main.yml b/roles/callhome_prepare/handlers/main.yml similarity index 100% rename from roles/callhome/precheck/handlers/main.yml rename to roles/callhome_prepare/handlers/main.yml diff --git a/roles/callhome/precheck/meta/main.yml b/roles/callhome_prepare/meta/main.yml similarity index 100% rename from roles/callhome/precheck/meta/main.yml rename to roles/callhome_prepare/meta/main.yml diff --git a/roles/callhome/precheck/tasks/check.yml b/roles/callhome_prepare/tasks/check.yml similarity index 100% rename from roles/callhome/precheck/tasks/check.yml rename to roles/callhome_prepare/tasks/check.yml diff --git a/roles/callhome/precheck/tasks/main.yml b/roles/callhome_prepare/tasks/main.yml similarity index 100% rename from roles/callhome/precheck/tasks/main.yml rename 
to roles/callhome_prepare/tasks/main.yml diff --git a/roles/callhome/postcheck/vars/main.yml b/roles/callhome_prepare/vars/main.yml similarity index 100% rename from roles/callhome/postcheck/vars/main.yml rename to roles/callhome_prepare/vars/main.yml diff --git a/roles/callhome/postcheck/defaults/main.yml b/roles/callhome_verify/defaults/main.yml similarity index 100% rename from roles/callhome/postcheck/defaults/main.yml rename to roles/callhome_verify/defaults/main.yml diff --git a/roles/callhome/postcheck/handlers/main.yml b/roles/callhome_verify/handlers/main.yml similarity index 100% rename from roles/callhome/postcheck/handlers/main.yml rename to roles/callhome_verify/handlers/main.yml diff --git a/roles/callhome/postcheck/meta/main.yml b/roles/callhome_verify/meta/main.yml similarity index 100% rename from roles/callhome/postcheck/meta/main.yml rename to roles/callhome_verify/meta/main.yml diff --git a/roles/callhome/postcheck/tasks/check.yml b/roles/callhome_verify/tasks/check.yml similarity index 100% rename from roles/callhome/postcheck/tasks/check.yml rename to roles/callhome_verify/tasks/check.yml diff --git a/roles/callhome/postcheck/tasks/main.yml b/roles/callhome_verify/tasks/main.yml similarity index 100% rename from roles/callhome/postcheck/tasks/main.yml rename to roles/callhome_verify/tasks/main.yml diff --git a/roles/callhome/precheck/vars/main.yml b/roles/callhome_verify/vars/main.yml similarity index 100% rename from roles/callhome/precheck/vars/main.yml rename to roles/callhome_verify/vars/main.yml diff --git a/roles/nfs/common/defaults/main.yml b/roles/ces_common/defaults/main.yml similarity index 100% rename from roles/nfs/common/defaults/main.yml rename to roles/ces_common/defaults/main.yml diff --git a/roles/nfs/upgrade/meta/main.yml b/roles/ces_common/meta/main.yml similarity index 100% rename from roles/nfs/upgrade/meta/main.yml rename to roles/ces_common/meta/main.yml diff --git a/roles/nfs/common/tasks/check.yml b/roles/ces_common/tasks/check.yml similarity index 100% rename from roles/nfs/common/tasks/check.yml rename to roles/ces_common/tasks/check.yml diff --git a/roles/nfs/common/tasks/configure.yml b/roles/ces_common/tasks/configure.yml similarity index 100% rename from roles/nfs/common/tasks/configure.yml rename to roles/ces_common/tasks/configure.yml diff --git a/roles/nfs/common/tasks/main.yml b/roles/ces_common/tasks/main.yml similarity index 100% rename from roles/nfs/common/tasks/main.yml rename to roles/ces_common/tasks/main.yml diff --git a/roles/nfs/common/vars/main.yml b/roles/ces_common/vars/main.yml similarity index 100% rename from roles/nfs/common/vars/main.yml rename to roles/ces_common/vars/main.yml diff --git a/roles/core/common/defaults/main.yml b/roles/core_common/defaults/main.yml similarity index 100% rename from roles/core/common/defaults/main.yml rename to roles/core_common/defaults/main.yml diff --git a/roles/core/common/handlers/main.yml b/roles/core_common/handlers/main.yml similarity index 100% rename from roles/core/common/handlers/main.yml rename to roles/core_common/handlers/main.yml diff --git a/roles/core/cluster/meta/main.yml b/roles/core_common/meta/main.yml similarity index 100% rename from roles/core/cluster/meta/main.yml rename to roles/core_common/meta/main.yml diff --git a/roles/core/common/tasks/apt/set_vars.yml b/roles/core_common/tasks/apt/set_vars.yml similarity index 100% rename from roles/core/common/tasks/apt/set_vars.yml rename to roles/core_common/tasks/apt/set_vars.yml diff --git 
a/roles/core/common/tasks/check.yml b/roles/core_common/tasks/check.yml similarity index 100% rename from roles/core/common/tasks/check.yml rename to roles/core_common/tasks/check.yml diff --git a/roles/core/common/tasks/main.yml b/roles/core_common/tasks/main.yml similarity index 100% rename from roles/core/common/tasks/main.yml rename to roles/core_common/tasks/main.yml diff --git a/roles/core/common/tasks/yum/set_vars.yml b/roles/core_common/tasks/yum/set_vars.yml similarity index 100% rename from roles/core/common/tasks/yum/set_vars.yml rename to roles/core_common/tasks/yum/set_vars.yml diff --git a/roles/core/common/tasks/zypper/set_vars.yml b/roles/core_common/tasks/zypper/set_vars.yml similarity index 100% rename from roles/core/common/tasks/zypper/set_vars.yml rename to roles/core_common/tasks/zypper/set_vars.yml diff --git a/roles/core/cluster/tests/inventory b/roles/core_common/tests/inventory similarity index 100% rename from roles/core/cluster/tests/inventory rename to roles/core_common/tests/inventory diff --git a/roles/core/cluster/tests/test.yml b/roles/core_common/tests/test.yml similarity index 100% rename from roles/core/cluster/tests/test.yml rename to roles/core_common/tests/test.yml diff --git a/roles/core/common/vars/main.yml b/roles/core_common/vars/main.yml similarity index 100% rename from roles/core/common/vars/main.yml rename to roles/core_common/vars/main.yml diff --git a/roles/core/cluster/defaults/main.yml b/roles/core_configure/defaults/main.yml similarity index 100% rename from roles/core/cluster/defaults/main.yml rename to roles/core_configure/defaults/main.yml diff --git a/roles/core/cluster/handlers/main.yml b/roles/core_configure/handlers/main.yml similarity index 100% rename from roles/core/cluster/handlers/main.yml rename to roles/core_configure/handlers/main.yml diff --git a/roles/core/common/meta/main.yml b/roles/core_configure/meta/main.yml similarity index 100% rename from roles/core/common/meta/main.yml rename to roles/core_configure/meta/main.yml diff --git a/roles/core/cluster/tasks/check.yml b/roles/core_configure/tasks/check.yml similarity index 100% rename from roles/core/cluster/tasks/check.yml rename to roles/core_configure/tasks/check.yml diff --git a/roles/core/cluster/tasks/cluster.yml b/roles/core_configure/tasks/cluster.yml similarity index 100% rename from roles/core/cluster/tasks/cluster.yml rename to roles/core_configure/tasks/cluster.yml diff --git a/roles/core/cluster/tasks/cluster_start.yml b/roles/core_configure/tasks/cluster_start.yml similarity index 100% rename from roles/core/cluster/tasks/cluster_start.yml rename to roles/core_configure/tasks/cluster_start.yml diff --git a/roles/core/cluster/tasks/config.yml b/roles/core_configure/tasks/config.yml similarity index 100% rename from roles/core/cluster/tasks/config.yml rename to roles/core_configure/tasks/config.yml diff --git a/roles/core/cluster/tasks/finalize.yml b/roles/core_configure/tasks/finalize.yml similarity index 100% rename from roles/core/cluster/tasks/finalize.yml rename to roles/core_configure/tasks/finalize.yml diff --git a/roles/core/cluster/tasks/install_gplbin.yml b/roles/core_configure/tasks/install_gplbin.yml similarity index 100% rename from roles/core/cluster/tasks/install_gplbin.yml rename to roles/core_configure/tasks/install_gplbin.yml diff --git a/roles/core/cluster/tasks/main.yml b/roles/core_configure/tasks/main.yml similarity index 100% rename from roles/core/cluster/tasks/main.yml rename to roles/core_configure/tasks/main.yml diff --git 
a/roles/core/cluster/tasks/removenode.yml b/roles/core_configure/tasks/removenode.yml similarity index 100% rename from roles/core/cluster/tasks/removenode.yml rename to roles/core_configure/tasks/removenode.yml diff --git a/roles/core/cluster/tasks/storage.yml b/roles/core_configure/tasks/storage.yml similarity index 100% rename from roles/core/cluster/tasks/storage.yml rename to roles/core_configure/tasks/storage.yml diff --git a/roles/core/cluster/tasks/storage_disk.yml b/roles/core_configure/tasks/storage_disk.yml similarity index 100% rename from roles/core/cluster/tasks/storage_disk.yml rename to roles/core_configure/tasks/storage_disk.yml diff --git a/roles/core/cluster/tasks/storage_fs.yml b/roles/core_configure/tasks/storage_fs.yml similarity index 100% rename from roles/core/cluster/tasks/storage_fs.yml rename to roles/core_configure/tasks/storage_fs.yml diff --git a/roles/core/cluster/templates/AddNodeFile.j2 b/roles/core_configure/templates/AddNodeFile.j2 similarity index 100% rename from roles/core/cluster/templates/AddNodeFile.j2 rename to roles/core_configure/templates/AddNodeFile.j2 diff --git a/roles/core/cluster/templates/ChangeFile.j2 b/roles/core_configure/templates/ChangeFile.j2 similarity index 100% rename from roles/core/cluster/templates/ChangeFile.j2 rename to roles/core_configure/templates/ChangeFile.j2 diff --git a/roles/core/cluster/templates/NewNodeFile.j2 b/roles/core_configure/templates/NewNodeFile.j2 similarity index 100% rename from roles/core/cluster/templates/NewNodeFile.j2 rename to roles/core_configure/templates/NewNodeFile.j2 diff --git a/roles/core/cluster/templates/NodeClass.j2 b/roles/core_configure/templates/NodeClass.j2 similarity index 100% rename from roles/core/cluster/templates/NodeClass.j2 rename to roles/core_configure/templates/NodeClass.j2 diff --git a/roles/core/cluster/templates/StanzaFile.j2 b/roles/core_configure/templates/StanzaFile.j2 similarity index 100% rename from roles/core/cluster/templates/StanzaFile.j2 rename to roles/core_configure/templates/StanzaFile.j2 diff --git a/roles/core/cluster/templates/StanzaFile_fs.j2 b/roles/core_configure/templates/StanzaFile_fs.j2 similarity index 100% rename from roles/core/cluster/templates/StanzaFile_fs.j2 rename to roles/core_configure/templates/StanzaFile_fs.j2 diff --git a/roles/core/cluster/templates/StanzaFile_nsd.j2 b/roles/core_configure/templates/StanzaFile_nsd.j2 similarity index 100% rename from roles/core/cluster/templates/StanzaFile_nsd.j2 rename to roles/core_configure/templates/StanzaFile_nsd.j2 diff --git a/roles/core/common/tests/inventory b/roles/core_configure/tests/inventory similarity index 100% rename from roles/core/common/tests/inventory rename to roles/core_configure/tests/inventory diff --git a/roles/core/common/tests/test.yml b/roles/core_configure/tests/test.yml similarity index 100% rename from roles/core/common/tests/test.yml rename to roles/core_configure/tests/test.yml diff --git a/roles/core/cluster/vars/main.yml b/roles/core_configure/vars/main.yml similarity index 100% rename from roles/core/cluster/vars/main.yml rename to roles/core_configure/vars/main.yml diff --git a/roles/core/node/defaults/main.yml b/roles/core_install/defaults/main.yml similarity index 100% rename from roles/core/node/defaults/main.yml rename to roles/core_install/defaults/main.yml diff --git a/roles/core/node/handlers/main.yml b/roles/core_install/handlers/main.yml similarity index 100% rename from roles/core/node/handlers/main.yml rename to roles/core_install/handlers/main.yml diff 
--git a/roles/core/node/meta/main.yml b/roles/core_install/meta/main.yml similarity index 100% rename from roles/core/node/meta/main.yml rename to roles/core_install/meta/main.yml diff --git a/roles/core/node/tasks/apt/install.yml b/roles/core_install/tasks/apt/install.yml similarity index 100% rename from roles/core/node/tasks/apt/install.yml rename to roles/core_install/tasks/apt/install.yml diff --git a/roles/core/node/tasks/build.yml b/roles/core_install/tasks/build.yml similarity index 100% rename from roles/core/node/tasks/build.yml rename to roles/core_install/tasks/build.yml diff --git a/roles/core/node/tasks/finalize.yml b/roles/core_install/tasks/finalize.yml similarity index 100% rename from roles/core/node/tasks/finalize.yml rename to roles/core_install/tasks/finalize.yml diff --git a/roles/core/node/tasks/install.yml b/roles/core_install/tasks/install.yml similarity index 100% rename from roles/core/node/tasks/install.yml rename to roles/core_install/tasks/install.yml diff --git a/roles/core/node/tasks/install_dir_pkg.yml b/roles/core_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/core/node/tasks/install_dir_pkg.yml rename to roles/core_install/tasks/install_dir_pkg.yml diff --git a/roles/core/node/tasks/install_gplbin.yml b/roles/core_install/tasks/install_gplbin.yml similarity index 100% rename from roles/core/node/tasks/install_gplbin.yml rename to roles/core_install/tasks/install_gplbin.yml diff --git a/roles/core/node/tasks/install_license_pkg.yml b/roles/core_install/tasks/install_license_pkg.yml similarity index 100% rename from roles/core/node/tasks/install_license_pkg.yml rename to roles/core_install/tasks/install_license_pkg.yml diff --git a/roles/core/node/tasks/install_license_repository.yml b/roles/core_install/tasks/install_license_repository.yml similarity index 100% rename from roles/core/node/tasks/install_license_repository.yml rename to roles/core_install/tasks/install_license_repository.yml diff --git a/roles/core/node/tasks/install_local_pkg.yml b/roles/core_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/core/node/tasks/install_local_pkg.yml rename to roles/core_install/tasks/install_local_pkg.yml diff --git a/roles/core/node/tasks/install_remote_pkg.yml b/roles/core_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/core/node/tasks/install_remote_pkg.yml rename to roles/core_install/tasks/install_remote_pkg.yml diff --git a/roles/core/node/tasks/install_repository.yml b/roles/core_install/tasks/install_repository.yml similarity index 100% rename from roles/core/node/tasks/install_repository.yml rename to roles/core_install/tasks/install_repository.yml diff --git a/roles/core/node/tasks/main.yml b/roles/core_install/tasks/main.yml similarity index 100% rename from roles/core/node/tasks/main.yml rename to roles/core_install/tasks/main.yml diff --git a/roles/core/node/tasks/update.yml b/roles/core_install/tasks/update.yml similarity index 100% rename from roles/core/node/tasks/update.yml rename to roles/core_install/tasks/update.yml diff --git a/roles/core/node/tasks/upgrade.yml b/roles/core_install/tasks/upgrade.yml similarity index 100% rename from roles/core/node/tasks/upgrade.yml rename to roles/core_install/tasks/upgrade.yml diff --git a/roles/core/node/tasks/yum/install.yml b/roles/core_install/tasks/yum/install.yml similarity index 100% rename from roles/core/node/tasks/yum/install.yml rename to roles/core_install/tasks/yum/install.yml diff --git 
a/roles/core/node/tasks/zypper/install.yml b/roles/core_install/tasks/zypper/install.yml similarity index 100% rename from roles/core/node/tasks/zypper/install.yml rename to roles/core_install/tasks/zypper/install.yml diff --git a/roles/core/node/templates/AddNodeFile.j2 b/roles/core_install/templates/AddNodeFile.j2 similarity index 100% rename from roles/core/node/templates/AddNodeFile.j2 rename to roles/core_install/templates/AddNodeFile.j2 diff --git a/roles/core/node/templates/ChangeFile.j2 b/roles/core_install/templates/ChangeFile.j2 similarity index 100% rename from roles/core/node/templates/ChangeFile.j2 rename to roles/core_install/templates/ChangeFile.j2 diff --git a/roles/core/node/templates/NewNodeFile.j2 b/roles/core_install/templates/NewNodeFile.j2 similarity index 100% rename from roles/core/node/templates/NewNodeFile.j2 rename to roles/core_install/templates/NewNodeFile.j2 diff --git a/roles/core/node/templates/NodeClass.j2 b/roles/core_install/templates/NodeClass.j2 similarity index 100% rename from roles/core/node/templates/NodeClass.j2 rename to roles/core_install/templates/NodeClass.j2 diff --git a/roles/core/node/templates/StanzaFile.j2 b/roles/core_install/templates/StanzaFile.j2 similarity index 100% rename from roles/core/node/templates/StanzaFile.j2 rename to roles/core_install/templates/StanzaFile.j2 diff --git a/roles/core/node/tests/inventory b/roles/core_install/tests/inventory similarity index 100% rename from roles/core/node/tests/inventory rename to roles/core_install/tests/inventory diff --git a/roles/core/node/tests/test.yml b/roles/core_install/tests/test.yml similarity index 100% rename from roles/core/node/tests/test.yml rename to roles/core_install/tests/test.yml diff --git a/roles/core/node/vars/main.yml b/roles/core_install/vars/main.yml similarity index 100% rename from roles/core/node/vars/main.yml rename to roles/core_install/vars/main.yml diff --git a/roles/core/precheck/defaults/main.yml b/roles/core_prepare/defaults/main.yml similarity index 100% rename from roles/core/precheck/defaults/main.yml rename to roles/core_prepare/defaults/main.yml diff --git a/roles/core/precheck/handlers/main.yml b/roles/core_prepare/handlers/main.yml similarity index 100% rename from roles/core/precheck/handlers/main.yml rename to roles/core_prepare/handlers/main.yml diff --git a/roles/core/precheck/meta/main.yml b/roles/core_prepare/meta/main.yml similarity index 100% rename from roles/core/precheck/meta/main.yml rename to roles/core_prepare/meta/main.yml diff --git a/roles/core/precheck/tasks/main.yml b/roles/core_prepare/tasks/main.yml similarity index 100% rename from roles/core/precheck/tasks/main.yml rename to roles/core_prepare/tasks/main.yml diff --git a/roles/core/precheck/tasks/prepare.yml b/roles/core_prepare/tasks/prepare.yml similarity index 100% rename from roles/core/precheck/tasks/prepare.yml rename to roles/core_prepare/tasks/prepare.yml diff --git a/roles/core/postcheck/tests/inventory b/roles/core_prepare/tests/inventory similarity index 100% rename from roles/core/postcheck/tests/inventory rename to roles/core_prepare/tests/inventory diff --git a/roles/core/postcheck/tests/test.yml b/roles/core_prepare/tests/test.yml similarity index 100% rename from roles/core/postcheck/tests/test.yml rename to roles/core_prepare/tests/test.yml diff --git a/roles/core/precheck/vars/main.yml b/roles/core_prepare/vars/main.yml similarity index 100% rename from roles/core/precheck/vars/main.yml rename to roles/core_prepare/vars/main.yml diff --git 
a/roles/core/upgrade/defaults/main.yml b/roles/core_upgrade/defaults/main.yml similarity index 100% rename from roles/core/upgrade/defaults/main.yml rename to roles/core_upgrade/defaults/main.yml diff --git a/roles/core/upgrade/handlers/main.yml b/roles/core_upgrade/handlers/main.yml similarity index 100% rename from roles/core/upgrade/handlers/main.yml rename to roles/core_upgrade/handlers/main.yml diff --git a/roles/core/upgrade/meta/main.yml b/roles/core_upgrade/meta/main.yml similarity index 100% rename from roles/core/upgrade/meta/main.yml rename to roles/core_upgrade/meta/main.yml diff --git a/roles/core/upgrade/tasks/apt/install.yml b/roles/core_upgrade/tasks/apt/install.yml similarity index 100% rename from roles/core/upgrade/tasks/apt/install.yml rename to roles/core_upgrade/tasks/apt/install.yml diff --git a/roles/core/upgrade/tasks/build.yml b/roles/core_upgrade/tasks/build.yml similarity index 100% rename from roles/core/upgrade/tasks/build.yml rename to roles/core_upgrade/tasks/build.yml diff --git a/roles/core/upgrade/tasks/finalize.yml b/roles/core_upgrade/tasks/finalize.yml similarity index 100% rename from roles/core/upgrade/tasks/finalize.yml rename to roles/core_upgrade/tasks/finalize.yml diff --git a/roles/core/upgrade/tasks/install.yml b/roles/core_upgrade/tasks/install.yml similarity index 100% rename from roles/core/upgrade/tasks/install.yml rename to roles/core_upgrade/tasks/install.yml diff --git a/roles/core/upgrade/tasks/install_dir_pkg.yml b/roles/core_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/core/upgrade/tasks/install_dir_pkg.yml rename to roles/core_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/core/upgrade/tasks/install_gplbin.yml b/roles/core_upgrade/tasks/install_gplbin.yml similarity index 100% rename from roles/core/upgrade/tasks/install_gplbin.yml rename to roles/core_upgrade/tasks/install_gplbin.yml diff --git a/roles/core/upgrade/tasks/install_license_pkg.yml b/roles/core_upgrade/tasks/install_license_pkg.yml similarity index 100% rename from roles/core/upgrade/tasks/install_license_pkg.yml rename to roles/core_upgrade/tasks/install_license_pkg.yml diff --git a/roles/core/upgrade/tasks/install_license_repository.yml b/roles/core_upgrade/tasks/install_license_repository.yml similarity index 100% rename from roles/core/upgrade/tasks/install_license_repository.yml rename to roles/core_upgrade/tasks/install_license_repository.yml diff --git a/roles/core/upgrade/tasks/install_local_pkg.yml b/roles/core_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/core/upgrade/tasks/install_local_pkg.yml rename to roles/core_upgrade/tasks/install_local_pkg.yml diff --git a/roles/core/upgrade/tasks/install_remote_pkg.yml b/roles/core_upgrade/tasks/install_remote_pkg.yml similarity index 100% rename from roles/core/upgrade/tasks/install_remote_pkg.yml rename to roles/core_upgrade/tasks/install_remote_pkg.yml diff --git a/roles/core/upgrade/tasks/install_repository.yml b/roles/core_upgrade/tasks/install_repository.yml similarity index 100% rename from roles/core/upgrade/tasks/install_repository.yml rename to roles/core_upgrade/tasks/install_repository.yml diff --git a/roles/core/upgrade/tasks/main.yml b/roles/core_upgrade/tasks/main.yml similarity index 100% rename from roles/core/upgrade/tasks/main.yml rename to roles/core_upgrade/tasks/main.yml diff --git a/roles/core/upgrade/tasks/yum/install.yml b/roles/core_upgrade/tasks/yum/install.yml similarity index 100% rename from 
roles/core/upgrade/tasks/yum/install.yml rename to roles/core_upgrade/tasks/yum/install.yml diff --git a/roles/core/upgrade/tasks/zypper/install.yml b/roles/core_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/core/upgrade/tasks/zypper/install.yml rename to roles/core_upgrade/tasks/zypper/install.yml diff --git a/roles/core/precheck/tests/inventory b/roles/core_upgrade/tests/inventory similarity index 100% rename from roles/core/precheck/tests/inventory rename to roles/core_upgrade/tests/inventory diff --git a/roles/core/precheck/tests/test.yml b/roles/core_upgrade/tests/test.yml similarity index 100% rename from roles/core/precheck/tests/test.yml rename to roles/core_upgrade/tests/test.yml diff --git a/roles/core/upgrade/vars/main.yml b/roles/core_upgrade/vars/main.yml similarity index 100% rename from roles/core/upgrade/vars/main.yml rename to roles/core_upgrade/vars/main.yml diff --git a/roles/core/postcheck/defaults/main.yml b/roles/core_verify/defaults/main.yml similarity index 100% rename from roles/core/postcheck/defaults/main.yml rename to roles/core_verify/defaults/main.yml diff --git a/roles/core/postcheck/handlers/main.yml b/roles/core_verify/handlers/main.yml similarity index 100% rename from roles/core/postcheck/handlers/main.yml rename to roles/core_verify/handlers/main.yml diff --git a/roles/core/postcheck/meta/main.yml b/roles/core_verify/meta/main.yml similarity index 100% rename from roles/core/postcheck/meta/main.yml rename to roles/core_verify/meta/main.yml diff --git a/roles/core/postcheck/tasks/main.yml b/roles/core_verify/tasks/main.yml similarity index 100% rename from roles/core/postcheck/tasks/main.yml rename to roles/core_verify/tasks/main.yml diff --git a/roles/core/upgrade/tests/inventory b/roles/core_verify/tests/inventory similarity index 100% rename from roles/core/upgrade/tests/inventory rename to roles/core_verify/tests/inventory diff --git a/roles/core/upgrade/tests/test.yml b/roles/core_verify/tests/test.yml similarity index 100% rename from roles/core/upgrade/tests/test.yml rename to roles/core_verify/tests/test.yml diff --git a/roles/core/postcheck/vars/main.yml b/roles/core_verify/vars/main.yml similarity index 100% rename from roles/core/postcheck/vars/main.yml rename to roles/core_verify/vars/main.yml diff --git a/roles/scale_ece/cluster/defaults/main.yml b/roles/ece_configure/defaults/main.yml similarity index 100% rename from roles/scale_ece/cluster/defaults/main.yml rename to roles/ece_configure/defaults/main.yml diff --git a/roles/scale_ece/cluster/handlers/main.yml b/roles/ece_configure/handlers/main.yml similarity index 100% rename from roles/scale_ece/cluster/handlers/main.yml rename to roles/ece_configure/handlers/main.yml diff --git a/roles/scale_ece/cluster/meta/main.yml b/roles/ece_configure/meta/main.yml similarity index 100% rename from roles/scale_ece/cluster/meta/main.yml rename to roles/ece_configure/meta/main.yml diff --git a/roles/scale_ece/cluster/tasks/create_filesystem.yml b/roles/ece_configure/tasks/create_filesystem.yml similarity index 100% rename from roles/scale_ece/cluster/tasks/create_filesystem.yml rename to roles/ece_configure/tasks/create_filesystem.yml diff --git a/roles/scale_ece/cluster/tasks/create_recoverygroup.yml b/roles/ece_configure/tasks/create_recoverygroup.yml similarity index 100% rename from roles/scale_ece/cluster/tasks/create_recoverygroup.yml rename to roles/ece_configure/tasks/create_recoverygroup.yml diff --git a/roles/scale_ece/cluster/tasks/create_vdisk.yml 
b/roles/ece_configure/tasks/create_vdisk.yml similarity index 100% rename from roles/scale_ece/cluster/tasks/create_vdisk.yml rename to roles/ece_configure/tasks/create_vdisk.yml diff --git a/roles/scale_ece/cluster/tasks/main.yml b/roles/ece_configure/tasks/main.yml similarity index 100% rename from roles/scale_ece/cluster/tasks/main.yml rename to roles/ece_configure/tasks/main.yml diff --git a/roles/remote_mount/tests/inventory b/roles/ece_configure/tests/inventory similarity index 100% rename from roles/remote_mount/tests/inventory rename to roles/ece_configure/tests/inventory diff --git a/roles/scale_ece/cluster/tests/test.yml b/roles/ece_configure/tests/test.yml similarity index 100% rename from roles/scale_ece/cluster/tests/test.yml rename to roles/ece_configure/tests/test.yml diff --git a/roles/scale_ece/cluster/vars/main.yml b/roles/ece_configure/vars/main.yml similarity index 100% rename from roles/scale_ece/cluster/vars/main.yml rename to roles/ece_configure/vars/main.yml diff --git a/roles/scale_ece/node/defaults/main.yml b/roles/ece_install/defaults/main.yml similarity index 100% rename from roles/scale_ece/node/defaults/main.yml rename to roles/ece_install/defaults/main.yml diff --git a/roles/nfs/node/handlers/main.yml b/roles/ece_install/handlers/main.yml similarity index 100% rename from roles/nfs/node/handlers/main.yml rename to roles/ece_install/handlers/main.yml diff --git a/roles/scale_ece/node/meta/main.yml b/roles/ece_install/meta/main.yml similarity index 100% rename from roles/scale_ece/node/meta/main.yml rename to roles/ece_install/meta/main.yml diff --git a/roles/scale_ece/node/tasks/install.yml b/roles/ece_install/tasks/install.yml similarity index 100% rename from roles/scale_ece/node/tasks/install.yml rename to roles/ece_install/tasks/install.yml diff --git a/roles/scale_ece/node/tasks/install_dir_pkg.yml b/roles/ece_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/scale_ece/node/tasks/install_dir_pkg.yml rename to roles/ece_install/tasks/install_dir_pkg.yml diff --git a/roles/scale_ece/node/tasks/install_local_pkg.yml b/roles/ece_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/scale_ece/node/tasks/install_local_pkg.yml rename to roles/ece_install/tasks/install_local_pkg.yml diff --git a/roles/scale_ece/node/tasks/install_remote_pkg.yml b/roles/ece_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/scale_ece/node/tasks/install_remote_pkg.yml rename to roles/ece_install/tasks/install_remote_pkg.yml diff --git a/roles/scale_ece/node/tasks/install_repository.yml b/roles/ece_install/tasks/install_repository.yml similarity index 100% rename from roles/scale_ece/node/tasks/install_repository.yml rename to roles/ece_install/tasks/install_repository.yml diff --git a/roles/scale_ece/node/tasks/main.yml b/roles/ece_install/tasks/main.yml similarity index 100% rename from roles/scale_ece/node/tasks/main.yml rename to roles/ece_install/tasks/main.yml diff --git a/roles/scale_ece/node/tasks/yum/install.yml b/roles/ece_install/tasks/yum/install.yml similarity index 100% rename from roles/scale_ece/node/tasks/yum/install.yml rename to roles/ece_install/tasks/yum/install.yml diff --git a/roles/scale_ece/node/vars/main.yml b/roles/ece_install/vars/main.yml similarity index 100% rename from roles/scale_ece/node/vars/main.yml rename to roles/ece_install/vars/main.yml diff --git a/roles/scale_ece/precheck/meta/main.yml b/roles/ece_prepare/meta/main.yml similarity index 100% rename from 
roles/scale_ece/precheck/meta/main.yml rename to roles/ece_prepare/meta/main.yml diff --git a/roles/scale_ece/precheck/tasks/check.yml b/roles/ece_prepare/tasks/check.yml similarity index 100% rename from roles/scale_ece/precheck/tasks/check.yml rename to roles/ece_prepare/tasks/check.yml diff --git a/roles/scale_ece/precheck/tasks/main.yml b/roles/ece_prepare/tasks/main.yml similarity index 100% rename from roles/scale_ece/precheck/tasks/main.yml rename to roles/ece_prepare/tasks/main.yml diff --git a/roles/scale_ece/upgrade/defaults/main.yml b/roles/ece_upgrade/defaults/main.yml similarity index 100% rename from roles/scale_ece/upgrade/defaults/main.yml rename to roles/ece_upgrade/defaults/main.yml diff --git a/roles/nfs/upgrade/handlers/main.yml b/roles/ece_upgrade/handlers/main.yml similarity index 100% rename from roles/nfs/upgrade/handlers/main.yml rename to roles/ece_upgrade/handlers/main.yml diff --git a/roles/scale_ece/upgrade/meta/main.yml b/roles/ece_upgrade/meta/main.yml similarity index 100% rename from roles/scale_ece/upgrade/meta/main.yml rename to roles/ece_upgrade/meta/main.yml diff --git a/roles/scale_ece/upgrade/tasks/install.yml b/roles/ece_upgrade/tasks/install.yml similarity index 100% rename from roles/scale_ece/upgrade/tasks/install.yml rename to roles/ece_upgrade/tasks/install.yml diff --git a/roles/scale_ece/upgrade/tasks/install_dir_pkg.yml b/roles/ece_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/scale_ece/upgrade/tasks/install_dir_pkg.yml rename to roles/ece_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/scale_ece/upgrade/tasks/install_local_pkg.yml b/roles/ece_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/scale_ece/upgrade/tasks/install_local_pkg.yml rename to roles/ece_upgrade/tasks/install_local_pkg.yml diff --git a/roles/scale_ece/upgrade/tasks/install_remote_pkg.yml b/roles/ece_upgrade/tasks/install_remote_pkg.yml similarity index 100% rename from roles/scale_ece/upgrade/tasks/install_remote_pkg.yml rename to roles/ece_upgrade/tasks/install_remote_pkg.yml diff --git a/roles/scale_ece/upgrade/tasks/install_repository.yml b/roles/ece_upgrade/tasks/install_repository.yml similarity index 100% rename from roles/scale_ece/upgrade/tasks/install_repository.yml rename to roles/ece_upgrade/tasks/install_repository.yml diff --git a/roles/scale_ece/upgrade/tasks/main.yml b/roles/ece_upgrade/tasks/main.yml similarity index 100% rename from roles/scale_ece/upgrade/tasks/main.yml rename to roles/ece_upgrade/tasks/main.yml diff --git a/roles/scale_ece/upgrade/tasks/yum/install.yml b/roles/ece_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/scale_ece/upgrade/tasks/yum/install.yml rename to roles/ece_upgrade/tasks/yum/install.yml diff --git a/roles/scale_ece/upgrade/vars/main.yml b/roles/ece_upgrade/vars/main.yml similarity index 100% rename from roles/scale_ece/upgrade/vars/main.yml rename to roles/ece_upgrade/vars/main.yml diff --git a/roles/scale_fileauditlogging/cluster/defaults/main.yml b/roles/fal_configure/defaults/main.yml similarity index 100% rename from roles/scale_fileauditlogging/cluster/defaults/main.yml rename to roles/fal_configure/defaults/main.yml diff --git a/roles/scale_fileauditlogging/cluster/handlers/main.yml b/roles/fal_configure/handlers/main.yml similarity index 100% rename from roles/scale_fileauditlogging/cluster/handlers/main.yml rename to roles/fal_configure/handlers/main.yml diff --git a/roles/scale_fileauditlogging/cluster/meta/main.yml 
b/roles/fal_configure/meta/main.yml similarity index 100% rename from roles/scale_fileauditlogging/cluster/meta/main.yml rename to roles/fal_configure/meta/main.yml diff --git a/roles/scale_fileauditlogging/cluster/tasks/configure.yml b/roles/fal_configure/tasks/configure.yml similarity index 100% rename from roles/scale_fileauditlogging/cluster/tasks/configure.yml rename to roles/fal_configure/tasks/configure.yml diff --git a/roles/scale_fileauditlogging/cluster/tasks/configure_fal.yml b/roles/fal_configure/tasks/configure_fal.yml similarity index 100% rename from roles/scale_fileauditlogging/cluster/tasks/configure_fal.yml rename to roles/fal_configure/tasks/configure_fal.yml diff --git a/roles/scale_fileauditlogging/cluster/tasks/main.yml b/roles/fal_configure/tasks/main.yml similarity index 100% rename from roles/scale_fileauditlogging/cluster/tasks/main.yml rename to roles/fal_configure/tasks/main.yml diff --git a/roles/scale_ece/cluster/tests/inventory b/roles/fal_configure/tests/inventory similarity index 100% rename from roles/scale_ece/cluster/tests/inventory rename to roles/fal_configure/tests/inventory diff --git a/roles/scale_fileauditlogging/cluster/tests/test.yml b/roles/fal_configure/tests/test.yml similarity index 100% rename from roles/scale_fileauditlogging/cluster/tests/test.yml rename to roles/fal_configure/tests/test.yml diff --git a/roles/scale_fileauditlogging/cluster/vars/main.yml b/roles/fal_configure/vars/main.yml similarity index 100% rename from roles/scale_fileauditlogging/cluster/vars/main.yml rename to roles/fal_configure/vars/main.yml diff --git a/roles/scale_fileauditlogging/node/defaults/main.yml b/roles/fal_install/defaults/main.yml similarity index 100% rename from roles/scale_fileauditlogging/node/defaults/main.yml rename to roles/fal_install/defaults/main.yml diff --git a/roles/scale_fileauditlogging/node/meta/main.yml b/roles/fal_install/meta/main.yml similarity index 100% rename from roles/scale_fileauditlogging/node/meta/main.yml rename to roles/fal_install/meta/main.yml diff --git a/roles/scale_fileauditlogging/node/tasks/apt/install.yml b/roles/fal_install/tasks/apt/install.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tasks/apt/install.yml rename to roles/fal_install/tasks/apt/install.yml diff --git a/roles/scale_fileauditlogging/node/tasks/install.yml b/roles/fal_install/tasks/install.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tasks/install.yml rename to roles/fal_install/tasks/install.yml diff --git a/roles/scale_fileauditlogging/node/tasks/install_dir_pkg.yml b/roles/fal_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tasks/install_dir_pkg.yml rename to roles/fal_install/tasks/install_dir_pkg.yml diff --git a/roles/scale_fileauditlogging/node/tasks/install_local_pkg.yml b/roles/fal_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tasks/install_local_pkg.yml rename to roles/fal_install/tasks/install_local_pkg.yml diff --git a/roles/scale_fileauditlogging/node/tasks/install_remote_pkg.yml b/roles/fal_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tasks/install_remote_pkg.yml rename to roles/fal_install/tasks/install_remote_pkg.yml diff --git a/roles/scale_fileauditlogging/node/tasks/install_repository.yml b/roles/fal_install/tasks/install_repository.yml similarity index 100% rename from 
roles/scale_fileauditlogging/node/tasks/install_repository.yml rename to roles/fal_install/tasks/install_repository.yml diff --git a/roles/scale_fileauditlogging/node/tasks/main.yml b/roles/fal_install/tasks/main.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tasks/main.yml rename to roles/fal_install/tasks/main.yml diff --git a/roles/scale_fileauditlogging/node/tasks/yum/install.yml b/roles/fal_install/tasks/yum/install.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tasks/yum/install.yml rename to roles/fal_install/tasks/yum/install.yml diff --git a/roles/scale_fileauditlogging/node/tasks/zypper/install.yml b/roles/fal_install/tasks/zypper/install.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tasks/zypper/install.yml rename to roles/fal_install/tasks/zypper/install.yml diff --git a/roles/scale_fileauditlogging/cluster/tests/inventory b/roles/fal_install/tests/inventory similarity index 100% rename from roles/scale_fileauditlogging/cluster/tests/inventory rename to roles/fal_install/tests/inventory diff --git a/roles/scale_fileauditlogging/node/tests/test.yml b/roles/fal_install/tests/test.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tests/test.yml rename to roles/fal_install/tests/test.yml diff --git a/roles/scale_fileauditlogging/node/vars/main.yml b/roles/fal_install/vars/main.yml similarity index 100% rename from roles/scale_fileauditlogging/node/vars/main.yml rename to roles/fal_install/vars/main.yml diff --git a/roles/scale_fileauditlogging/precheck/defaults/main.yml b/roles/fal_prepare/defaults/main.yml similarity index 100% rename from roles/scale_fileauditlogging/precheck/defaults/main.yml rename to roles/fal_prepare/defaults/main.yml diff --git a/roles/scale_fileauditlogging/precheck/handlers/main.yml b/roles/fal_prepare/handlers/main.yml similarity index 100% rename from roles/scale_fileauditlogging/precheck/handlers/main.yml rename to roles/fal_prepare/handlers/main.yml diff --git a/roles/scale_fileauditlogging/postcheck/meta/main.yml b/roles/fal_prepare/meta/main.yml similarity index 100% rename from roles/scale_fileauditlogging/postcheck/meta/main.yml rename to roles/fal_prepare/meta/main.yml diff --git a/roles/scale_fileauditlogging/precheck/tasks/main.yml b/roles/fal_prepare/tasks/main.yml similarity index 100% rename from roles/scale_fileauditlogging/precheck/tasks/main.yml rename to roles/fal_prepare/tasks/main.yml diff --git a/roles/scale_fileauditlogging/node/tests/inventory b/roles/fal_prepare/tests/inventory similarity index 100% rename from roles/scale_fileauditlogging/node/tests/inventory rename to roles/fal_prepare/tests/inventory diff --git a/roles/scale_fileauditlogging/precheck/tests/test.yml b/roles/fal_prepare/tests/test.yml similarity index 100% rename from roles/scale_fileauditlogging/precheck/tests/test.yml rename to roles/fal_prepare/tests/test.yml diff --git a/roles/scale_fileauditlogging/precheck/vars/main.yml b/roles/fal_prepare/vars/main.yml similarity index 100% rename from roles/scale_fileauditlogging/precheck/vars/main.yml rename to roles/fal_prepare/vars/main.yml diff --git a/roles/scale_fileauditlogging/upgrade/defaults/main.yml b/roles/fal_upgrade/defaults/main.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/defaults/main.yml rename to roles/fal_upgrade/defaults/main.yml diff --git a/roles/scale_fileauditlogging/upgrade/handlers/main.yml b/roles/fal_upgrade/handlers/main.yml similarity index 100% rename from 
roles/scale_fileauditlogging/upgrade/handlers/main.yml rename to roles/fal_upgrade/handlers/main.yml diff --git a/roles/scale_fileauditlogging/upgrade/meta/main.yml b/roles/fal_upgrade/meta/main.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/meta/main.yml rename to roles/fal_upgrade/meta/main.yml diff --git a/roles/scale_fileauditlogging/upgrade/tasks/apt/install.yml b/roles/fal_upgrade/tasks/apt/install.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tasks/apt/install.yml rename to roles/fal_upgrade/tasks/apt/install.yml diff --git a/roles/scale_fileauditlogging/upgrade/tasks/install.yml b/roles/fal_upgrade/tasks/install.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tasks/install.yml rename to roles/fal_upgrade/tasks/install.yml diff --git a/roles/scale_fileauditlogging/upgrade/tasks/install_dir_pkg.yml b/roles/fal_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tasks/install_dir_pkg.yml rename to roles/fal_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/scale_fileauditlogging/upgrade/tasks/install_local_pkg.yml b/roles/fal_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tasks/install_local_pkg.yml rename to roles/fal_upgrade/tasks/install_local_pkg.yml diff --git a/roles/scale_fileauditlogging/upgrade/tasks/install_remote_pkg.yml b/roles/fal_upgrade/tasks/install_remote_pkg.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tasks/install_remote_pkg.yml rename to roles/fal_upgrade/tasks/install_remote_pkg.yml diff --git a/roles/scale_fileauditlogging/upgrade/tasks/install_repository.yml b/roles/fal_upgrade/tasks/install_repository.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tasks/install_repository.yml rename to roles/fal_upgrade/tasks/install_repository.yml diff --git a/roles/scale_fileauditlogging/upgrade/tasks/main.yml b/roles/fal_upgrade/tasks/main.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tasks/main.yml rename to roles/fal_upgrade/tasks/main.yml diff --git a/roles/scale_fileauditlogging/upgrade/tasks/yum/install.yml b/roles/fal_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tasks/yum/install.yml rename to roles/fal_upgrade/tasks/yum/install.yml diff --git a/roles/scale_fileauditlogging/upgrade/tasks/zypper/install.yml b/roles/fal_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tasks/zypper/install.yml rename to roles/fal_upgrade/tasks/zypper/install.yml diff --git a/roles/scale_fileauditlogging/postcheck/tests/inventory b/roles/fal_upgrade/tests/inventory similarity index 100% rename from roles/scale_fileauditlogging/postcheck/tests/inventory rename to roles/fal_upgrade/tests/inventory diff --git a/roles/scale_fileauditlogging/upgrade/tests/test.yml b/roles/fal_upgrade/tests/test.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tests/test.yml rename to roles/fal_upgrade/tests/test.yml diff --git a/roles/scale_fileauditlogging/upgrade/vars/main.yml b/roles/fal_upgrade/vars/main.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/vars/main.yml rename to roles/fal_upgrade/vars/main.yml diff --git a/roles/scale_fileauditlogging/postcheck/defaults/main.yml b/roles/fal_verify/defaults/main.yml similarity index 100% rename from 
roles/scale_fileauditlogging/postcheck/defaults/main.yml rename to roles/fal_verify/defaults/main.yml diff --git a/roles/scale_fileauditlogging/postcheck/handlers/main.yml b/roles/fal_verify/handlers/main.yml similarity index 100% rename from roles/scale_fileauditlogging/postcheck/handlers/main.yml rename to roles/fal_verify/handlers/main.yml diff --git a/roles/scale_fileauditlogging/precheck/meta/main.yml b/roles/fal_verify/meta/main.yml similarity index 100% rename from roles/scale_fileauditlogging/precheck/meta/main.yml rename to roles/fal_verify/meta/main.yml diff --git a/roles/scale_fileauditlogging/postcheck/tasks/check.yml b/roles/fal_verify/tasks/check.yml similarity index 100% rename from roles/scale_fileauditlogging/postcheck/tasks/check.yml rename to roles/fal_verify/tasks/check.yml diff --git a/roles/scale_fileauditlogging/postcheck/tasks/main.yml b/roles/fal_verify/tasks/main.yml similarity index 100% rename from roles/scale_fileauditlogging/postcheck/tasks/main.yml rename to roles/fal_verify/tasks/main.yml diff --git a/roles/scale_fileauditlogging/precheck/tests/inventory b/roles/fal_verify/tests/inventory similarity index 100% rename from roles/scale_fileauditlogging/precheck/tests/inventory rename to roles/fal_verify/tests/inventory diff --git a/roles/scale_fileauditlogging/postcheck/tests/test.yml b/roles/fal_verify/tests/test.yml similarity index 100% rename from roles/scale_fileauditlogging/postcheck/tests/test.yml rename to roles/fal_verify/tests/test.yml diff --git a/roles/scale_fileauditlogging/postcheck/vars/main.yml b/roles/fal_verify/vars/main.yml similarity index 100% rename from roles/scale_fileauditlogging/postcheck/vars/main.yml rename to roles/fal_verify/vars/main.yml diff --git a/roles/gui/cluster/defaults/main.yml b/roles/gui_configure/defaults/main.yml similarity index 100% rename from roles/gui/cluster/defaults/main.yml rename to roles/gui_configure/defaults/main.yml diff --git a/roles/gui/cluster/meta/main.yml b/roles/gui_configure/meta/main.yml similarity index 100% rename from roles/gui/cluster/meta/main.yml rename to roles/gui_configure/meta/main.yml diff --git a/roles/gui/cluster/tasks/chpasswdpolicy.yml b/roles/gui_configure/tasks/chpasswdpolicy.yml similarity index 100% rename from roles/gui/cluster/tasks/chpasswdpolicy.yml rename to roles/gui_configure/tasks/chpasswdpolicy.yml diff --git a/roles/gui/cluster/tasks/configure.yml b/roles/gui_configure/tasks/configure.yml similarity index 100% rename from roles/gui/cluster/tasks/configure.yml rename to roles/gui_configure/tasks/configure.yml diff --git a/roles/gui/cluster/tasks/email.yml b/roles/gui_configure/tasks/email.yml similarity index 100% rename from roles/gui/cluster/tasks/email.yml rename to roles/gui_configure/tasks/email.yml diff --git a/roles/gui/cluster/tasks/hasi_vault_certificate.yml b/roles/gui_configure/tasks/hasi_vault_certificate.yml similarity index 100% rename from roles/gui/cluster/tasks/hasi_vault_certificate.yml rename to roles/gui_configure/tasks/hasi_vault_certificate.yml diff --git a/roles/gui/cluster/tasks/hasi_vault_user.yml b/roles/gui_configure/tasks/hasi_vault_user.yml similarity index 100% rename from roles/gui/cluster/tasks/hasi_vault_user.yml rename to roles/gui_configure/tasks/hasi_vault_user.yml diff --git a/roles/gui/cluster/tasks/ldap.yml b/roles/gui_configure/tasks/ldap.yml similarity index 100% rename from roles/gui/cluster/tasks/ldap.yml rename to roles/gui_configure/tasks/ldap.yml diff --git a/roles/gui/cluster/tasks/main.yml 
b/roles/gui_configure/tasks/main.yml similarity index 100% rename from roles/gui/cluster/tasks/main.yml rename to roles/gui_configure/tasks/main.yml diff --git a/roles/gui/cluster/tasks/snmp.yml b/roles/gui_configure/tasks/snmp.yml similarity index 100% rename from roles/gui/cluster/tasks/snmp.yml rename to roles/gui_configure/tasks/snmp.yml diff --git a/roles/gui/cluster/tasks/users.yml b/roles/gui_configure/tasks/users.yml similarity index 100% rename from roles/gui/cluster/tasks/users.yml rename to roles/gui_configure/tasks/users.yml diff --git a/roles/gui/node/defaults/main.yml b/roles/gui_install/defaults/main.yml similarity index 100% rename from roles/gui/node/defaults/main.yml rename to roles/gui_install/defaults/main.yml diff --git a/roles/gui/node/meta/main.yml b/roles/gui_install/meta/main.yml similarity index 100% rename from roles/gui/node/meta/main.yml rename to roles/gui_install/meta/main.yml diff --git a/roles/gui/node/tasks/apt/install.yml b/roles/gui_install/tasks/apt/install.yml similarity index 100% rename from roles/gui/node/tasks/apt/install.yml rename to roles/gui_install/tasks/apt/install.yml diff --git a/roles/gui/node/tasks/install.yml b/roles/gui_install/tasks/install.yml similarity index 100% rename from roles/gui/node/tasks/install.yml rename to roles/gui_install/tasks/install.yml diff --git a/roles/gui/node/tasks/install_dir_pkg.yml b/roles/gui_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/gui/node/tasks/install_dir_pkg.yml rename to roles/gui_install/tasks/install_dir_pkg.yml diff --git a/roles/gui/node/tasks/install_local_pkg.yml b/roles/gui_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/gui/node/tasks/install_local_pkg.yml rename to roles/gui_install/tasks/install_local_pkg.yml diff --git a/roles/gui/node/tasks/install_remote_pkg.yml b/roles/gui_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/gui/node/tasks/install_remote_pkg.yml rename to roles/gui_install/tasks/install_remote_pkg.yml diff --git a/roles/gui/node/tasks/install_repository.yml b/roles/gui_install/tasks/install_repository.yml similarity index 100% rename from roles/gui/node/tasks/install_repository.yml rename to roles/gui_install/tasks/install_repository.yml diff --git a/roles/gui/node/tasks/main.yml b/roles/gui_install/tasks/main.yml similarity index 100% rename from roles/gui/node/tasks/main.yml rename to roles/gui_install/tasks/main.yml diff --git a/roles/gui/node/tasks/yum/install.yml b/roles/gui_install/tasks/yum/install.yml similarity index 100% rename from roles/gui/node/tasks/yum/install.yml rename to roles/gui_install/tasks/yum/install.yml diff --git a/roles/gui/node/tasks/zypper/install.yml b/roles/gui_install/tasks/zypper/install.yml similarity index 100% rename from roles/gui/node/tasks/zypper/install.yml rename to roles/gui_install/tasks/zypper/install.yml diff --git a/roles/gui/node/vars/main.yml b/roles/gui_install/vars/main.yml similarity index 100% rename from roles/gui/node/vars/main.yml rename to roles/gui_install/vars/main.yml diff --git a/roles/gui/precheck/defaults/main.yml b/roles/gui_prepare/defaults/main.yml similarity index 100% rename from roles/gui/precheck/defaults/main.yml rename to roles/gui_prepare/defaults/main.yml diff --git a/roles/gui/precheck/meta/main.yml b/roles/gui_prepare/meta/main.yml similarity index 100% rename from roles/gui/precheck/meta/main.yml rename to roles/gui_prepare/meta/main.yml diff --git a/roles/gui/precheck/tasks/inventory_check.yml 
b/roles/gui_prepare/tasks/inventory_check.yml similarity index 100% rename from roles/gui/precheck/tasks/inventory_check.yml rename to roles/gui_prepare/tasks/inventory_check.yml diff --git a/roles/gui/precheck/tasks/main.yml b/roles/gui_prepare/tasks/main.yml similarity index 100% rename from roles/gui/precheck/tasks/main.yml rename to roles/gui_prepare/tasks/main.yml diff --git a/roles/gui/upgrade/defaults/main.yml b/roles/gui_upgrade/defaults/main.yml similarity index 100% rename from roles/gui/upgrade/defaults/main.yml rename to roles/gui_upgrade/defaults/main.yml diff --git a/roles/gui/upgrade/meta/main.yml b/roles/gui_upgrade/meta/main.yml similarity index 100% rename from roles/gui/upgrade/meta/main.yml rename to roles/gui_upgrade/meta/main.yml diff --git a/roles/gui/upgrade/tasks/apt/install.yml b/roles/gui_upgrade/tasks/apt/install.yml similarity index 100% rename from roles/gui/upgrade/tasks/apt/install.yml rename to roles/gui_upgrade/tasks/apt/install.yml diff --git a/roles/gui/upgrade/tasks/install.yml b/roles/gui_upgrade/tasks/install.yml similarity index 100% rename from roles/gui/upgrade/tasks/install.yml rename to roles/gui_upgrade/tasks/install.yml diff --git a/roles/gui/upgrade/tasks/install_dir_pkg.yml b/roles/gui_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/gui/upgrade/tasks/install_dir_pkg.yml rename to roles/gui_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/gui/upgrade/tasks/install_local_pkg.yml b/roles/gui_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/gui/upgrade/tasks/install_local_pkg.yml rename to roles/gui_upgrade/tasks/install_local_pkg.yml diff --git a/roles/gui/upgrade/tasks/install_remote_pkg.yml b/roles/gui_upgrade/tasks/install_remote_pkg.yml similarity index 100% rename from roles/gui/upgrade/tasks/install_remote_pkg.yml rename to roles/gui_upgrade/tasks/install_remote_pkg.yml diff --git a/roles/gui/upgrade/tasks/install_repository.yml b/roles/gui_upgrade/tasks/install_repository.yml similarity index 100% rename from roles/gui/upgrade/tasks/install_repository.yml rename to roles/gui_upgrade/tasks/install_repository.yml diff --git a/roles/gui/upgrade/tasks/main.yml b/roles/gui_upgrade/tasks/main.yml similarity index 100% rename from roles/gui/upgrade/tasks/main.yml rename to roles/gui_upgrade/tasks/main.yml diff --git a/roles/gui/upgrade/tasks/yum/install.yml b/roles/gui_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/gui/upgrade/tasks/yum/install.yml rename to roles/gui_upgrade/tasks/yum/install.yml diff --git a/roles/gui/upgrade/tasks/zypper/install.yml b/roles/gui_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/gui/upgrade/tasks/zypper/install.yml rename to roles/gui_upgrade/tasks/zypper/install.yml diff --git a/roles/gui/upgrade/vars/main.yml b/roles/gui_upgrade/vars/main.yml similarity index 100% rename from roles/gui/upgrade/vars/main.yml rename to roles/gui_upgrade/vars/main.yml diff --git a/roles/gui/postcheck/defaults/main.yml b/roles/gui_verify/defaults/main.yml similarity index 100% rename from roles/gui/postcheck/defaults/main.yml rename to roles/gui_verify/defaults/main.yml diff --git a/roles/gui/postcheck/meta/main.yml b/roles/gui_verify/meta/main.yml similarity index 100% rename from roles/gui/postcheck/meta/main.yml rename to roles/gui_verify/meta/main.yml diff --git a/roles/gui/postcheck/tasks/main.yml b/roles/gui_verify/tasks/main.yml similarity index 100% rename from roles/gui/postcheck/tasks/main.yml rename to 
roles/gui_verify/tasks/main.yml diff --git a/roles/scale_hdfs/cluster/.travis.yml b/roles/hdfs_configure/.travis.yml similarity index 100% rename from roles/scale_hdfs/cluster/.travis.yml rename to roles/hdfs_configure/.travis.yml diff --git a/roles/scale_hdfs/cluster/defaults/main.yml b/roles/hdfs_configure/defaults/main.yml similarity index 100% rename from roles/scale_hdfs/cluster/defaults/main.yml rename to roles/hdfs_configure/defaults/main.yml diff --git a/roles/scale_hdfs/cluster/meta/main.yml b/roles/hdfs_configure/meta/main.yml similarity index 100% rename from roles/scale_hdfs/cluster/meta/main.yml rename to roles/hdfs_configure/meta/main.yml diff --git a/roles/scale_hdfs/cluster/tasks/append_dict.yml b/roles/hdfs_configure/tasks/append_dict.yml similarity index 100% rename from roles/scale_hdfs/cluster/tasks/append_dict.yml rename to roles/hdfs_configure/tasks/append_dict.yml diff --git a/roles/scale_hdfs/cluster/tasks/configure.yml b/roles/hdfs_configure/tasks/configure.yml similarity index 100% rename from roles/scale_hdfs/cluster/tasks/configure.yml rename to roles/hdfs_configure/tasks/configure.yml diff --git a/roles/scale_hdfs/cluster/tasks/env_setup.yml b/roles/hdfs_configure/tasks/env_setup.yml similarity index 100% rename from roles/scale_hdfs/cluster/tasks/env_setup.yml rename to roles/hdfs_configure/tasks/env_setup.yml diff --git a/roles/scale_hdfs/cluster/tasks/main.yml b/roles/hdfs_configure/tasks/main.yml similarity index 100% rename from roles/scale_hdfs/cluster/tasks/main.yml rename to roles/hdfs_configure/tasks/main.yml diff --git a/roles/scale_hdfs/cluster/vars/main.yml b/roles/hdfs_configure/vars/main.yml similarity index 100% rename from roles/scale_hdfs/cluster/vars/main.yml rename to roles/hdfs_configure/vars/main.yml diff --git a/roles/scale_hdfs/node/.travis.yml b/roles/hdfs_install/.travis.yml similarity index 100% rename from roles/scale_hdfs/node/.travis.yml rename to roles/hdfs_install/.travis.yml diff --git a/roles/scale_hdfs/node/defaults/main.yml b/roles/hdfs_install/defaults/main.yml similarity index 100% rename from roles/scale_hdfs/node/defaults/main.yml rename to roles/hdfs_install/defaults/main.yml diff --git a/roles/scale_ece/node/handlers/main.yml b/roles/hdfs_install/handlers/main.yml similarity index 100% rename from roles/scale_ece/node/handlers/main.yml rename to roles/hdfs_install/handlers/main.yml diff --git a/roles/scale_hdfs/node/meta/main.yml b/roles/hdfs_install/meta/main.yml similarity index 100% rename from roles/scale_hdfs/node/meta/main.yml rename to roles/hdfs_install/meta/main.yml diff --git a/roles/scale_hdfs/node/tasks/install.yml b/roles/hdfs_install/tasks/install.yml similarity index 100% rename from roles/scale_hdfs/node/tasks/install.yml rename to roles/hdfs_install/tasks/install.yml diff --git a/roles/scale_hdfs/node/tasks/install_dir_pkg.yml b/roles/hdfs_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/scale_hdfs/node/tasks/install_dir_pkg.yml rename to roles/hdfs_install/tasks/install_dir_pkg.yml diff --git a/roles/scale_hdfs/node/tasks/install_local_pkg.yml b/roles/hdfs_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/scale_hdfs/node/tasks/install_local_pkg.yml rename to roles/hdfs_install/tasks/install_local_pkg.yml diff --git a/roles/scale_hdfs/node/tasks/install_remote_pkg.yml b/roles/hdfs_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/scale_hdfs/node/tasks/install_remote_pkg.yml rename to 
roles/hdfs_install/tasks/install_remote_pkg.yml diff --git a/roles/scale_hdfs/node/tasks/install_repository.yml b/roles/hdfs_install/tasks/install_repository.yml similarity index 100% rename from roles/scale_hdfs/node/tasks/install_repository.yml rename to roles/hdfs_install/tasks/install_repository.yml diff --git a/roles/scale_hdfs/node/tasks/main.yml b/roles/hdfs_install/tasks/main.yml similarity index 100% rename from roles/scale_hdfs/node/tasks/main.yml rename to roles/hdfs_install/tasks/main.yml diff --git a/roles/scale_hdfs/node/tasks/prepare_env.yml b/roles/hdfs_install/tasks/prepare_env.yml similarity index 100% rename from roles/scale_hdfs/node/tasks/prepare_env.yml rename to roles/hdfs_install/tasks/prepare_env.yml diff --git a/roles/scale_hdfs/node/tasks/yum/install.yml b/roles/hdfs_install/tasks/yum/install.yml similarity index 100% rename from roles/scale_hdfs/node/tasks/yum/install.yml rename to roles/hdfs_install/tasks/yum/install.yml diff --git a/roles/scale_hdfs/node/vars/main.yml b/roles/hdfs_install/vars/main.yml similarity index 100% rename from roles/scale_hdfs/node/vars/main.yml rename to roles/hdfs_install/vars/main.yml diff --git a/roles/scale_hdfs/postcheck/.travis.yml b/roles/hdfs_prepare/.travis.yml similarity index 100% rename from roles/scale_hdfs/postcheck/.travis.yml rename to roles/hdfs_prepare/.travis.yml diff --git a/roles/scale_hdfs/precheck/defaults/main.yml b/roles/hdfs_prepare/defaults/main.yml similarity index 100% rename from roles/scale_hdfs/precheck/defaults/main.yml rename to roles/hdfs_prepare/defaults/main.yml diff --git a/roles/nfs/postcheck/meta/main.yml b/roles/hdfs_prepare/meta/main.yml similarity index 100% rename from roles/nfs/postcheck/meta/main.yml rename to roles/hdfs_prepare/meta/main.yml diff --git a/roles/scale_hdfs/precheck/tasks/check.yml b/roles/hdfs_prepare/tasks/check.yml similarity index 100% rename from roles/scale_hdfs/precheck/tasks/check.yml rename to roles/hdfs_prepare/tasks/check.yml diff --git a/roles/scale_hdfs/precheck/tasks/java_home.yml b/roles/hdfs_prepare/tasks/java_home.yml similarity index 100% rename from roles/scale_hdfs/precheck/tasks/java_home.yml rename to roles/hdfs_prepare/tasks/java_home.yml diff --git a/roles/scale_hdfs/precheck/tasks/main.yml b/roles/hdfs_prepare/tasks/main.yml similarity index 100% rename from roles/scale_hdfs/precheck/tasks/main.yml rename to roles/hdfs_prepare/tasks/main.yml diff --git a/roles/scale_hdfs/precheck/tasks/prepare_env.yml b/roles/hdfs_prepare/tasks/prepare_env.yml similarity index 100% rename from roles/scale_hdfs/precheck/tasks/prepare_env.yml rename to roles/hdfs_prepare/tasks/prepare_env.yml diff --git a/roles/scale_hdfs/precheck/vars/main.yml b/roles/hdfs_prepare/vars/main.yml similarity index 100% rename from roles/scale_hdfs/precheck/vars/main.yml rename to roles/hdfs_prepare/vars/main.yml diff --git a/roles/scale_hdfs/upgrade/defaults/main.yml b/roles/hdfs_upgrade/defaults/main.yml similarity index 100% rename from roles/scale_hdfs/upgrade/defaults/main.yml rename to roles/hdfs_upgrade/defaults/main.yml diff --git a/roles/scale_hdfs/upgrade/handlers/mail.yml b/roles/hdfs_upgrade/handlers/mail.yml similarity index 100% rename from roles/scale_hdfs/upgrade/handlers/mail.yml rename to roles/hdfs_upgrade/handlers/mail.yml diff --git a/roles/scale_auth/upgrade/meta/main.yml b/roles/hdfs_upgrade/meta/main.yml similarity index 100% rename from roles/scale_auth/upgrade/meta/main.yml rename to roles/hdfs_upgrade/meta/main.yml diff --git 
a/roles/scale_hdfs/upgrade/tasks/main.yml b/roles/hdfs_upgrade/tasks/main.yml similarity index 100% rename from roles/scale_hdfs/upgrade/tasks/main.yml rename to roles/hdfs_upgrade/tasks/main.yml diff --git a/roles/scale_hdfs/upgrade/tasks/prepare_env.yml b/roles/hdfs_upgrade/tasks/prepare_env.yml similarity index 100% rename from roles/scale_hdfs/upgrade/tasks/prepare_env.yml rename to roles/hdfs_upgrade/tasks/prepare_env.yml diff --git a/roles/scale_hdfs/upgrade/tasks/upgrade.yml b/roles/hdfs_upgrade/tasks/upgrade.yml similarity index 100% rename from roles/scale_hdfs/upgrade/tasks/upgrade.yml rename to roles/hdfs_upgrade/tasks/upgrade.yml diff --git a/roles/scale_hdfs/upgrade/tasks/upgrade_dir_pkg.yml b/roles/hdfs_upgrade/tasks/upgrade_dir_pkg.yml similarity index 100% rename from roles/scale_hdfs/upgrade/tasks/upgrade_dir_pkg.yml rename to roles/hdfs_upgrade/tasks/upgrade_dir_pkg.yml diff --git a/roles/scale_hdfs/upgrade/tasks/upgrade_local_pkg.yml b/roles/hdfs_upgrade/tasks/upgrade_local_pkg.yml similarity index 100% rename from roles/scale_hdfs/upgrade/tasks/upgrade_local_pkg.yml rename to roles/hdfs_upgrade/tasks/upgrade_local_pkg.yml diff --git a/roles/scale_hdfs/upgrade/tasks/upgrade_remote_pkg.yml b/roles/hdfs_upgrade/tasks/upgrade_remote_pkg.yml similarity index 100% rename from roles/scale_hdfs/upgrade/tasks/upgrade_remote_pkg.yml rename to roles/hdfs_upgrade/tasks/upgrade_remote_pkg.yml diff --git a/roles/scale_hdfs/upgrade/tasks/upgrade_repository.yml b/roles/hdfs_upgrade/tasks/upgrade_repository.yml similarity index 100% rename from roles/scale_hdfs/upgrade/tasks/upgrade_repository.yml rename to roles/hdfs_upgrade/tasks/upgrade_repository.yml diff --git a/roles/scale_hdfs/upgrade/tasks/yum/upgrade.yml b/roles/hdfs_upgrade/tasks/yum/upgrade.yml similarity index 100% rename from roles/scale_hdfs/upgrade/tasks/yum/upgrade.yml rename to roles/hdfs_upgrade/tasks/yum/upgrade.yml diff --git a/roles/scale_hdfs/upgrade/vars/main.yml b/roles/hdfs_upgrade/vars/main.yml similarity index 100% rename from roles/scale_hdfs/upgrade/vars/main.yml rename to roles/hdfs_upgrade/vars/main.yml diff --git a/roles/scale_hdfs/precheck/.travis.yml b/roles/hdfs_verify/.travis.yml similarity index 100% rename from roles/scale_hdfs/precheck/.travis.yml rename to roles/hdfs_verify/.travis.yml diff --git a/roles/scale_hdfs/postcheck/defaults/main.yml b/roles/hdfs_verify/defaults/main.yml similarity index 100% rename from roles/scale_hdfs/postcheck/defaults/main.yml rename to roles/hdfs_verify/defaults/main.yml diff --git a/roles/nfs/precheck/meta/main.yml b/roles/hdfs_verify/meta/main.yml similarity index 100% rename from roles/nfs/precheck/meta/main.yml rename to roles/hdfs_verify/meta/main.yml diff --git a/roles/scale_hdfs/postcheck/tasks/check.yml b/roles/hdfs_verify/tasks/check.yml similarity index 100% rename from roles/scale_hdfs/postcheck/tasks/check.yml rename to roles/hdfs_verify/tasks/check.yml diff --git a/roles/scale_hdfs/postcheck/tasks/main.yml b/roles/hdfs_verify/tasks/main.yml similarity index 100% rename from roles/scale_hdfs/postcheck/tasks/main.yml rename to roles/hdfs_verify/tasks/main.yml diff --git a/roles/scale_hdfs/postcheck/vars/main.yml b/roles/hdfs_verify/vars/main.yml similarity index 100% rename from roles/scale_hdfs/postcheck/vars/main.yml rename to roles/hdfs_verify/vars/main.yml diff --git a/roles/nfs/cluster/defaults/main.yml b/roles/nfs_configure/defaults/main.yml similarity index 100% rename from roles/nfs/cluster/defaults/main.yml rename to 
roles/nfs_configure/defaults/main.yml diff --git a/roles/nfs/cluster/meta/main.yml b/roles/nfs_configure/meta/main.yml similarity index 100% rename from roles/nfs/cluster/meta/main.yml rename to roles/nfs_configure/meta/main.yml diff --git a/roles/nfs/cluster/tasks/configure.yml b/roles/nfs_configure/tasks/configure.yml similarity index 100% rename from roles/nfs/cluster/tasks/configure.yml rename to roles/nfs_configure/tasks/configure.yml diff --git a/roles/nfs/cluster/tasks/main.yml b/roles/nfs_configure/tasks/main.yml similarity index 100% rename from roles/nfs/cluster/tasks/main.yml rename to roles/nfs_configure/tasks/main.yml diff --git a/roles/nfs/cluster/vars/main.yml b/roles/nfs_configure/vars/main.yml similarity index 100% rename from roles/nfs/cluster/vars/main.yml rename to roles/nfs_configure/vars/main.yml diff --git a/roles/nfs/node/defaults/main.yml b/roles/nfs_install/defaults/main.yml similarity index 100% rename from roles/nfs/node/defaults/main.yml rename to roles/nfs_install/defaults/main.yml diff --git a/roles/scale_ece/upgrade/handlers/main.yml b/roles/nfs_install/handlers/main.yml similarity index 100% rename from roles/scale_ece/upgrade/handlers/main.yml rename to roles/nfs_install/handlers/main.yml diff --git a/roles/nfs/node/meta/main.yml b/roles/nfs_install/meta/main.yml similarity index 100% rename from roles/nfs/node/meta/main.yml rename to roles/nfs_install/meta/main.yml diff --git a/roles/nfs/node/tasks/apt/install.yml b/roles/nfs_install/tasks/apt/install.yml similarity index 100% rename from roles/nfs/node/tasks/apt/install.yml rename to roles/nfs_install/tasks/apt/install.yml diff --git a/roles/nfs/node/tasks/install.yml b/roles/nfs_install/tasks/install.yml similarity index 100% rename from roles/nfs/node/tasks/install.yml rename to roles/nfs_install/tasks/install.yml diff --git a/roles/nfs/node/tasks/install_dir_pkg.yml b/roles/nfs_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/nfs/node/tasks/install_dir_pkg.yml rename to roles/nfs_install/tasks/install_dir_pkg.yml diff --git a/roles/nfs/node/tasks/install_local_pkg.yml b/roles/nfs_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/nfs/node/tasks/install_local_pkg.yml rename to roles/nfs_install/tasks/install_local_pkg.yml diff --git a/roles/nfs/node/tasks/install_remote_pkg.yml b/roles/nfs_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/nfs/node/tasks/install_remote_pkg.yml rename to roles/nfs_install/tasks/install_remote_pkg.yml diff --git a/roles/nfs/node/tasks/install_repository.yml b/roles/nfs_install/tasks/install_repository.yml similarity index 100% rename from roles/nfs/node/tasks/install_repository.yml rename to roles/nfs_install/tasks/install_repository.yml diff --git a/roles/nfs/node/tasks/main.yml b/roles/nfs_install/tasks/main.yml similarity index 100% rename from roles/nfs/node/tasks/main.yml rename to roles/nfs_install/tasks/main.yml diff --git a/roles/nfs/node/tasks/yum/install.yml b/roles/nfs_install/tasks/yum/install.yml similarity index 100% rename from roles/nfs/node/tasks/yum/install.yml rename to roles/nfs_install/tasks/yum/install.yml diff --git a/roles/nfs/node/tasks/zypper/install.yml b/roles/nfs_install/tasks/zypper/install.yml similarity index 100% rename from roles/nfs/node/tasks/zypper/install.yml rename to roles/nfs_install/tasks/zypper/install.yml diff --git a/roles/nfs/node/vars/main.yml b/roles/nfs_install/vars/main.yml similarity index 100% rename from roles/nfs/node/vars/main.yml rename to 
roles/nfs_install/vars/main.yml diff --git a/roles/scale_hdfs/postcheck/meta/main.yml b/roles/nfs_prepare/meta/main.yml similarity index 100% rename from roles/scale_hdfs/postcheck/meta/main.yml rename to roles/nfs_prepare/meta/main.yml diff --git a/roles/nfs/precheck/tasks/check.yml b/roles/nfs_prepare/tasks/check.yml similarity index 100% rename from roles/nfs/precheck/tasks/check.yml rename to roles/nfs_prepare/tasks/check.yml diff --git a/roles/nfs/precheck/tasks/main.yml b/roles/nfs_prepare/tasks/main.yml similarity index 100% rename from roles/nfs/precheck/tasks/main.yml rename to roles/nfs_prepare/tasks/main.yml diff --git a/roles/nfs/upgrade/defaults/main.yml b/roles/nfs_upgrade/defaults/main.yml similarity index 100% rename from roles/nfs/upgrade/defaults/main.yml rename to roles/nfs_upgrade/defaults/main.yml diff --git a/roles/scale_hdfs/node/handlers/main.yml b/roles/nfs_upgrade/handlers/main.yml similarity index 100% rename from roles/scale_hdfs/node/handlers/main.yml rename to roles/nfs_upgrade/handlers/main.yml diff --git a/roles/scale_hdfs/upgrade/meta/main.yml b/roles/nfs_upgrade/meta/main.yml similarity index 100% rename from roles/scale_hdfs/upgrade/meta/main.yml rename to roles/nfs_upgrade/meta/main.yml diff --git a/roles/nfs/upgrade/tasks/apt/install.yml b/roles/nfs_upgrade/tasks/apt/install.yml similarity index 100% rename from roles/nfs/upgrade/tasks/apt/install.yml rename to roles/nfs_upgrade/tasks/apt/install.yml diff --git a/roles/nfs/upgrade/tasks/install.yml b/roles/nfs_upgrade/tasks/install.yml similarity index 100% rename from roles/nfs/upgrade/tasks/install.yml rename to roles/nfs_upgrade/tasks/install.yml diff --git a/roles/nfs/upgrade/tasks/install_dir_pkg.yml b/roles/nfs_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/nfs/upgrade/tasks/install_dir_pkg.yml rename to roles/nfs_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/nfs/upgrade/tasks/install_local_pkg.yml b/roles/nfs_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/nfs/upgrade/tasks/install_local_pkg.yml rename to roles/nfs_upgrade/tasks/install_local_pkg.yml diff --git a/roles/nfs/upgrade/tasks/install_remote_pkg.yml b/roles/nfs_upgrade/tasks/install_remote_pkg.yml similarity index 100% rename from roles/nfs/upgrade/tasks/install_remote_pkg.yml rename to roles/nfs_upgrade/tasks/install_remote_pkg.yml diff --git a/roles/nfs/upgrade/tasks/install_repository.yml b/roles/nfs_upgrade/tasks/install_repository.yml similarity index 100% rename from roles/nfs/upgrade/tasks/install_repository.yml rename to roles/nfs_upgrade/tasks/install_repository.yml diff --git a/roles/nfs/upgrade/tasks/main.yml b/roles/nfs_upgrade/tasks/main.yml similarity index 100% rename from roles/nfs/upgrade/tasks/main.yml rename to roles/nfs_upgrade/tasks/main.yml diff --git a/roles/nfs/upgrade/tasks/yum/install.yml b/roles/nfs_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/nfs/upgrade/tasks/yum/install.yml rename to roles/nfs_upgrade/tasks/yum/install.yml diff --git a/roles/nfs/upgrade/tasks/zypper/install.yml b/roles/nfs_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/nfs/upgrade/tasks/zypper/install.yml rename to roles/nfs_upgrade/tasks/zypper/install.yml diff --git a/roles/nfs/upgrade/vars/main.yml b/roles/nfs_upgrade/vars/main.yml similarity index 100% rename from roles/nfs/upgrade/vars/main.yml rename to roles/nfs_upgrade/vars/main.yml diff --git a/roles/nfs/postcheck/defaults/main.yml b/roles/nfs_verify/defaults/main.yml 
similarity index 100% rename from roles/nfs/postcheck/defaults/main.yml rename to roles/nfs_verify/defaults/main.yml diff --git a/roles/scale_hdfs/precheck/meta/main.yml b/roles/nfs_verify/meta/main.yml similarity index 100% rename from roles/scale_hdfs/precheck/meta/main.yml rename to roles/nfs_verify/meta/main.yml diff --git a/roles/nfs/postcheck/tasks/check.yml b/roles/nfs_verify/tasks/check.yml similarity index 100% rename from roles/nfs/postcheck/tasks/check.yml rename to roles/nfs_verify/tasks/check.yml diff --git a/roles/nfs/postcheck/tasks/main.yml b/roles/nfs_verify/tasks/main.yml similarity index 100% rename from roles/nfs/postcheck/tasks/main.yml rename to roles/nfs_verify/tasks/main.yml diff --git a/roles/nfs/postcheck/vars/main.yml b/roles/nfs_verify/vars/main.yml similarity index 100% rename from roles/nfs/postcheck/vars/main.yml rename to roles/nfs_verify/vars/main.yml diff --git a/roles/scale_object/cluster/defaults/main.yml b/roles/obj_configure/defaults/main.yml similarity index 100% rename from roles/scale_object/cluster/defaults/main.yml rename to roles/obj_configure/defaults/main.yml diff --git a/roles/scale_object/cluster/meta/main.yml b/roles/obj_configure/meta/main.yml similarity index 100% rename from roles/scale_object/cluster/meta/main.yml rename to roles/obj_configure/meta/main.yml diff --git a/roles/scale_object/cluster/tasks/configure.yml b/roles/obj_configure/tasks/configure.yml similarity index 100% rename from roles/scale_object/cluster/tasks/configure.yml rename to roles/obj_configure/tasks/configure.yml diff --git a/roles/scale_object/cluster/tasks/configure_pmswift.yml b/roles/obj_configure/tasks/configure_pmswift.yml similarity index 100% rename from roles/scale_object/cluster/tasks/configure_pmswift.yml rename to roles/obj_configure/tasks/configure_pmswift.yml diff --git a/roles/scale_object/cluster/tasks/main.yml b/roles/obj_configure/tasks/main.yml similarity index 100% rename from roles/scale_object/cluster/tasks/main.yml rename to roles/obj_configure/tasks/main.yml diff --git a/roles/scale_object/cluster/templates/obj_passwd.j2 b/roles/obj_configure/templates/obj_passwd.j2 similarity index 100% rename from roles/scale_object/cluster/templates/obj_passwd.j2 rename to roles/obj_configure/templates/obj_passwd.j2 diff --git a/roles/scale_object/cluster/vars/main.yml b/roles/obj_configure/vars/main.yml similarity index 100% rename from roles/scale_object/cluster/vars/main.yml rename to roles/obj_configure/vars/main.yml diff --git a/roles/scale_object/node/defaults/main.yml b/roles/obj_install/defaults/main.yml similarity index 100% rename from roles/scale_object/node/defaults/main.yml rename to roles/obj_install/defaults/main.yml diff --git a/roles/scale_object/node/handlers/main.yml b/roles/obj_install/handlers/main.yml similarity index 100% rename from roles/scale_object/node/handlers/main.yml rename to roles/obj_install/handlers/main.yml diff --git a/roles/scale_object/node/meta/main.yml b/roles/obj_install/meta/main.yml similarity index 100% rename from roles/scale_object/node/meta/main.yml rename to roles/obj_install/meta/main.yml diff --git a/roles/scale_object/node/tasks/install.yml b/roles/obj_install/tasks/install.yml similarity index 100% rename from roles/scale_object/node/tasks/install.yml rename to roles/obj_install/tasks/install.yml diff --git a/roles/scale_object/node/tasks/install_dir_pkg.yml b/roles/obj_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/scale_object/node/tasks/install_dir_pkg.yml rename to 
roles/obj_install/tasks/install_dir_pkg.yml diff --git a/roles/scale_object/node/tasks/install_local_pkg.yml b/roles/obj_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/scale_object/node/tasks/install_local_pkg.yml rename to roles/obj_install/tasks/install_local_pkg.yml diff --git a/roles/scale_object/node/tasks/install_pmswift.yml b/roles/obj_install/tasks/install_pmswift.yml similarity index 100% rename from roles/scale_object/node/tasks/install_pmswift.yml rename to roles/obj_install/tasks/install_pmswift.yml diff --git a/roles/scale_object/node/tasks/install_remote_pkg.yml b/roles/obj_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/scale_object/node/tasks/install_remote_pkg.yml rename to roles/obj_install/tasks/install_remote_pkg.yml diff --git a/roles/scale_object/node/tasks/install_repository.yml b/roles/obj_install/tasks/install_repository.yml similarity index 100% rename from roles/scale_object/node/tasks/install_repository.yml rename to roles/obj_install/tasks/install_repository.yml diff --git a/roles/scale_object/node/tasks/main.yml b/roles/obj_install/tasks/main.yml similarity index 100% rename from roles/scale_object/node/tasks/main.yml rename to roles/obj_install/tasks/main.yml diff --git a/roles/scale_object/node/tasks/yum/install.yml b/roles/obj_install/tasks/yum/install.yml similarity index 100% rename from roles/scale_object/node/tasks/yum/install.yml rename to roles/obj_install/tasks/yum/install.yml diff --git a/roles/scale_object/node/vars/main.yml b/roles/obj_install/vars/main.yml similarity index 100% rename from roles/scale_object/node/vars/main.yml rename to roles/obj_install/vars/main.yml diff --git a/roles/scale_object/precheck/default/main.yml b/roles/obj_prepare/default/main.yml similarity index 100% rename from roles/scale_object/precheck/default/main.yml rename to roles/obj_prepare/default/main.yml diff --git a/roles/scale_object/precheck/meta/main.yml b/roles/obj_prepare/meta/main.yml similarity index 100% rename from roles/scale_object/precheck/meta/main.yml rename to roles/obj_prepare/meta/main.yml diff --git a/roles/scale_object/precheck/tasks/check.yml b/roles/obj_prepare/tasks/check.yml similarity index 100% rename from roles/scale_object/precheck/tasks/check.yml rename to roles/obj_prepare/tasks/check.yml diff --git a/roles/scale_object/precheck/tasks/inventory_check.yml b/roles/obj_prepare/tasks/inventory_check.yml similarity index 100% rename from roles/scale_object/precheck/tasks/inventory_check.yml rename to roles/obj_prepare/tasks/inventory_check.yml diff --git a/roles/scale_object/precheck/tasks/main.yml b/roles/obj_prepare/tasks/main.yml similarity index 100% rename from roles/scale_object/precheck/tasks/main.yml rename to roles/obj_prepare/tasks/main.yml diff --git a/roles/scale_object/upgrade/defaults/main.yml b/roles/obj_upgrade/defaults/main.yml similarity index 100% rename from roles/scale_object/upgrade/defaults/main.yml rename to roles/obj_upgrade/defaults/main.yml diff --git a/roles/scale_object/upgrade/handlers/main.yml b/roles/obj_upgrade/handlers/main.yml similarity index 100% rename from roles/scale_object/upgrade/handlers/main.yml rename to roles/obj_upgrade/handlers/main.yml diff --git a/roles/scale_object/upgrade/meta/main.yml b/roles/obj_upgrade/meta/main.yml similarity index 100% rename from roles/scale_object/upgrade/meta/main.yml rename to roles/obj_upgrade/meta/main.yml diff --git a/roles/scale_object/upgrade/tasks/install.yml b/roles/obj_upgrade/tasks/install.yml 
similarity index 100% rename from roles/scale_object/upgrade/tasks/install.yml rename to roles/obj_upgrade/tasks/install.yml diff --git a/roles/scale_object/upgrade/tasks/install_dir_pkg.yml b/roles/obj_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/scale_object/upgrade/tasks/install_dir_pkg.yml rename to roles/obj_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/scale_object/upgrade/tasks/install_local_pkg.yml b/roles/obj_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/scale_object/upgrade/tasks/install_local_pkg.yml rename to roles/obj_upgrade/tasks/install_local_pkg.yml diff --git a/roles/scale_object/upgrade/tasks/install_pmswift.yml b/roles/obj_upgrade/tasks/install_pmswift.yml similarity index 100% rename from roles/scale_object/upgrade/tasks/install_pmswift.yml rename to roles/obj_upgrade/tasks/install_pmswift.yml diff --git a/roles/scale_object/upgrade/tasks/install_remote_pkg.yml b/roles/obj_upgrade/tasks/install_remote_pkg.yml similarity index 100% rename from roles/scale_object/upgrade/tasks/install_remote_pkg.yml rename to roles/obj_upgrade/tasks/install_remote_pkg.yml diff --git a/roles/scale_object/upgrade/tasks/install_repository.yml b/roles/obj_upgrade/tasks/install_repository.yml similarity index 100% rename from roles/scale_object/upgrade/tasks/install_repository.yml rename to roles/obj_upgrade/tasks/install_repository.yml diff --git a/roles/scale_object/upgrade/tasks/main.yml b/roles/obj_upgrade/tasks/main.yml similarity index 100% rename from roles/scale_object/upgrade/tasks/main.yml rename to roles/obj_upgrade/tasks/main.yml diff --git a/roles/scale_object/upgrade/tasks/yum/install.yml b/roles/obj_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/scale_object/upgrade/tasks/yum/install.yml rename to roles/obj_upgrade/tasks/yum/install.yml diff --git a/roles/scale_object/upgrade/vars/main.yml b/roles/obj_upgrade/vars/main.yml similarity index 100% rename from roles/scale_object/upgrade/vars/main.yml rename to roles/obj_upgrade/vars/main.yml diff --git a/roles/scale_object/postcheck/meta/main.yml b/roles/obj_verify/meta/main.yml similarity index 100% rename from roles/scale_object/postcheck/meta/main.yml rename to roles/obj_verify/meta/main.yml diff --git a/roles/scale_object/postcheck/tasks/check.yml b/roles/obj_verify/tasks/check.yml similarity index 100% rename from roles/scale_object/postcheck/tasks/check.yml rename to roles/obj_verify/tasks/check.yml diff --git a/roles/scale_object/postcheck/tasks/main.yml b/roles/obj_verify/tasks/main.yml similarity index 100% rename from roles/scale_object/postcheck/tasks/main.yml rename to roles/obj_verify/tasks/main.yml diff --git a/roles/scale_object/postcheck/vars/main.yml b/roles/obj_verify/vars/main.yml similarity index 100% rename from roles/scale_object/postcheck/vars/main.yml rename to roles/obj_verify/vars/main.yml diff --git a/roles/zimon/cluster/defaults/main.yml b/roles/perfmon_configure/defaults/main.yml similarity index 100% rename from roles/zimon/cluster/defaults/main.yml rename to roles/perfmon_configure/defaults/main.yml diff --git a/roles/zimon/cluster/meta/main.yml b/roles/perfmon_configure/meta/main.yml similarity index 100% rename from roles/zimon/cluster/meta/main.yml rename to roles/perfmon_configure/meta/main.yml diff --git a/roles/zimon/cluster/tasks/configure.yml b/roles/perfmon_configure/tasks/configure.yml similarity index 100% rename from roles/zimon/cluster/tasks/configure.yml rename to 
roles/perfmon_configure/tasks/configure.yml diff --git a/roles/zimon/cluster/tasks/main.yml b/roles/perfmon_configure/tasks/main.yml similarity index 100% rename from roles/zimon/cluster/tasks/main.yml rename to roles/perfmon_configure/tasks/main.yml diff --git a/roles/zimon/node/defaults/main.yml b/roles/perfmon_install/defaults/main.yml similarity index 100% rename from roles/zimon/node/defaults/main.yml rename to roles/perfmon_install/defaults/main.yml diff --git a/roles/zimon/node/meta/main.yml b/roles/perfmon_install/meta/main.yml similarity index 100% rename from roles/zimon/node/meta/main.yml rename to roles/perfmon_install/meta/main.yml diff --git a/roles/zimon/node/tasks/apt/install.yml b/roles/perfmon_install/tasks/apt/install.yml similarity index 100% rename from roles/zimon/node/tasks/apt/install.yml rename to roles/perfmon_install/tasks/apt/install.yml diff --git a/roles/zimon/node/tasks/install.yml b/roles/perfmon_install/tasks/install.yml similarity index 100% rename from roles/zimon/node/tasks/install.yml rename to roles/perfmon_install/tasks/install.yml diff --git a/roles/zimon/node/tasks/install_dir_pkg.yml b/roles/perfmon_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/zimon/node/tasks/install_dir_pkg.yml rename to roles/perfmon_install/tasks/install_dir_pkg.yml diff --git a/roles/zimon/node/tasks/install_local_pkg.yml b/roles/perfmon_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/zimon/node/tasks/install_local_pkg.yml rename to roles/perfmon_install/tasks/install_local_pkg.yml diff --git a/roles/zimon/node/tasks/install_remote_pkg.yml b/roles/perfmon_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/zimon/node/tasks/install_remote_pkg.yml rename to roles/perfmon_install/tasks/install_remote_pkg.yml diff --git a/roles/zimon/node/tasks/install_repository.yml b/roles/perfmon_install/tasks/install_repository.yml similarity index 100% rename from roles/zimon/node/tasks/install_repository.yml rename to roles/perfmon_install/tasks/install_repository.yml diff --git a/roles/zimon/node/tasks/main.yml b/roles/perfmon_install/tasks/main.yml similarity index 100% rename from roles/zimon/node/tasks/main.yml rename to roles/perfmon_install/tasks/main.yml diff --git a/roles/zimon/node/tasks/yum/install.yml b/roles/perfmon_install/tasks/yum/install.yml similarity index 100% rename from roles/zimon/node/tasks/yum/install.yml rename to roles/perfmon_install/tasks/yum/install.yml diff --git a/roles/zimon/node/tasks/zypper/install.yml b/roles/perfmon_install/tasks/zypper/install.yml similarity index 100% rename from roles/zimon/node/tasks/zypper/install.yml rename to roles/perfmon_install/tasks/zypper/install.yml diff --git a/roles/zimon/node/vars/main.yml b/roles/perfmon_install/vars/main.yml similarity index 100% rename from roles/zimon/node/vars/main.yml rename to roles/perfmon_install/vars/main.yml diff --git a/roles/zimon/precheck/defaults/main.yml b/roles/perfmon_prepare/defaults/main.yml similarity index 100% rename from roles/zimon/precheck/defaults/main.yml rename to roles/perfmon_prepare/defaults/main.yml diff --git a/roles/zimon/precheck/meta/main.yml b/roles/perfmon_prepare/meta/main.yml similarity index 100% rename from roles/zimon/precheck/meta/main.yml rename to roles/perfmon_prepare/meta/main.yml diff --git a/roles/zimon/precheck/tasks/main.yml b/roles/perfmon_prepare/tasks/main.yml similarity index 100% rename from roles/zimon/precheck/tasks/main.yml rename to 
roles/perfmon_prepare/tasks/main.yml diff --git a/roles/zimon/precheck/vars/main.yml b/roles/perfmon_prepare/vars/main.yml similarity index 100% rename from roles/zimon/precheck/vars/main.yml rename to roles/perfmon_prepare/vars/main.yml diff --git a/roles/zimon/upgrade/defaults/main.yml b/roles/perfmon_upgrade/defaults/main.yml similarity index 100% rename from roles/zimon/upgrade/defaults/main.yml rename to roles/perfmon_upgrade/defaults/main.yml diff --git a/roles/zimon/upgrade/meta/main.yml b/roles/perfmon_upgrade/meta/main.yml similarity index 100% rename from roles/zimon/upgrade/meta/main.yml rename to roles/perfmon_upgrade/meta/main.yml diff --git a/roles/zimon/upgrade/tasks/apt/install.yml b/roles/perfmon_upgrade/tasks/apt/install.yml similarity index 100% rename from roles/zimon/upgrade/tasks/apt/install.yml rename to roles/perfmon_upgrade/tasks/apt/install.yml diff --git a/roles/zimon/upgrade/tasks/install.yml b/roles/perfmon_upgrade/tasks/install.yml similarity index 100% rename from roles/zimon/upgrade/tasks/install.yml rename to roles/perfmon_upgrade/tasks/install.yml diff --git a/roles/zimon/upgrade/tasks/install_dir_pkg.yml b/roles/perfmon_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/zimon/upgrade/tasks/install_dir_pkg.yml rename to roles/perfmon_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/zimon/upgrade/tasks/install_local_pkg.yml b/roles/perfmon_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/zimon/upgrade/tasks/install_local_pkg.yml rename to roles/perfmon_upgrade/tasks/install_local_pkg.yml diff --git a/roles/zimon/upgrade/tasks/install_remote_pkg.yml b/roles/perfmon_upgrade/tasks/install_remote_pkg.yml similarity index 100% rename from roles/zimon/upgrade/tasks/install_remote_pkg.yml rename to roles/perfmon_upgrade/tasks/install_remote_pkg.yml diff --git a/roles/zimon/upgrade/tasks/install_repository.yml b/roles/perfmon_upgrade/tasks/install_repository.yml similarity index 100% rename from roles/zimon/upgrade/tasks/install_repository.yml rename to roles/perfmon_upgrade/tasks/install_repository.yml diff --git a/roles/zimon/upgrade/tasks/main.yml b/roles/perfmon_upgrade/tasks/main.yml similarity index 100% rename from roles/zimon/upgrade/tasks/main.yml rename to roles/perfmon_upgrade/tasks/main.yml diff --git a/roles/zimon/upgrade/tasks/yum/install.yml b/roles/perfmon_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/zimon/upgrade/tasks/yum/install.yml rename to roles/perfmon_upgrade/tasks/yum/install.yml diff --git a/roles/zimon/upgrade/tasks/zypper/install.yml b/roles/perfmon_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/zimon/upgrade/tasks/zypper/install.yml rename to roles/perfmon_upgrade/tasks/zypper/install.yml diff --git a/roles/zimon/upgrade/vars/main.yml b/roles/perfmon_upgrade/vars/main.yml similarity index 100% rename from roles/zimon/upgrade/vars/main.yml rename to roles/perfmon_upgrade/vars/main.yml diff --git a/roles/zimon/postcheck/defaults/main.yml b/roles/perfmon_verify/defaults/main.yml similarity index 100% rename from roles/zimon/postcheck/defaults/main.yml rename to roles/perfmon_verify/defaults/main.yml diff --git a/roles/zimon/postcheck/meta/main.yml b/roles/perfmon_verify/meta/main.yml similarity index 100% rename from roles/zimon/postcheck/meta/main.yml rename to roles/perfmon_verify/meta/main.yml diff --git a/roles/zimon/postcheck/tasks/main.yml b/roles/perfmon_verify/tasks/main.yml similarity index 100% rename from 
roles/zimon/postcheck/tasks/main.yml rename to roles/perfmon_verify/tasks/main.yml diff --git a/roles/remote_mount/.yamllint b/roles/remotemount_configure/.yamllint similarity index 100% rename from roles/remote_mount/.yamllint rename to roles/remotemount_configure/.yamllint diff --git a/roles/remote_mount/README.md b/roles/remotemount_configure/README.md similarity index 100% rename from roles/remote_mount/README.md rename to roles/remotemount_configure/README.md diff --git a/roles/remote_mount/defaults/main.yml b/roles/remotemount_configure/defaults/main.yml similarity index 100% rename from roles/remote_mount/defaults/main.yml rename to roles/remotemount_configure/defaults/main.yml diff --git a/roles/remote_mount/handlers/main.yml b/roles/remotemount_configure/handlers/main.yml similarity index 100% rename from roles/remote_mount/handlers/main.yml rename to roles/remotemount_configure/handlers/main.yml diff --git a/roles/remote_mount/meta/main.yml b/roles/remotemount_configure/meta/main.yml similarity index 100% rename from roles/remote_mount/meta/main.yml rename to roles/remotemount_configure/meta/main.yml diff --git a/roles/remote_mount/molecule/default/INSTALL.rst b/roles/remotemount_configure/molecule/default/INSTALL.rst similarity index 100% rename from roles/remote_mount/molecule/default/INSTALL.rst rename to roles/remotemount_configure/molecule/default/INSTALL.rst diff --git a/roles/remote_mount/molecule/default/converge.yml b/roles/remotemount_configure/molecule/default/converge.yml similarity index 100% rename from roles/remote_mount/molecule/default/converge.yml rename to roles/remotemount_configure/molecule/default/converge.yml diff --git a/roles/remote_mount/molecule/default/molecule.yml b/roles/remotemount_configure/molecule/default/molecule.yml similarity index 100% rename from roles/remote_mount/molecule/default/molecule.yml rename to roles/remotemount_configure/molecule/default/molecule.yml diff --git a/roles/remote_mount/molecule/default/verify.yml b/roles/remotemount_configure/molecule/default/verify.yml similarity index 100% rename from roles/remote_mount/molecule/default/verify.yml rename to roles/remotemount_configure/molecule/default/verify.yml diff --git a/roles/remote_mount/tasks/cleanup_filesystem_api_cli.yml b/roles/remotemount_configure/tasks/cleanup_filesystem_api_cli.yml similarity index 100% rename from roles/remote_mount/tasks/cleanup_filesystem_api_cli.yml rename to roles/remotemount_configure/tasks/cleanup_filesystem_api_cli.yml diff --git a/roles/remote_mount/tasks/cleanup_filesystems.yml b/roles/remotemount_configure/tasks/cleanup_filesystems.yml similarity index 100% rename from roles/remote_mount/tasks/cleanup_filesystems.yml rename to roles/remotemount_configure/tasks/cleanup_filesystems.yml diff --git a/roles/remote_mount/tasks/cleanup_remote_mount.yml b/roles/remotemount_configure/tasks/cleanup_remote_mount.yml similarity index 100% rename from roles/remote_mount/tasks/cleanup_remote_mount.yml rename to roles/remotemount_configure/tasks/cleanup_remote_mount.yml diff --git a/roles/remote_mount/tasks/cleanup_remote_mount_api_cli.yml b/roles/remotemount_configure/tasks/cleanup_remote_mount_api_cli.yml similarity index 100% rename from roles/remote_mount/tasks/cleanup_remote_mount_api_cli.yml rename to roles/remotemount_configure/tasks/cleanup_remote_mount_api_cli.yml diff --git a/roles/remote_mount/tasks/delete_remote_cluster.yml b/roles/remotemount_configure/tasks/delete_remote_cluster.yml similarity index 100% rename from 
roles/remote_mount/tasks/delete_remote_cluster.yml rename to roles/remotemount_configure/tasks/delete_remote_cluster.yml diff --git a/roles/remote_mount/tasks/main.yml b/roles/remotemount_configure/tasks/main.yml similarity index 100% rename from roles/remote_mount/tasks/main.yml rename to roles/remotemount_configure/tasks/main.yml diff --git a/roles/remote_mount/tasks/mount_filesystem_api_cli.yml b/roles/remotemount_configure/tasks/mount_filesystem_api_cli.yml similarity index 100% rename from roles/remote_mount/tasks/mount_filesystem_api_cli.yml rename to roles/remotemount_configure/tasks/mount_filesystem_api_cli.yml diff --git a/roles/remote_mount/tasks/mount_filesystems.yml b/roles/remotemount_configure/tasks/mount_filesystems.yml similarity index 100% rename from roles/remote_mount/tasks/mount_filesystems.yml rename to roles/remotemount_configure/tasks/mount_filesystems.yml diff --git a/roles/remote_mount/tasks/precheck.yml b/roles/remotemount_configure/tasks/precheck.yml similarity index 100% rename from roles/remote_mount/tasks/precheck.yml rename to roles/remotemount_configure/tasks/precheck.yml diff --git a/roles/remote_mount/tasks/remotecluster.yml b/roles/remotemount_configure/tasks/remotecluster.yml similarity index 100% rename from roles/remote_mount/tasks/remotecluster.yml rename to roles/remotemount_configure/tasks/remotecluster.yml diff --git a/roles/remote_mount/tasks/remotecluster_api_cli.yml b/roles/remotemount_configure/tasks/remotecluster_api_cli.yml similarity index 100% rename from roles/remote_mount/tasks/remotecluster_api_cli.yml rename to roles/remotemount_configure/tasks/remotecluster_api_cli.yml diff --git a/roles/scale_fileauditlogging/upgrade/tests/inventory b/roles/remotemount_configure/tests/inventory similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tests/inventory rename to roles/remotemount_configure/tests/inventory diff --git a/roles/remote_mount/tests/test.yml b/roles/remotemount_configure/tests/test.yml similarity index 100% rename from roles/remote_mount/tests/test.yml rename to roles/remotemount_configure/tests/test.yml diff --git a/roles/remote_mount/vars/main.yml b/roles/remotemount_configure/vars/main.yml similarity index 100% rename from roles/remote_mount/vars/main.yml rename to roles/remotemount_configure/vars/main.yml diff --git a/roles/smb/cluster/defaults/main.yml b/roles/smb_configure/defaults/main.yml similarity index 100% rename from roles/smb/cluster/defaults/main.yml rename to roles/smb_configure/defaults/main.yml diff --git a/roles/smb/cluster/meta/main.yml b/roles/smb_configure/meta/main.yml similarity index 100% rename from roles/smb/cluster/meta/main.yml rename to roles/smb_configure/meta/main.yml diff --git a/roles/smb/cluster/tasks/configure.yml b/roles/smb_configure/tasks/configure.yml similarity index 100% rename from roles/smb/cluster/tasks/configure.yml rename to roles/smb_configure/tasks/configure.yml diff --git a/roles/smb/cluster/tasks/main.yml b/roles/smb_configure/tasks/main.yml similarity index 100% rename from roles/smb/cluster/tasks/main.yml rename to roles/smb_configure/tasks/main.yml diff --git a/roles/smb/cluster/vars/main.yml b/roles/smb_configure/vars/main.yml similarity index 100% rename from roles/smb/cluster/vars/main.yml rename to roles/smb_configure/vars/main.yml diff --git a/roles/smb/node/defaults/main.yml b/roles/smb_install/defaults/main.yml similarity index 100% rename from roles/smb/node/defaults/main.yml rename to roles/smb_install/defaults/main.yml diff --git 
a/roles/smb/node/handlers/main.yml b/roles/smb_install/handlers/main.yml similarity index 100% rename from roles/smb/node/handlers/main.yml rename to roles/smb_install/handlers/main.yml diff --git a/roles/smb/node/meta/main.yml b/roles/smb_install/meta/main.yml similarity index 100% rename from roles/smb/node/meta/main.yml rename to roles/smb_install/meta/main.yml diff --git a/roles/smb/node/tasks/apt/install.yml b/roles/smb_install/tasks/apt/install.yml similarity index 100% rename from roles/smb/node/tasks/apt/install.yml rename to roles/smb_install/tasks/apt/install.yml diff --git a/roles/smb/node/tasks/install.yml b/roles/smb_install/tasks/install.yml similarity index 100% rename from roles/smb/node/tasks/install.yml rename to roles/smb_install/tasks/install.yml diff --git a/roles/smb/node/tasks/install_dir_pkg.yml b/roles/smb_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/smb/node/tasks/install_dir_pkg.yml rename to roles/smb_install/tasks/install_dir_pkg.yml diff --git a/roles/smb/node/tasks/install_local_pkg.yml b/roles/smb_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/smb/node/tasks/install_local_pkg.yml rename to roles/smb_install/tasks/install_local_pkg.yml diff --git a/roles/smb/node/tasks/install_remote_pkg.yml b/roles/smb_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/smb/node/tasks/install_remote_pkg.yml rename to roles/smb_install/tasks/install_remote_pkg.yml diff --git a/roles/smb/node/tasks/install_repository.yml b/roles/smb_install/tasks/install_repository.yml similarity index 100% rename from roles/smb/node/tasks/install_repository.yml rename to roles/smb_install/tasks/install_repository.yml diff --git a/roles/smb/node/tasks/main.yml b/roles/smb_install/tasks/main.yml similarity index 100% rename from roles/smb/node/tasks/main.yml rename to roles/smb_install/tasks/main.yml diff --git a/roles/smb/node/tasks/yum/install.yml b/roles/smb_install/tasks/yum/install.yml similarity index 100% rename from roles/smb/node/tasks/yum/install.yml rename to roles/smb_install/tasks/yum/install.yml diff --git a/roles/smb/node/tasks/zypper/install.yml b/roles/smb_install/tasks/zypper/install.yml similarity index 100% rename from roles/smb/node/tasks/zypper/install.yml rename to roles/smb_install/tasks/zypper/install.yml diff --git a/roles/smb/node/vars/main.yml b/roles/smb_install/vars/main.yml similarity index 100% rename from roles/smb/node/vars/main.yml rename to roles/smb_install/vars/main.yml diff --git a/roles/smb/postcheck/meta/main.yml b/roles/smb_prepare/meta/main.yml similarity index 100% rename from roles/smb/postcheck/meta/main.yml rename to roles/smb_prepare/meta/main.yml diff --git a/roles/smb/precheck/tasks/check.yml b/roles/smb_prepare/tasks/check.yml similarity index 100% rename from roles/smb/precheck/tasks/check.yml rename to roles/smb_prepare/tasks/check.yml diff --git a/roles/smb/precheck/tasks/main.yml b/roles/smb_prepare/tasks/main.yml similarity index 100% rename from roles/smb/precheck/tasks/main.yml rename to roles/smb_prepare/tasks/main.yml diff --git a/roles/smb/upgrade/defaults/main.yml b/roles/smb_upgrade/defaults/main.yml similarity index 100% rename from roles/smb/upgrade/defaults/main.yml rename to roles/smb_upgrade/defaults/main.yml diff --git a/roles/smb/upgrade/handlers/main.yml b/roles/smb_upgrade/handlers/main.yml similarity index 100% rename from roles/smb/upgrade/handlers/main.yml rename to roles/smb_upgrade/handlers/main.yml diff --git 
a/roles/smb/upgrade/meta/main.yml b/roles/smb_upgrade/meta/main.yml similarity index 100% rename from roles/smb/upgrade/meta/main.yml rename to roles/smb_upgrade/meta/main.yml diff --git a/roles/smb/upgrade/tasks/apt/install.yml b/roles/smb_upgrade/tasks/apt/install.yml similarity index 100% rename from roles/smb/upgrade/tasks/apt/install.yml rename to roles/smb_upgrade/tasks/apt/install.yml diff --git a/roles/smb/upgrade/tasks/install.yml b/roles/smb_upgrade/tasks/install.yml similarity index 100% rename from roles/smb/upgrade/tasks/install.yml rename to roles/smb_upgrade/tasks/install.yml diff --git a/roles/smb/upgrade/tasks/install_dir_pkg.yml b/roles/smb_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/smb/upgrade/tasks/install_dir_pkg.yml rename to roles/smb_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/smb/upgrade/tasks/install_local_pkg.yml b/roles/smb_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/smb/upgrade/tasks/install_local_pkg.yml rename to roles/smb_upgrade/tasks/install_local_pkg.yml diff --git a/roles/smb/upgrade/tasks/install_remote_pkg.yml b/roles/smb_upgrade/tasks/install_remote_pkg.yml similarity index 100% rename from roles/smb/upgrade/tasks/install_remote_pkg.yml rename to roles/smb_upgrade/tasks/install_remote_pkg.yml diff --git a/roles/smb/upgrade/tasks/install_repository.yml b/roles/smb_upgrade/tasks/install_repository.yml similarity index 100% rename from roles/smb/upgrade/tasks/install_repository.yml rename to roles/smb_upgrade/tasks/install_repository.yml diff --git a/roles/smb/upgrade/tasks/main.yml b/roles/smb_upgrade/tasks/main.yml similarity index 100% rename from roles/smb/upgrade/tasks/main.yml rename to roles/smb_upgrade/tasks/main.yml diff --git a/roles/smb/upgrade/tasks/yum/install.yml b/roles/smb_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/smb/upgrade/tasks/yum/install.yml rename to roles/smb_upgrade/tasks/yum/install.yml diff --git a/roles/smb/upgrade/tasks/zypper/install.yml b/roles/smb_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/smb/upgrade/tasks/zypper/install.yml rename to roles/smb_upgrade/tasks/zypper/install.yml diff --git a/roles/smb/upgrade/vars/main.yml b/roles/smb_upgrade/vars/main.yml similarity index 100% rename from roles/smb/upgrade/vars/main.yml rename to roles/smb_upgrade/vars/main.yml diff --git a/roles/smb/postcheck/defaults/main.yml b/roles/smb_verify/defaults/main.yml similarity index 100% rename from roles/smb/postcheck/defaults/main.yml rename to roles/smb_verify/defaults/main.yml diff --git a/roles/smb/precheck/meta/main.yml b/roles/smb_verify/meta/main.yml similarity index 100% rename from roles/smb/precheck/meta/main.yml rename to roles/smb_verify/meta/main.yml diff --git a/roles/smb/postcheck/tasks/check.yml b/roles/smb_verify/tasks/check.yml similarity index 100% rename from roles/smb/postcheck/tasks/check.yml rename to roles/smb_verify/tasks/check.yml diff --git a/roles/smb/postcheck/tasks/main.yml b/roles/smb_verify/tasks/main.yml similarity index 100% rename from roles/smb/postcheck/tasks/main.yml rename to roles/smb_verify/tasks/main.yml diff --git a/roles/smb/postcheck/vars/main.yml b/roles/smb_verify/vars/main.yml similarity index 100% rename from roles/smb/postcheck/vars/main.yml rename to roles/smb_verify/vars/main.yml From a16aeb46d977b7b4b87ab6440a2bd6fc07a7f066 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Sun, 31 Oct 2021 20:47:45 +0100 Subject: [PATCH 009/113] Add simple migration script 
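The script rewrites role references in place and keeps a backup copy of the original file. As a rough illustration of the intended usage (the playbook path below is only an example, not part of this change), it might be run against one file at a time and the result compared with the backup:

    ./migrate.sh samples/playbook_storage.yml
    diff samples/playbook_storage.yml.bak samples/playbook_storage.yml

After the run, legacy references such as core/precheck, core/node, core/cluster and core/postcheck should read core_prepare, core_install, core_configure and core_verify, while the untouched original remains available as samples/playbook_storage.yml.bak.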
Signed-off-by: Achim Christ --- migrate.sh | 74 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100755 migrate.sh diff --git a/migrate.sh b/migrate.sh new file mode 100755 index 00000000..2a5f3f6a --- /dev/null +++ b/migrate.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +# Replaces old role names with new role names in any text file + +usage () { + echo "Usage: $0 filename" + exit 1 +} + +[ "$#" -eq 1 ] || usage +[ -r "$1" ] || usage + +cp ${1} ${1}.bak + +sed -i ' +s,callhome/cluster,callhome_configure,g +s,callhome/node,callhome_install,g +s,callhome/postcheck,callhome_verify,g +s,callhome/precheck,callhome_prepare,g +s,core/cluster,core_configure,g +s,core/common,core_common,g +s,core/node,core_install,g +s,core/postcheck,core_verify,g +s,core/precheck,core_prepare,g +s,core/upgrade,core_upgrade,g +s,gui/cluster,gui_configure,g +s,gui/node,gui_install,g +s,gui/postcheck,gui_verify,g +s,gui/precheck,gui_prepare,g +s,gui/upgrade,gui_upgrade,g +s,nfs/cluster,nfs_configure,g +s,nfs/common,ces_common,g +s,nfs/node,nfs_install,g +s,nfs/postcheck,nfs_verify,g +s,nfs/precheck,nfs_prepare,g +s,nfs/upgrade,nfs_upgrade,g +s,remote_mount/,remotemount_configure,g +s,scale_auth/upgrade,auth_upgrade,g +s,scale_ece/cluster,ece_configure,g +s,scale_ece/node,ece_install,g +s,scale_ece/precheck,ece_prepare,g +s,scale_ece/upgrade,ece_upgrade,g +s,scale_fileauditlogging/cluster,fal_configure,g +s,scale_fileauditlogging/node,fal_install,g +s,scale_fileauditlogging/postcheck,fal_verify,g +s,scale_fileauditlogging/precheck,fal_prepare,g +s,scale_fileauditlogging/upgrade,fal_upgrade,g +s,scale_hdfs/cluster,hdfs_configure,g +s,scale_hdfs/node,hdfs_install,g +s,scale_hdfs/postcheck,hdfs_verify,g +s,scale_hdfs/precheck,hdfs_prepare,g +s,scale_hdfs/upgrade,hdfs_upgrade,g +s,scale_hpt/node,afm_cos_install,g +s,scale_hpt/postcheck,afm_cos_verify,g +s,scale_hpt/precheck,afm_cos_prepare,g +s,scale_hpt/upgrade,afm_cos_upgrade,g +s,scale_object/cluster,obj_configure,g +s,scale_object/node,obj_install,g +s,scale_object/postcheck,obj_verify,g +s,scale_object/precheck,obj_prepare,g +s,scale_object/upgrade,obj_upgrade,g +s,smb/cluster,smb_configure,g +s,smb/node,smb_install,g +s,smb/postcheck,smb_verify,g +s,smb/precheck,smb_prepare,g +s,smb/upgrade,smb_upgrade,g +s,zimon/cluster,perfmon_configure,g +s,zimon/node,perfmon_install,g +s,zimon/postcheck,perfmon_verify,g +s,zimon/precheck,perfmon_prepare,g +s,zimon/upgrade,perfmon_upgrade,g +' $1 + +exit 0 From 1048d074dcc49d52b64007f6a87f0eb9cc34fdac Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Sun, 31 Oct 2021 20:53:40 +0100 Subject: [PATCH 010/113] Update dependencies to reflect new role names Signed-off-by: Achim Christ --- roles/afm_cos_install/meta/main.yml | 2 +- roles/afm_cos_upgrade/meta/main.yml | 2 +- roles/auth_upgrade/meta/main.yml | 2 +- roles/callhome_install/meta/main.yml | 2 +- roles/ces_common/meta/main.yml | 2 +- roles/ece_install/meta/main.yml | 2 +- roles/ece_upgrade/meta/main.yml | 2 +- roles/fal_install/meta/main.yml | 2 +- roles/fal_upgrade/meta/main.yml | 2 +- roles/gui_configure/meta/main.yml | 4 ++-- roles/gui_install/meta/main.yml | 4 ++-- roles/gui_prepare/meta/main.yml | 2 +- roles/gui_upgrade/meta/main.yml | 2 +- roles/hdfs_configure/meta/main.yml | 2 +- roles/hdfs_install/meta/main.yml | 4 ++-- roles/hdfs_upgrade/meta/main.yml | 2 +- roles/nfs_configure/meta/main.yml | 4 ++-- roles/nfs_install/meta/main.yml | 4 ++-- roles/nfs_upgrade/meta/main.yml | 2 +- roles/obj_configure/meta/main.yml | 4 ++-- 
roles/obj_install/meta/main.yml | 4 ++-- roles/obj_prepare/meta/main.yml | 2 +- roles/obj_upgrade/meta/main.yml | 4 ++-- roles/perfmon_configure/meta/main.yml | 2 +- roles/perfmon_install/meta/main.yml | 2 +- roles/perfmon_prepare/meta/main.yml | 2 +- roles/perfmon_upgrade/meta/main.yml | 2 +- roles/smb_configure/meta/main.yml | 2 +- roles/smb_install/meta/main.yml | 4 ++-- roles/smb_upgrade/meta/main.yml | 2 +- 30 files changed, 39 insertions(+), 39 deletions(-) diff --git a/roles/afm_cos_install/meta/main.yml b/roles/afm_cos_install/meta/main.yml index f79ac190..e047df6f 100644 --- a/roles/afm_cos_install/meta/main.yml +++ b/roles/afm_cos_install/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/afm_cos_upgrade/meta/main.yml b/roles/afm_cos_upgrade/meta/main.yml index f79ac190..e047df6f 100644 --- a/roles/afm_cos_upgrade/meta/main.yml +++ b/roles/afm_cos_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/auth_upgrade/meta/main.yml b/roles/auth_upgrade/meta/main.yml index 13b86030..3c8520de 100644 --- a/roles/auth_upgrade/meta/main.yml +++ b/roles/auth_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/callhome_install/meta/main.yml b/roles/callhome_install/meta/main.yml index 13b86030..3c8520de 100755 --- a/roles/callhome_install/meta/main.yml +++ b/roles/callhome_install/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/ces_common/meta/main.yml b/roles/ces_common/meta/main.yml index 13b86030..3c8520de 100644 --- a/roles/ces_common/meta/main.yml +++ b/roles/ces_common/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/ece_install/meta/main.yml b/roles/ece_install/meta/main.yml index 1dca38dd..f30f8d92 100644 --- a/roles/ece_install/meta/main.yml +++ b/roles/ece_install/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/ece_upgrade/meta/main.yml b/roles/ece_upgrade/meta/main.yml index 1dca38dd..f30f8d92 100644 --- a/roles/ece_upgrade/meta/main.yml +++ b/roles/ece_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/fal_install/meta/main.yml b/roles/fal_install/meta/main.yml index ae6f91b6..286e6d34 100644 --- a/roles/fal_install/meta/main.yml +++ b/roles/fal_install/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/fal_upgrade/meta/main.yml b/roles/fal_upgrade/meta/main.yml index ae6f91b6..286e6d34 100644 --- a/roles/fal_upgrade/meta/main.yml +++ b/roles/fal_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/gui_configure/meta/main.yml b/roles/gui_configure/meta/main.yml index 4c56928f..64f22a1c 100644 --- a/roles/gui_configure/meta/main.yml +++ b/roles/gui_configure/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common - - zimon/cluster + - core_common + - perfmon_configure diff --git a/roles/gui_install/meta/main.yml b/roles/gui_install/meta/main.yml index 0e9ee42d..cf19cb59 100644 --- a/roles/gui_install/meta/main.yml +++ b/roles/gui_install/meta/main.yml 
@@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common - - zimon/node + - core_common + - perfmon_install diff --git a/roles/gui_prepare/meta/main.yml b/roles/gui_prepare/meta/main.yml index 115fdc7e..f041ba72 100644 --- a/roles/gui_prepare/meta/main.yml +++ b/roles/gui_prepare/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/gui_upgrade/meta/main.yml b/roles/gui_upgrade/meta/main.yml index 115fdc7e..f041ba72 100644 --- a/roles/gui_upgrade/meta/main.yml +++ b/roles/gui_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/hdfs_configure/meta/main.yml b/roles/hdfs_configure/meta/main.yml index 2a32d5d7..44845236 100644 --- a/roles/hdfs_configure/meta/main.yml +++ b/roles/hdfs_configure/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - nfs/common + - ces_common diff --git a/roles/hdfs_install/meta/main.yml b/roles/hdfs_install/meta/main.yml index 0918df39..fdd5b194 100644 --- a/roles/hdfs_install/meta/main.yml +++ b/roles/hdfs_install/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common - - scale_hdfs/precheck + - core_common + - hdfs_prepare diff --git a/roles/hdfs_upgrade/meta/main.yml b/roles/hdfs_upgrade/meta/main.yml index 13b86030..3c8520de 100644 --- a/roles/hdfs_upgrade/meta/main.yml +++ b/roles/hdfs_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/nfs_configure/meta/main.yml b/roles/nfs_configure/meta/main.yml index a7182ff2..0bca8737 100644 --- a/roles/nfs_configure/meta/main.yml +++ b/roles/nfs_configure/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - nfs/precheck - - nfs/common + - nfs_prepare + - ces_common diff --git a/roles/nfs_install/meta/main.yml b/roles/nfs_install/meta/main.yml index 3cec205a..73c5d0bf 100644 --- a/roles/nfs_install/meta/main.yml +++ b/roles/nfs_install/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common - - nfs/precheck + - core_common + - nfs_prepare diff --git a/roles/nfs_upgrade/meta/main.yml b/roles/nfs_upgrade/meta/main.yml index 13b86030..3c8520de 100644 --- a/roles/nfs_upgrade/meta/main.yml +++ b/roles/nfs_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/obj_configure/meta/main.yml b/roles/obj_configure/meta/main.yml index 7908f877..9269af13 100644 --- a/roles/obj_configure/meta/main.yml +++ b/roles/obj_configure/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - scale_object/precheck - - nfs/common + - obj_prepare + - ces_common diff --git a/roles/obj_install/meta/main.yml b/roles/obj_install/meta/main.yml index b85b4c6a..70e8029c 100644 --- a/roles/obj_install/meta/main.yml +++ b/roles/obj_install/meta/main.yml @@ -16,5 +16,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - scale_object/precheck - - nfs/common + - obj_prepare + - ces_common diff --git a/roles/obj_prepare/meta/main.yml b/roles/obj_prepare/meta/main.yml index 0c05ab16..6deaf73d 100644 --- a/roles/obj_prepare/meta/main.yml +++ b/roles/obj_prepare/meta/main.yml @@ -16,4 +16,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/obj_upgrade/meta/main.yml b/roles/obj_upgrade/meta/main.yml index 3f62aba0..57e103c4 100644 --- 
a/roles/obj_upgrade/meta/main.yml +++ b/roles/obj_upgrade/meta/main.yml @@ -16,5 +16,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common - - nfs/common + - core_common + - ces_common diff --git a/roles/perfmon_configure/meta/main.yml b/roles/perfmon_configure/meta/main.yml index e6e01961..66c36816 100644 --- a/roles/perfmon_configure/meta/main.yml +++ b/roles/perfmon_configure/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/perfmon_install/meta/main.yml b/roles/perfmon_install/meta/main.yml index e6e01961..66c36816 100644 --- a/roles/perfmon_install/meta/main.yml +++ b/roles/perfmon_install/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/perfmon_prepare/meta/main.yml b/roles/perfmon_prepare/meta/main.yml index e6e01961..66c36816 100644 --- a/roles/perfmon_prepare/meta/main.yml +++ b/roles/perfmon_prepare/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/perfmon_upgrade/meta/main.yml b/roles/perfmon_upgrade/meta/main.yml index e6e01961..66c36816 100644 --- a/roles/perfmon_upgrade/meta/main.yml +++ b/roles/perfmon_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common diff --git a/roles/smb_configure/meta/main.yml b/roles/smb_configure/meta/main.yml index 2a32d5d7..44845236 100644 --- a/roles/smb_configure/meta/main.yml +++ b/roles/smb_configure/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - nfs/common + - ces_common diff --git a/roles/smb_install/meta/main.yml b/roles/smb_install/meta/main.yml index e05eaf05..52e6903a 100644 --- a/roles/smb_install/meta/main.yml +++ b/roles/smb_install/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common - - smb/precheck + - core_common + - smb_prepare diff --git a/roles/smb_upgrade/meta/main.yml b/roles/smb_upgrade/meta/main.yml index 13b86030..3c8520de 100644 --- a/roles/smb_upgrade/meta/main.yml +++ b/roles/smb_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core/common + - core_common From b99de070da1688b2a30a984d41413ba7bd774ee3 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Sun, 31 Oct 2021 21:00:14 +0100 Subject: [PATCH 011/113] Update references to 'local' roles Signed-off-by: Achim Christ --- roles/callhome_verify/meta/main.yml | 2 +- roles/core_install/meta/main.yml | 2 +- roles/core_prepare/meta/main.yml | 2 +- roles/core_upgrade/meta/main.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/callhome_verify/meta/main.yml b/roles/callhome_verify/meta/main.yml index 73f72377..4ef9bf2e 100755 --- a/roles/callhome_verify/meta/main.yml +++ b/roles/callhome_verify/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: [] -# - common +# - core_common diff --git a/roles/core_install/meta/main.yml b/roles/core_install/meta/main.yml index 6ca5131a..8eaaff21 100644 --- a/roles/core_install/meta/main.yml +++ b/roles/core_install/meta/main.yml @@ -16,4 +16,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - common + - core_common diff --git a/roles/core_prepare/meta/main.yml b/roles/core_prepare/meta/main.yml index 6ca5131a..8eaaff21 100644 --- a/roles/core_prepare/meta/main.yml +++ b/roles/core_prepare/meta/main.yml @@ -16,4 +16,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - common + - core_common 
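With the role dependencies now pointing at the renamed roles, a consuming playbook references them the same way. A minimal sketch, assuming a hypothetical inventory group named scale_nodes (any of the renamed roles could be substituted):

    - hosts: scale_nodes
      collections:
        - ibm.spectrum_scale
      roles:
        - core_prepare
        - core_install
        - core_configure
        - core_verify

Equivalently, the roles can be listed by their fully-qualified names, for example ibm.spectrum_scale.core_prepare, without the collections keyword.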
diff --git a/roles/core_upgrade/meta/main.yml b/roles/core_upgrade/meta/main.yml index 6ca5131a..8eaaff21 100644 --- a/roles/core_upgrade/meta/main.yml +++ b/roles/core_upgrade/meta/main.yml @@ -16,4 +16,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - common + - core_common From d2944922cbd688de33a39bdd3566048e1eb3f6f0 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Sun, 31 Oct 2021 21:18:47 +0100 Subject: [PATCH 012/113] Reference dependencies by their fully-qualified name Signed-off-by: Achim Christ --- roles/afm_cos_install/meta/main.yml | 2 +- roles/afm_cos_upgrade/meta/main.yml | 2 +- roles/auth_upgrade/meta/main.yml | 2 +- roles/callhome_configure/meta/main.yml | 2 +- roles/callhome_install/meta/main.yml | 2 +- roles/callhome_verify/meta/main.yml | 2 +- roles/ces_common/meta/main.yml | 2 +- roles/core_install/meta/main.yml | 2 +- roles/core_prepare/meta/main.yml | 2 +- roles/core_upgrade/meta/main.yml | 2 +- roles/ece_install/meta/main.yml | 2 +- roles/ece_upgrade/meta/main.yml | 2 +- roles/fal_install/meta/main.yml | 2 +- roles/fal_upgrade/meta/main.yml | 2 +- roles/gui_configure/meta/main.yml | 4 ++-- roles/gui_install/meta/main.yml | 4 ++-- roles/gui_prepare/meta/main.yml | 2 +- roles/gui_upgrade/meta/main.yml | 2 +- roles/hdfs_configure/meta/main.yml | 2 +- roles/hdfs_install/meta/main.yml | 4 ++-- roles/hdfs_upgrade/meta/main.yml | 2 +- roles/nfs_configure/meta/main.yml | 4 ++-- roles/nfs_install/meta/main.yml | 4 ++-- roles/nfs_upgrade/meta/main.yml | 2 +- roles/obj_configure/meta/main.yml | 4 ++-- roles/obj_install/meta/main.yml | 4 ++-- roles/obj_prepare/meta/main.yml | 2 +- roles/obj_upgrade/meta/main.yml | 4 ++-- roles/perfmon_configure/meta/main.yml | 2 +- roles/perfmon_install/meta/main.yml | 2 +- roles/perfmon_prepare/meta/main.yml | 2 +- roles/perfmon_upgrade/meta/main.yml | 2 +- roles/smb_configure/meta/main.yml | 2 +- roles/smb_install/meta/main.yml | 4 ++-- roles/smb_upgrade/meta/main.yml | 2 +- 35 files changed, 44 insertions(+), 44 deletions(-) diff --git a/roles/afm_cos_install/meta/main.yml b/roles/afm_cos_install/meta/main.yml index e047df6f..ed914ccc 100644 --- a/roles/afm_cos_install/meta/main.yml +++ b/roles/afm_cos_install/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/afm_cos_upgrade/meta/main.yml b/roles/afm_cos_upgrade/meta/main.yml index e047df6f..ed914ccc 100644 --- a/roles/afm_cos_upgrade/meta/main.yml +++ b/roles/afm_cos_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/auth_upgrade/meta/main.yml b/roles/auth_upgrade/meta/main.yml index 3c8520de..d32d632b 100644 --- a/roles/auth_upgrade/meta/main.yml +++ b/roles/auth_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/callhome_configure/meta/main.yml b/roles/callhome_configure/meta/main.yml index 04ac82ac..1b528927 100755 --- a/roles/callhome_configure/meta/main.yml +++ b/roles/callhome_configure/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - precheck + - ibm.spectrum_scale.callhome_prepare diff --git a/roles/callhome_install/meta/main.yml b/roles/callhome_install/meta/main.yml index 3c8520de..d32d632b 100755 --- a/roles/callhome_install/meta/main.yml +++ b/roles/callhome_install/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - 
core_common + - ibm.spectrum_scale.core_common diff --git a/roles/callhome_verify/meta/main.yml b/roles/callhome_verify/meta/main.yml index 4ef9bf2e..760f6876 100755 --- a/roles/callhome_verify/meta/main.yml +++ b/roles/callhome_verify/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: [] -# - core_common +# - ibm.spectrum_scale.core_common diff --git a/roles/ces_common/meta/main.yml b/roles/ces_common/meta/main.yml index 3c8520de..d32d632b 100644 --- a/roles/ces_common/meta/main.yml +++ b/roles/ces_common/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/core_install/meta/main.yml b/roles/core_install/meta/main.yml index 8eaaff21..017c7c5f 100644 --- a/roles/core_install/meta/main.yml +++ b/roles/core_install/meta/main.yml @@ -16,4 +16,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/core_prepare/meta/main.yml b/roles/core_prepare/meta/main.yml index 8eaaff21..017c7c5f 100644 --- a/roles/core_prepare/meta/main.yml +++ b/roles/core_prepare/meta/main.yml @@ -16,4 +16,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/core_upgrade/meta/main.yml b/roles/core_upgrade/meta/main.yml index 8eaaff21..017c7c5f 100644 --- a/roles/core_upgrade/meta/main.yml +++ b/roles/core_upgrade/meta/main.yml @@ -16,4 +16,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/ece_install/meta/main.yml b/roles/ece_install/meta/main.yml index f30f8d92..82795402 100644 --- a/roles/ece_install/meta/main.yml +++ b/roles/ece_install/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/ece_upgrade/meta/main.yml b/roles/ece_upgrade/meta/main.yml index f30f8d92..82795402 100644 --- a/roles/ece_upgrade/meta/main.yml +++ b/roles/ece_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/fal_install/meta/main.yml b/roles/fal_install/meta/main.yml index 286e6d34..ee84e753 100644 --- a/roles/fal_install/meta/main.yml +++ b/roles/fal_install/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/fal_upgrade/meta/main.yml b/roles/fal_upgrade/meta/main.yml index 286e6d34..ee84e753 100644 --- a/roles/fal_upgrade/meta/main.yml +++ b/roles/fal_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/gui_configure/meta/main.yml b/roles/gui_configure/meta/main.yml index 64f22a1c..3896f0fa 100644 --- a/roles/gui_configure/meta/main.yml +++ b/roles/gui_configure/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common - - perfmon_configure + - ibm.spectrum_scale.core_common + - ibm.spectrum_scale.perfmon_configure diff --git a/roles/gui_install/meta/main.yml b/roles/gui_install/meta/main.yml index cf19cb59..f8d2be23 100644 --- a/roles/gui_install/meta/main.yml +++ b/roles/gui_install/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common - - perfmon_install + - ibm.spectrum_scale.core_common + - ibm.spectrum_scale.perfmon_install diff --git a/roles/gui_prepare/meta/main.yml 
b/roles/gui_prepare/meta/main.yml index f041ba72..bdbab499 100644 --- a/roles/gui_prepare/meta/main.yml +++ b/roles/gui_prepare/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/gui_upgrade/meta/main.yml b/roles/gui_upgrade/meta/main.yml index f041ba72..bdbab499 100644 --- a/roles/gui_upgrade/meta/main.yml +++ b/roles/gui_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/hdfs_configure/meta/main.yml b/roles/hdfs_configure/meta/main.yml index 44845236..c42d702c 100644 --- a/roles/hdfs_configure/meta/main.yml +++ b/roles/hdfs_configure/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - ces_common + - ibm.spectrum_scale.ces_common diff --git a/roles/hdfs_install/meta/main.yml b/roles/hdfs_install/meta/main.yml index fdd5b194..c75a53b2 100644 --- a/roles/hdfs_install/meta/main.yml +++ b/roles/hdfs_install/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common - - hdfs_prepare + - ibm.spectrum_scale.core_common + - ibm.spectrum_scale.hdfs_prepare diff --git a/roles/hdfs_upgrade/meta/main.yml b/roles/hdfs_upgrade/meta/main.yml index 3c8520de..d32d632b 100644 --- a/roles/hdfs_upgrade/meta/main.yml +++ b/roles/hdfs_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/nfs_configure/meta/main.yml b/roles/nfs_configure/meta/main.yml index 0bca8737..f2018326 100644 --- a/roles/nfs_configure/meta/main.yml +++ b/roles/nfs_configure/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - nfs_prepare - - ces_common + - ibm.spectrum_scale.nfs_prepare + - ibm.spectrum_scale.ces_common diff --git a/roles/nfs_install/meta/main.yml b/roles/nfs_install/meta/main.yml index 73c5d0bf..a451c3b9 100644 --- a/roles/nfs_install/meta/main.yml +++ b/roles/nfs_install/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common - - nfs_prepare + - ibm.spectrum_scale.core_common + - ibm.spectrum_scale.nfs_prepare diff --git a/roles/nfs_upgrade/meta/main.yml b/roles/nfs_upgrade/meta/main.yml index 3c8520de..d32d632b 100644 --- a/roles/nfs_upgrade/meta/main.yml +++ b/roles/nfs_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/obj_configure/meta/main.yml b/roles/obj_configure/meta/main.yml index 9269af13..b4409a0f 100644 --- a/roles/obj_configure/meta/main.yml +++ b/roles/obj_configure/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - obj_prepare - - ces_common + - ibm.spectrum_scale.obj_prepare + - ibm.spectrum_scale.ces_common diff --git a/roles/obj_install/meta/main.yml b/roles/obj_install/meta/main.yml index 70e8029c..f7e9bc91 100644 --- a/roles/obj_install/meta/main.yml +++ b/roles/obj_install/meta/main.yml @@ -16,5 +16,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - obj_prepare - - ces_common + - ibm.spectrum_scale.obj_prepare + - ibm.spectrum_scale.ces_common diff --git a/roles/obj_prepare/meta/main.yml b/roles/obj_prepare/meta/main.yml index 6deaf73d..4e20e076 100644 --- a/roles/obj_prepare/meta/main.yml +++ b/roles/obj_prepare/meta/main.yml @@ -16,4 +16,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git 
a/roles/obj_upgrade/meta/main.yml b/roles/obj_upgrade/meta/main.yml index 57e103c4..3bbbe418 100644 --- a/roles/obj_upgrade/meta/main.yml +++ b/roles/obj_upgrade/meta/main.yml @@ -16,5 +16,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common - - ces_common + - ibm.spectrum_scale.core_common + - ibm.spectrum_scale.ces_common diff --git a/roles/perfmon_configure/meta/main.yml b/roles/perfmon_configure/meta/main.yml index 66c36816..b74ad2a1 100644 --- a/roles/perfmon_configure/meta/main.yml +++ b/roles/perfmon_configure/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/perfmon_install/meta/main.yml b/roles/perfmon_install/meta/main.yml index 66c36816..b74ad2a1 100644 --- a/roles/perfmon_install/meta/main.yml +++ b/roles/perfmon_install/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/perfmon_prepare/meta/main.yml b/roles/perfmon_prepare/meta/main.yml index 66c36816..b74ad2a1 100644 --- a/roles/perfmon_prepare/meta/main.yml +++ b/roles/perfmon_prepare/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/perfmon_upgrade/meta/main.yml b/roles/perfmon_upgrade/meta/main.yml index 66c36816..b74ad2a1 100644 --- a/roles/perfmon_upgrade/meta/main.yml +++ b/roles/perfmon_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common diff --git a/roles/smb_configure/meta/main.yml b/roles/smb_configure/meta/main.yml index 44845236..c42d702c 100644 --- a/roles/smb_configure/meta/main.yml +++ b/roles/smb_configure/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - ces_common + - ibm.spectrum_scale.ces_common diff --git a/roles/smb_install/meta/main.yml b/roles/smb_install/meta/main.yml index 52e6903a..35961206 100644 --- a/roles/smb_install/meta/main.yml +++ b/roles/smb_install/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common - - smb_prepare + - ibm.spectrum_scale.core_common + - ibm.spectrum_scale.smb_prepare diff --git a/roles/smb_upgrade/meta/main.yml b/roles/smb_upgrade/meta/main.yml index 3c8520de..d32d632b 100644 --- a/roles/smb_upgrade/meta/main.yml +++ b/roles/smb_upgrade/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: galaxy_tags: [] dependencies: - - core_common + - ibm.spectrum_scale.core_common From 64f89dea8b98a383defd9e87082660b7760f83cb Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Mon, 1 Nov 2021 10:23:01 +0100 Subject: [PATCH 013/113] Move role-specific README's to docs/ directory Signed-off-by: Achim Christ --- CONTRIBUTING.md | 0 roles/callhome/README.md => docs/README.CALLHOME.md | 0 roles/gui/README.md => docs/README.GUI.md | 0 roles/scale_hdfs/README.md => docs/README.HDFS.md | 0 roles/nfs/README.md => docs/README.NFS.md | 0 roles/scale_object/README.md => docs/README.OBJ.md | 0 .../remotemount_configure/README.md => docs/README.REMOTEMOUNT.md | 0 roles/smb/README.md => docs/README.SMB.md | 0 VARIABLES.md => docs/VARIABLES.md | 0 9 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 CONTRIBUTING.md rename roles/callhome/README.md => docs/README.CALLHOME.md (100%) mode change 100755 => 100644 rename roles/gui/README.md => docs/README.GUI.md (100%) mode change 100755 => 100644 rename roles/scale_hdfs/README.md => docs/README.HDFS.md 
(100%) rename roles/nfs/README.md => docs/README.NFS.md (100%) rename roles/scale_object/README.md => docs/README.OBJ.md (100%) rename roles/remotemount_configure/README.md => docs/README.REMOTEMOUNT.md (100%) rename roles/smb/README.md => docs/README.SMB.md (100%) rename VARIABLES.md => docs/VARIABLES.md (100%) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md old mode 100755 new mode 100644 diff --git a/roles/callhome/README.md b/docs/README.CALLHOME.md old mode 100755 new mode 100644 similarity index 100% rename from roles/callhome/README.md rename to docs/README.CALLHOME.md diff --git a/roles/gui/README.md b/docs/README.GUI.md old mode 100755 new mode 100644 similarity index 100% rename from roles/gui/README.md rename to docs/README.GUI.md diff --git a/roles/scale_hdfs/README.md b/docs/README.HDFS.md similarity index 100% rename from roles/scale_hdfs/README.md rename to docs/README.HDFS.md diff --git a/roles/nfs/README.md b/docs/README.NFS.md similarity index 100% rename from roles/nfs/README.md rename to docs/README.NFS.md diff --git a/roles/scale_object/README.md b/docs/README.OBJ.md similarity index 100% rename from roles/scale_object/README.md rename to docs/README.OBJ.md diff --git a/roles/remotemount_configure/README.md b/docs/README.REMOTEMOUNT.md similarity index 100% rename from roles/remotemount_configure/README.md rename to docs/README.REMOTEMOUNT.md diff --git a/roles/smb/README.md b/docs/README.SMB.md similarity index 100% rename from roles/smb/README.md rename to docs/README.SMB.md diff --git a/VARIABLES.md b/docs/VARIABLES.md similarity index 100% rename from VARIABLES.md rename to docs/VARIABLES.md From 06fa12f79f13843a6bef0db109ac4bcb1bdc2b0b Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Mon, 1 Nov 2021 10:33:19 +0100 Subject: [PATCH 014/113] Update samples to reflect new role names Signed-off-by: Achim Christ --- samples/playbook_aws.yml | 24 +++++------ samples/playbook_callhome.yml | 16 +++---- samples/playbook_ces.yml | 24 +++++------ samples/playbook_ces_hdfs.yml | 16 +++---- samples/playbook_ces_object.yml | 16 +++---- samples/playbook_cloud.yml | 24 +++++------ samples/playbook_cloud_remote_mount.yml | 2 +- samples/playbook_directory.yml | 8 ++-- samples/playbook_fileauditlogging.yml | 16 +++---- samples/playbook_json_ces.yml | 56 ++++++++++++------------- samples/playbook_localpkg.yml | 8 ++-- samples/playbook_nodeclass.yml | 8 ++-- samples/playbook_remote_mount.yml | 2 +- samples/playbook_remote_mount_cli.yml | 2 +- samples/playbook_remotepkg.yml | 8 ++-- samples/playbook_repository.yml | 8 ++-- samples/playbook_storage.yml | 8 ++-- 17 files changed, 123 insertions(+), 123 deletions(-) diff --git a/samples/playbook_aws.yml b/samples/playbook_aws.yml index e04b1c85..c506f37f 100644 --- a/samples/playbook_aws.yml +++ b/samples/playbook_aws.yml @@ -50,18 +50,18 @@ vars: - scale_install_directory_pkg_path: /opt/IBM/gpfs_cloud_rpms roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - gui/precheck - - gui/node - - gui/cluster - - gui/postcheck - - zimon/precheck - - zimon/node - - zimon/cluster - - zimon/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - gui_prepare + - gui_install + - gui_configure + - gui_verify + - perfmon_prepare + - perfmon_install + - perfmon_configure + - perfmon_verify # Cloud deployment specific actions after Spectrum Scale # cluster installation and setup diff --git a/samples/playbook_callhome.yml b/samples/playbook_callhome.yml index 650186d6..69323801 100644 --- 
a/samples/playbook_callhome.yml +++ b/samples/playbook_callhome.yml @@ -19,11 +19,11 @@ pre_tasks: - include_vars: callhome_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - callhome/precheck - - callhome/node - - callhome/cluster - - callhome/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - callhome_prepare + - callhome_install + - callhome_configure + - callhome_verify diff --git a/samples/playbook_ces.yml b/samples/playbook_ces.yml index c6438b3d..4d72e3fa 100644 --- a/samples/playbook_ces.yml +++ b/samples/playbook_ces.yml @@ -19,15 +19,15 @@ pre_tasks: - include_vars: ces_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - nfs/precheck - - nfs/node - - nfs/cluster - - nfs/postcheck - - smb/precheck - - smb/node - - smb/cluster - - smb/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - nfs_prepare + - nfs_install + - nfs_configure + - nfs_verify + - smb_prepare + - smb_install + - smb_configure + - smb_verify diff --git a/samples/playbook_ces_hdfs.yml b/samples/playbook_ces_hdfs.yml index 36051fb5..05094991 100644 --- a/samples/playbook_ces_hdfs.yml +++ b/samples/playbook_ces_hdfs.yml @@ -19,11 +19,11 @@ pre_tasks: - include_vars: hdfs_cluster_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - scale_hdfs/precheck - - scale_hdfs/node - - scale_hdfs/cluster - - scale_hdfs/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - hdfs_prepare + - hdfs_install + - hdfs_configure + - hdfs_verify diff --git a/samples/playbook_ces_object.yml b/samples/playbook_ces_object.yml index 860e69c9..cce30776 100644 --- a/samples/playbook_ces_object.yml +++ b/samples/playbook_ces_object.yml @@ -18,11 +18,11 @@ pre_tasks: - include_vars: scale_object_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - scale_object/precheck - - scale_object/node - - scale_object/cluster - - scale_object/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - obj_prepare + - obj_install + - obj_configure + - obj_verify diff --git a/samples/playbook_cloud.yml b/samples/playbook_cloud.yml index ee4af81a..3b562203 100644 --- a/samples/playbook_cloud.yml +++ b/samples/playbook_cloud.yml @@ -14,15 +14,15 @@ collections: - ibm.spectrum_scale roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - gui/precheck - - gui/node - - gui/cluster - - gui/postcheck - - zimon/precheck - - zimon/node - - zimon/cluster - - zimon/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - gui_prepare + - gui_install + - gui_configure + - gui_verify + - perfmon_prepare + - perfmon_install + - perfmon_configure + - perfmon_verify diff --git a/samples/playbook_cloud_remote_mount.yml b/samples/playbook_cloud_remote_mount.yml index 1425c3fe..432a726d 100644 --- a/samples/playbook_cloud_remote_mount.yml +++ b/samples/playbook_cloud_remote_mount.yml @@ -16,7 +16,7 @@ vars: - scale_remotemount_debug: true roles: - - remote_mount + - remotemount_configure # If Accessing/Client Cluster don't have GUI, # Then change wee need to add variable scale_remotemount_client_no_gui: true and ansible "hosts" need to point to one of the Scale client cluster node diff --git a/samples/playbook_directory.yml b/samples/playbook_directory.yml index 41ef48aa..bcac188a 100644 --- a/samples/playbook_directory.yml +++ b/samples/playbook_directory.yml @@ -16,7 +16,7 @@ 
vars: - scale_install_directory_pkg_path: /root/spectrum_scale_packages roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck + - core_prepare + - core_install + - core_configure + - core_verify diff --git a/samples/playbook_fileauditlogging.yml b/samples/playbook_fileauditlogging.yml index 4faab376..cad4500f 100644 --- a/samples/playbook_fileauditlogging.yml +++ b/samples/playbook_fileauditlogging.yml @@ -17,11 +17,11 @@ pre_tasks: - include_vars: fal_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - scale_fileauditlogging/precheck - - scale_fileauditlogging/node - - scale_fileauditlogging/cluster - - scale_fileauditlogging/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - fal_prepare + - fal_install + - fal_configure + - fal_verify diff --git a/samples/playbook_json_ces.yml b/samples/playbook_json_ces.yml index 0f069367..305d47a5 100644 --- a/samples/playbook_json_ces.yml +++ b/samples/playbook_json_ces.yml @@ -17,31 +17,31 @@ vars: - scale_install_localpkg_path: /root/Spectrum_Scale_Advanced-5.1.0.0-x86_64-Linux-install roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - gui/precheck - - gui/node - - gui/cluster - - gui/postcheck - - zimon/precheck - - zimon/node - - zimon/cluster - - zimon/postcheck - - callhome/precheck - - callhome/node - - callhome/cluster - - callhome/postcheck - - nfs/precheck - - nfs/node - - nfs/cluster - - nfs/postcheck - - smb/precheck - - smb/node - - smb/cluster - - smb/postcheck - - scale_fileauditlogging/precheck - - scale_fileauditlogging/node - - scale_fileauditlogging/cluster - - scale_fileauditlogging/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - gui_prepare + - gui_install + - gui_configure + - gui_verify + - perfmon_prepare + - perfmon_install + - perfmon_configure + - perfmon_verify + - callhome_prepare + - callhome_install + - callhome_configure + - callhome_verify + - nfs_prepare + - nfs_install + - nfs_configure + - nfs_verify + - smb_prepare + - smb_install + - smb_configure + - smb_verify + - fal_prepare + - fal_install + - fal_configure + - fal_verify diff --git a/samples/playbook_localpkg.yml b/samples/playbook_localpkg.yml index 3599b6e9..eda29d91 100644 --- a/samples/playbook_localpkg.yml +++ b/samples/playbook_localpkg.yml @@ -18,7 +18,7 @@ vars: - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck + - core_prepare + - core_install + - core_configure + - core_verify diff --git a/samples/playbook_nodeclass.yml b/samples/playbook_nodeclass.yml index e5575131..b6b04d1d 100644 --- a/samples/playbook_nodeclass.yml +++ b/samples/playbook_nodeclass.yml @@ -42,7 +42,7 @@ pre_tasks: - include_vars: config_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck + - core_prepare + - core_install + - core_configure + - core_verify diff --git a/samples/playbook_remote_mount.yml b/samples/playbook_remote_mount.yml index 9655b655..1584e652 100644 --- a/samples/playbook_remote_mount.yml +++ b/samples/playbook_remote_mount.yml @@ -22,7 +22,7 @@ - { scale_remotemount_client_filesystem_name: "fs2", scale_remotemount_client_remotemount_path: "/gpfs/fs2", scale_remotemount_storage_filesystem_name: "gpfs01", } # Minimum variables - { scale_remotemount_client_filesystem_name: "fs3", scale_remotemount_client_remotemount_path: "/gpfs/fs3", 
scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } roles: - - remote_mount + - remotemount_configure # If Accessing/Client Cluster don't have GUI, # Then change wee need to add variable scale_remotemount_client_no_gui: true and ansible "hosts" need to point to one of the Scale client cluster node diff --git a/samples/playbook_remote_mount_cli.yml b/samples/playbook_remote_mount_cli.yml index 43414e94..bf250ca8 100644 --- a/samples/playbook_remote_mount_cli.yml +++ b/samples/playbook_remote_mount_cli.yml @@ -23,4 +23,4 @@ - { scale_remotemount_client_filesystem_name: "fs1", scale_remotemount_client_remotemount_path: "/gpfs/fs1", scale_remotemount_storage_filesystem_name: "gpfs01", } # Minimum variables - { scale_remotemount_client_filesystem_name: "fs2", scale_remotemount_client_remotemount_path: "/gpfs/fs1", scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } roles: - - remote_mount + - remotemount_configure diff --git a/samples/playbook_remotepkg.yml b/samples/playbook_remotepkg.yml index 6b4a94ab..84fad65c 100644 --- a/samples/playbook_remotepkg.yml +++ b/samples/playbook_remotepkg.yml @@ -16,7 +16,7 @@ vars: - scale_install_remotepkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck + - core_prepare + - core_install + - core_configure + - core_verify diff --git a/samples/playbook_repository.yml b/samples/playbook_repository.yml index e28ec5ad..079164d4 100644 --- a/samples/playbook_repository.yml +++ b/samples/playbook_repository.yml @@ -21,7 +21,7 @@ # Remember the trailing slash `/` in the URL - scale_install_repository_url: http://server/path/ roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck + - core_prepare + - core_install + - core_configure + - core_verify diff --git a/samples/playbook_storage.yml b/samples/playbook_storage.yml index 644bc84a..13c40d4c 100644 --- a/samples/playbook_storage.yml +++ b/samples/playbook_storage.yml @@ -19,7 +19,7 @@ pre_tasks: - include_vars: storage_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck + - core_prepare + - core_install + - core_configure + - core_verify From be14f0dbdfe003cdcada78e41a25dbe114a99d15 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 4 Nov 2021 19:20:27 +0100 Subject: [PATCH 015/113] Update minimal example to reflect new role names Signed-off-by: Achim Christ --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index aeaaa8ca..1ec65bc2 100644 --- a/README.md +++ b/README.md @@ -204,10 +204,10 @@ Installation Instructions vars: - scale_install_localpkg_path: /path/to/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck + - core_prepare + - core_install + - core_configure + - core_verify ``` Again, this is just a minimal example. 
There are different installation methods available, each offering a specific set of options: From dff874d666a325f2db966e79b24f6f16c6f7c8c5 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Mon, 15 Nov 2021 16:13:46 +0100 Subject: [PATCH 016/113] Update 'import_role' tasks to reflect new role names Signed-off-by: Achim Christ --- roles/ces_common/tasks/configure.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/ces_common/tasks/configure.yml b/roles/ces_common/tasks/configure.yml index c8a2c1de..bd7c5f34 100644 --- a/roles/ces_common/tasks/configure.yml +++ b/roles/ces_common/tasks/configure.yml @@ -103,11 +103,11 @@ run_once: true - import_role: - name: nfs/node + name: ibm.spectrum_scale.nfs_install when: scale_ces_disabled_nodes|length > 0 and 'NFS' in scale_service_list - import_role: - name: smb/node + name: ibm.spectrum_scale.smb_install when: scale_ces_disabled_nodes|length > 0 and 'SMB' in scale_service_list - name: configure | Prepare ces nodes string From 0c67bd7ad170c1aff813dae3d9973255d0a93aca Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Wed, 17 Nov 2021 19:34:32 +0100 Subject: [PATCH 017/113] [ces_common] Change 'scale_protocol_node_list' to use 'inventory_hostname' Signed-off-by: Achim Christ --- roles/ces_common/tasks/check.yml | 17 +++++----- roles/ces_common/tasks/configure.yml | 35 ++++++++++----------- roles/obj_install/tasks/install_dir_pkg.yml | 3 +- roles/obj_upgrade/tasks/install_dir_pkg.yml | 3 +- 4 files changed, 26 insertions(+), 32 deletions(-) diff --git a/roles/ces_common/tasks/check.yml b/roles/ces_common/tasks/check.yml index 9e571685..c945e2ce 100644 --- a/roles/ces_common/tasks/check.yml +++ b/roles/ces_common/tasks/check.yml @@ -16,7 +16,7 @@ - name: check | Collect all protocol nodes set_fact: - scale_protocol_node_list: "{{ scale_protocol_node_list + [hostvars[item]['scale_daemon_nodename']] }}" + scale_protocol_node_list: "{{ scale_protocol_node_list + [hostvars[item]['inventory_hostname']] }}" when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" @@ -155,21 +155,21 @@ Please define the CES shared root file system mount point in the inventory." - name: check | Prepare CES ip list set_fact: scale_export_ip: "{{ scale_ces_export_ip|flatten }}" - + - name: check | Prepare IPv6 export ip list set_fact: scale_ces_ipv6_list: "{{ scale_ces_ipv6_list + [ item ]}}" when: item is regex ( scale_ipv6_regex ) with_items: - "{{ scale_export_ip }}" - + - name: check | Prepare IPv4 export ip list set_fact: scale_ces_ipv4_list: "{{ scale_ces_ipv4_list + [ item ]}}" when: item is regex ( scale_ipv4_regex ) with_items: - "{{ scale_export_ip }}" - + - name: check | Check if interface is defined assert: that: @@ -181,7 +181,7 @@ Please define the CES shared root file system mount point in the inventory." msg: "Mixed IPs detected. All CES export IPs can be either IPv4 or IPv6." when: scale_ces_ipv4_list|length >0 and scale_ces_ipv6_list|length > 0 failed_when: scale_ces_ipv4_list|length >0 and scale_ces_ipv6_list|length > 0 - + when: scale_protocols.scale_ces_groups is defined and scale_protocols.scale_ces_groups|length > 0 - block: @@ -191,7 +191,7 @@ Please define the CES shared root file system mount point in the inventory." 
when: item is regex ( scale_ipv6_regex ) with_items: - "{{ scale_protocols.export_ip_pool }}" - + - name: check | Prepare IPv4 export ip list set_fact: scale_ces_ipv4_list: "{{ scale_ces_ipv4_list + [ item ]}}" @@ -207,9 +207,8 @@ Please define the CES shared root file system mount point in the inventory." - name: check | Check if all ces ips are either IPv4 or IPv6 debug: - msg: "Mixed IPs detected. All CES export IPs can be either IPv4 or IPv6." + msg: "Mixed IPs detected. All CES export IPs can be either IPv4 or IPv6." when: scale_ces_ipv4_list|length >0 and scale_ces_ipv6_list|length > 0 failed_when: scale_ces_ipv4_list|length >0 and scale_ces_ipv6_list|length > 0 - - when: scale_protocols.scale_ces_groups is not defined + when: scale_protocols.scale_ces_groups is not defined diff --git a/roles/ces_common/tasks/configure.yml b/roles/ces_common/tasks/configure.yml index bd7c5f34..7374a265 100644 --- a/roles/ces_common/tasks/configure.yml +++ b/roles/ces_common/tasks/configure.yml @@ -21,9 +21,9 @@ - name: configure | Prepare server nodes string set_fact: - scale_server_nodes: "{{ scale_server_nodes + ',' + item|string }}" + scale_server_nodes: "{{ scale_server_nodes + ',' + hostvars[item]['scale_daemon_nodename'] | string }}" with_items: - - "{{ scale_protocol_node_list }}" + - "{{ scale_protocol_node_list }}" - name: configure | Setting server licenses on protocol nodes command: "{{ scale_command_path }}mmchlicense server --accept -N {{ scale_server_nodes[1:] }}" @@ -42,8 +42,8 @@ - name: configure | Collect all nodes on which ces is not enabled set_fact: - scale_ces_disabled_nodes: "{{ scale_ces_disabled_nodes + [ item ]}}" - when: not scale_ces_enable_status.stdout_lines is search(item) + scale_ces_disabled_nodes: "{{ scale_ces_disabled_nodes + [hostvars[item]['scale_daemon_nodename']] }}" + when: not scale_ces_enable_status.stdout_lines is search(hostvars[item]['scale_daemon_nodename']) with_items: - "{{ scale_protocol_node_list }}" @@ -52,14 +52,13 @@ name: iputils-arping state: present when: (ansible_distribution in scale_ubuntu_distribution) and - (ansible_fqdn in scale_protocol_node_list or inventory_hostname in scale_protocol_node_list) + (inventory_hostname in scale_protocol_node_list) - name: configure | Check if SMB is running shell: cmd: "{{ scale_command_path }}mmces service list|grep SMB" register: scale_service_status - when: (ansible_fqdn in scale_protocol_node_list) or - (inventory_hostname in scale_protocol_node_list) + when: inventory_hostname in scale_protocol_node_list ignore_errors: true failed_when: false run_once: true @@ -67,14 +66,14 @@ - name: configure | Add SMB service to list set_fact: scale_service_list: "{{ scale_service_list + [scale_service_status.stderr|regex_search('SMB')] }}" - when: (ansible_fqdn in scale_protocol_node_list or inventory_hostname in scale_protocol_node_list) and + when: (inventory_hostname in scale_protocol_node_list) and ( scale_service_status.rc > 0 ) run_once: true - name: configure | Add SMB service to list set_fact: scale_service_list: "{{ scale_service_list + ['SMB'] }}" - when: (ansible_fqdn in scale_protocol_node_list or inventory_hostname in scale_protocol_node_list) and + when: (inventory_hostname in scale_protocol_node_list) and ( scale_service_status.rc == 0 ) run_once: true @@ -82,8 +81,7 @@ shell: cmd: "{{ scale_command_path }}mmces service list|grep NFS" register: scale_service_status - when: (ansible_fqdn in scale_protocol_node_list) or - (inventory_hostname in scale_protocol_node_list) + when: inventory_hostname in 
scale_protocol_node_list ignore_errors: true failed_when: false run_once: true @@ -91,14 +89,14 @@ - name: configure | Add NFS service to the list set_fact: scale_service_list: "{{ scale_service_list + [scale_service_status.stderr|regex_search('NFS')] }}" - when: (ansible_fqdn in scale_protocol_node_list or inventory_hostname in scale_protocol_node_list) and + when: (inventory_hostname in scale_protocol_node_list) and ( scale_service_status.rc > 0 ) run_once: true - name: configure | Add NFS service to the list set_fact: scale_service_list: "{{ scale_service_list + ['NFS'] }}" - when: (ansible_fqdn in scale_protocol_node_list or inventory_hostname in scale_protocol_node_list) and + when: (inventory_hostname in scale_protocol_node_list) and ( scale_service_status.rc == 0 ) run_once: true @@ -128,10 +126,10 @@ #- name: configure | Collect status of ces nodes #shell: - # cmd: "{{ scale_command_path }}mmces node list|grep {{ item }}" + # cmd: "{{ scale_command_path }}mmces node list|grep {{ hostvars[item]['scale_daemon_nodename'] }}" #register: scale_ces_enable_status #with_items: - #- "{{ scale_protocol_node_list }}" + # - "{{ scale_protocol_node_list }}" #delegate_to: "{{ scale_protocol_node_list.0 }}" #- name: configure | Check CES enabled on all nodes @@ -157,7 +155,7 @@ - name: configure| Check CES enabled on all nodes assert: that: - - "item in scale_ces_enable_status.stdout" + - hostvars[item]['scale_daemon_nodename'] in scale_ces_enable_status.stdout fail_msg: "CES is not enabled on {{ item }} protocol node" success_msg: "Successful enabling of CES on protocol node {{ item }}" with_items: @@ -192,8 +190,7 @@ - name: configure | Assign export ips as pool for CES groups command: "{{ scale_command_path }}mmces address add --ces-ip {{ item.export_ip_pool|join(',') }} --ces-group {{ item.group_name}}" - when: (ansible_fqdn in scale_protocol_node_list) or - (inventory_hostname in scale_protocol_node_list) + when: inventory_hostname in scale_protocol_node_list delegate_to: "{{ scale_protocol_node_list.0 }}" with_items: - "{{ scale_protocols.scale_ces_groups }}" @@ -218,7 +215,7 @@ - name: configure | Rebalance CES IPs command: "{{ scale_command_path }}mmces address move --rebalance" - #when: ansible_fqdn in scale_protocol_node_list + #when: inventory_hostname in scale_protocol_node_list delegate_to: "{{ scale_protocol_node_list.0 }}" run_once: true diff --git a/roles/obj_install/tasks/install_dir_pkg.yml b/roles/obj_install/tasks/install_dir_pkg.yml index d29a6d69..712c7374 100644 --- a/roles/obj_install/tasks/install_dir_pkg.yml +++ b/roles/obj_install/tasks/install_dir_pkg.yml @@ -84,5 +84,4 @@ msg: "No Scale object (spectrum-scale-object) package found {{ obj_extracted_path }}/{{ scale_obj_url }}/spectrum-scale-object*" run_once: true - when: when: ansible_fqdn in scale_protocol_node_list - + when: inventory_hostname in scale_protocol_node_list diff --git a/roles/obj_upgrade/tasks/install_dir_pkg.yml b/roles/obj_upgrade/tasks/install_dir_pkg.yml index 52fc41f6..77244733 100644 --- a/roles/obj_upgrade/tasks/install_dir_pkg.yml +++ b/roles/obj_upgrade/tasks/install_dir_pkg.yml @@ -84,5 +84,4 @@ msg: "No Scale object (spectrum-scale-object) package found {{ obj_extracted_path }}/{{ scale_obj_url }}/spectrum-scale-object*" run_once: true - when: when: ansible_fqdn in scale_protocol_node_list - + when: inventory_hostname in scale_protocol_node_list From a1459f00509b4f859bb66e699eee03ecccbb5c86 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Wed, 17 Nov 2021 20:03:02 +0100 Subject: [PATCH 
018/113] [OBJ] Change 'scale_obj_nodes_list' to use 'inventory_hostname' Signed-off-by: Achim Christ --- roles/obj_install/tasks/install_local_pkg.yml | 7 +++---- roles/obj_install/tasks/install_pmswift.yml | 5 ++--- roles/obj_install/tasks/install_remote_pkg.yml | 5 ++--- roles/obj_install/tasks/yum/install.yml | 7 +++---- roles/obj_prepare/tasks/check.yml | 2 +- roles/obj_upgrade/tasks/install_local_pkg.yml | 7 +++---- roles/obj_upgrade/tasks/install_pmswift.yml | 6 ++++-- roles/obj_upgrade/tasks/install_remote_pkg.yml | 5 ++--- roles/obj_verify/tasks/check.yml | 4 ++-- 9 files changed, 22 insertions(+), 26 deletions(-) diff --git a/roles/obj_install/tasks/install_local_pkg.yml b/roles/obj_install/tasks/install_local_pkg.yml index 8ca34192..39e1d326 100644 --- a/roles/obj_install/tasks/install_local_pkg.yml +++ b/roles/obj_install/tasks/install_local_pkg.yml @@ -99,13 +99,13 @@ # Find object rpms -- block: ## when: ansible_fqdn in scale_obj_nodes_list +- block: ## when: inventory_hostname in scale_obj_nodes_list - name: install | obj path set_fact: scale_obj_url: 'object_rpms/rhel8' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' - - name: install | Find all packages + - name: install | Find all packages find: paths: "{{ obj_extracted_path }}/{{ scale_obj_url }}" patterns: "*.rpm" @@ -132,5 +132,4 @@ msg: "No Scale object spectrum-scale-object package found {{ obj_extracted_path }}/{{ scale_obj_url }}/spectrum-scale-object*" run_once: true - when: ansible_fqdn in scale_obj_nodes_list - + when: inventory_hostname in scale_obj_nodes_list diff --git a/roles/obj_install/tasks/install_pmswift.yml b/roles/obj_install/tasks/install_pmswift.yml index ec93d44b..2d4345a2 100644 --- a/roles/obj_install/tasks/install_pmswift.yml +++ b/roles/obj_install/tasks/install_pmswift.yml @@ -5,7 +5,7 @@ # Add pmswift rpm -- block: ## when: ansible_fqdn in scale_obj_nodes_list +- block: ## when: inventory_hostname in scale_obj_nodes_list - name: install | pmswift path set_fact: @@ -45,5 +45,4 @@ scale_install_all_packages: "{{ scale_install_all_packages + pmswift_package }}" when: scale_install_repository_url is undefined - when: ansible_fqdn in scale_obj_nodes_list - + when: inventory_hostname in scale_obj_nodes_list diff --git a/roles/obj_install/tasks/install_remote_pkg.yml b/roles/obj_install/tasks/install_remote_pkg.yml index cfc4ae82..14aee865 100644 --- a/roles/obj_install/tasks/install_remote_pkg.yml +++ b/roles/obj_install/tasks/install_remote_pkg.yml @@ -73,7 +73,7 @@ # Find object rpms -- block: ## when: ansible_fqdn in scale_obj_nodes_list +- block: ## when: inventory_hostname in scale_obj_nodes_list - name: install | obj path set_fact: scale_obj_url: 'object_rpms/rhel8' @@ -106,5 +106,4 @@ msg: "No Scale object spectrum-scale-object package found {{ obj_extracted_path }}/{{ scale_obj_url }}/spectrum-scale-object*" run_once: true - when: ansible_fqdn in scale_obj_nodes_list - + when: inventory_hostname in scale_obj_nodes_list diff --git a/roles/obj_install/tasks/yum/install.yml b/roles/obj_install/tasks/yum/install.yml index 59506d16..d21e99c3 100644 --- a/roles/obj_install/tasks/yum/install.yml +++ b/roles/obj_install/tasks/yum/install.yml @@ -4,12 +4,12 @@ name: "{{ scale_install_all_packages }}" state: present disable_gpg_check: "{{ scale_disable_gpgcheck }}" - when: ansible_fqdn in scale_obj_nodes_list + when: inventory_hostname in scale_obj_nodes_list - name: install | Get installed spectrum-scale-object shell: rpm -qa | grep 
spectrum-scale-object register: scale_package_status - when: ansible_fqdn in scale_obj_nodes_list + when: inventory_hostname in scale_obj_nodes_list ignore_errors: true args: warn: false @@ -19,5 +19,4 @@ that: - scale_package_status.rc == 0 fail_msg: "spectrum-scale-object is not installed on {{ ansible_hostname }}" - when: ansible_fqdn in scale_obj_nodes_list - + when: inventory_hostname in scale_obj_nodes_list diff --git a/roles/obj_prepare/tasks/check.yml b/roles/obj_prepare/tasks/check.yml index fe80e7b1..c786fc20 100644 --- a/roles/obj_prepare/tasks/check.yml +++ b/roles/obj_prepare/tasks/check.yml @@ -12,7 +12,7 @@ - name: check | Collect all object nodes set_fact: - scale_obj_nodes_list: "{{ scale_obj_nodes_list + [hostvars[item]['ansible_fqdn']] }}" + scale_obj_nodes_list: "{{ scale_obj_nodes_list + [hostvars[item]['inventory_hostname']] }}" when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" diff --git a/roles/obj_upgrade/tasks/install_local_pkg.yml b/roles/obj_upgrade/tasks/install_local_pkg.yml index c06510a2..843fd8cf 100644 --- a/roles/obj_upgrade/tasks/install_local_pkg.yml +++ b/roles/obj_upgrade/tasks/install_local_pkg.yml @@ -101,13 +101,13 @@ # Find object rpms -- block: ## when: ansible_fqdn in scale_obj_nodes_list +- block: ## when: inventory_hostname in scale_obj_nodes_list - name: upgrade | obj path set_fact: scale_obj_url: 'object_rpms/rhel8' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' - - name: upgrade | Find all packages + - name: upgrade | Find all packages find: paths: "{{ obj_extracted_path }}/{{ scale_obj_url }}" patterns: "*.rpm" @@ -134,5 +134,4 @@ msg: "No Scale object spectrum-scale-object package found {{ obj_extracted_path }}/{{ scale_obj_url }}/spectrum-scale-object*" run_once: true - when: ansible_fqdn in scale_obj_nodes_list - + when: inventory_hostname in scale_obj_nodes_list diff --git a/roles/obj_upgrade/tasks/install_pmswift.yml b/roles/obj_upgrade/tasks/install_pmswift.yml index e0ae0e3b..16ba727a 100644 --- a/roles/obj_upgrade/tasks/install_pmswift.yml +++ b/roles/obj_upgrade/tasks/install_pmswift.yml @@ -5,7 +5,7 @@ # Add pmswift rpm -- block: ## when: ansible_fqdn in scale_obj_nodes_list +- block: ## when: inventory_hostname in scale_obj_nodes_list - name: upgrade | pmswift path set_fact: scale_obj_url: 'zimon_rpms/rhel8' @@ -24,9 +24,11 @@ - "{{ scale_obj_sensors_packages }}" when: scale_install_repository_url is defined - - name: upgrade | Add pmswift package name + - name: upgrade | Add pmswift package name vars: pmswift_package: "{{ object_package.files | map(attribute='path') | list}}" set_fact: scale_install_all_packages: "{{ scale_install_all_packages + pmswift_package }}" when: scale_install_repository_url is undefined + + when: inventory_hostname in scale_obj_nodes_list diff --git a/roles/obj_upgrade/tasks/install_remote_pkg.yml b/roles/obj_upgrade/tasks/install_remote_pkg.yml index 0620ae7c..f632383a 100644 --- a/roles/obj_upgrade/tasks/install_remote_pkg.yml +++ b/roles/obj_upgrade/tasks/install_remote_pkg.yml @@ -73,7 +73,7 @@ # Find object rpms -- block: ## when: ansible_fqdn in scale_obj_nodes_list +- block: ## when: inventory_hostname in scale_obj_nodes_list - name: upgrade | obj path set_fact: scale_obj_url: 'object_rpms/rhel8' @@ -106,5 +106,4 @@ msg: "No Scale object spectrum-scale-object package found {{ obj_extracted_path }}/{{ scale_obj_url }}/spectrum-scale-object*" run_once: true - 
when: ansible_fqdn in scale_obj_nodes_list - + when: inventory_hostname in scale_obj_nodes_list diff --git a/roles/obj_verify/tasks/check.yml b/roles/obj_verify/tasks/check.yml index a59a5427..1729c381 100644 --- a/roles/obj_verify/tasks/check.yml +++ b/roles/obj_verify/tasks/check.yml @@ -4,7 +4,7 @@ shell: cmd: "{{ scale_command_path }}mmces service list|grep OBJ" register: scale_obj_status - when: ansible_fqdn in scale_obj_nodes_list + when: inventory_hostname in scale_obj_nodes_list failed_when: false - name: postcheck | Check if OBJ is running @@ -12,4 +12,4 @@ that: - scale_obj_status.rc == 0 fail_msg: "OBJ is not active on {{ ansible_hostname }}" - when: ansible_fqdn in scale_obj_nodes_list + when: inventory_hostname in scale_obj_nodes_list From d7609f8d035af39753fbeab1aa51b4dfe1e84fd0 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 18 Nov 2021 05:22:04 +0100 Subject: [PATCH 019/113] [NFS] Change 'scale_nfs_nodes_list' to use 'inventory_hostname' Signed-off-by: Achim Christ --- roles/nfs_install/tasks/apt/install.yml | 4 ++-- roles/nfs_install/tasks/install.yml | 2 +- roles/nfs_install/tasks/yum/install.yml | 2 +- roles/nfs_prepare/tasks/check.yml | 14 +++++++------- roles/nfs_verify/tasks/check.yml | 4 ++-- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/roles/nfs_install/tasks/apt/install.yml b/roles/nfs_install/tasks/apt/install.yml index 3d7b82b1..7859fae0 100644 --- a/roles/nfs_install/tasks/apt/install.yml +++ b/roles/nfs_install/tasks/apt/install.yml @@ -3,12 +3,12 @@ package: name: "{{ scale_install_all_packages }}" state: present - when: scale_install_repository_url is defined and ansible_fqdn in scale_nfs_nodes_list + when: scale_install_repository_url is defined and inventory_hostname in scale_nfs_nodes_list - name: install| Install GPFS NFS deb apt: deb: "{{ item }}" state: present - when: scale_install_repository_url is not defined and ansible_fqdn in scale_nfs_nodes_list + when: scale_install_repository_url is not defined and inventory_hostname in scale_nfs_nodes_list with_items: - "{{ scale_install_all_packages }}" diff --git a/roles/nfs_install/tasks/install.yml b/roles/nfs_install/tasks/install.yml index d405db94..07809c57 100644 --- a/roles/nfs_install/tasks/install.yml +++ b/roles/nfs_install/tasks/install.yml @@ -72,4 +72,4 @@ package: name: rpcbind state: present - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list diff --git a/roles/nfs_install/tasks/yum/install.yml b/roles/nfs_install/tasks/yum/install.yml index d23447df..73ee4575 100644 --- a/roles/nfs_install/tasks/yum/install.yml +++ b/roles/nfs_install/tasks/yum/install.yml @@ -4,4 +4,4 @@ name: "{{ scale_install_all_packages }}" state: present disable_gpg_check: "{{ scale_disable_gpgcheck }}" - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list diff --git a/roles/nfs_prepare/tasks/check.yml b/roles/nfs_prepare/tasks/check.yml index 4dd769a8..afa2042e 100644 --- a/roles/nfs_prepare/tasks/check.yml +++ b/roles/nfs_prepare/tasks/check.yml @@ -11,7 +11,7 @@ - name: check | Collect all nfs nodes set_fact: - scale_nfs_nodes_list: "{{ scale_nfs_nodes_list + [hostvars[item]['ansible_fqdn']] }}" + scale_nfs_nodes_list: "{{ scale_nfs_nodes_list + [hostvars[item]['inventory_hostname']] }}" when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" @@ -31,7 +31,7 @@ shell: cmd: systemctl status nfs-server register: scale_nfs_status - 
when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list ignore_errors: true failed_when: false @@ -41,14 +41,14 @@ - scale_nfs_status.rc > 0 fail_msg: "Service nfs found running on {{ ansible_hostname }}. Which conflicts with the installation of NFS. SUGGESTTED ACTION- Run commands to stop (systemctl stop nfs) and disable (systemctl disable nfs) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list any_errors_fatal: true - name: check | Collect status of service nfs-kernel-server shell: cmd: systemctl status nfs-kernel-server register: scale_nfs_status - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list ignore_errors: true failed_when: false @@ -59,14 +59,14 @@ SUGGESTTED ACTION- Run commands to stop (systemctl stop nfs) and disable (system fail_msg: "Service nfs-kernel-server found running on {{ ansible_hostname }}. Which conflicts with the installation of NFS. SUGGESTTED ACTION Run commands to stop (systemctl stop nfs-kernel-server) and disable (systemctl disable nfs-kernel-server) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list any_errors_fatal: true - name: check | Collect status of service knfs-server shell: cmd: systemctl status knfs-server register: scale_nfs_status - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list ignore_errors: true failed_when: false @@ -77,5 +77,5 @@ this service on node {{ ansible_hostname }}" fail_msg: "Service knfs-kernel-server found running on {{ ansible_hostname }}. Which conflicts with the installation of NFS. SUGGESTTED ACTION Run commands to stop (systemctl stop knfs-server) and disable (systemctl disable knfs-server) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list any_errors_fatal: true diff --git a/roles/nfs_verify/tasks/check.yml b/roles/nfs_verify/tasks/check.yml index 17f2a5f7..058ed741 100644 --- a/roles/nfs_verify/tasks/check.yml +++ b/roles/nfs_verify/tasks/check.yml @@ -3,7 +3,7 @@ shell: cmd: "{{ scale_command_path }}mmces service list|grep NFS" register: scale_nfs_status - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list failed_when: false - name: postcheck | Check if NFS is running @@ -11,4 +11,4 @@ that: - scale_nfs_status.rc == 0 fail_msg: "NFS is not active on {{ ansible_hostname }}" - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list From 0f28a59e555a33314b02279efcc16584d52d544b Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 18 Nov 2021 05:28:24 +0100 Subject: [PATCH 020/113] [SMB] Change 'scale_smb_node_list' to use 'inventory_hostname' Signed-off-by: Achim Christ --- roles/smb_install/tasks/apt/install.yml | 5 ++--- roles/smb_install/tasks/yum/install.yml | 3 +-- roles/smb_install/tasks/zypper/install.yml | 2 +- roles/smb_prepare/tasks/check.yml | 14 +++++++------- roles/smb_verify/tasks/check.yml | 4 ++-- 5 files changed, 13 insertions(+), 15 deletions(-) diff --git a/roles/smb_install/tasks/apt/install.yml b/roles/smb_install/tasks/apt/install.yml index 4e99a32b..439f555f 100644 --- a/roles/smb_install/tasks/apt/install.yml +++ b/roles/smb_install/tasks/apt/install.yml @@ -3,14 +3,13 @@ package: name: "{{ scale_install_all_packages }}" state: 
present - when: scale_install_repository_url is defined and ansible_fqdn in scale_smb_node_list + when: scale_install_repository_url is defined and inventory_hostname in scale_smb_node_list - name: install| Install GPFS SMB deb apt: deb: "{{ item }}" state: present - when: scale_install_repository_url is not defined and ansible_fqdn in scale_smb_node_list + when: scale_install_repository_url is not defined and inventory_hostname in scale_smb_node_list with_items: - "{{ scale_install_all_packages }}" - diff --git a/roles/smb_install/tasks/yum/install.yml b/roles/smb_install/tasks/yum/install.yml index 44aa7ef0..04df3786 100644 --- a/roles/smb_install/tasks/yum/install.yml +++ b/roles/smb_install/tasks/yum/install.yml @@ -4,5 +4,4 @@ name: "{{ scale_install_all_packages }}" state: present disable_gpg_check: "{{ scale_disable_gpgcheck }}" - when: ansible_fqdn in scale_smb_node_list - + when: inventory_hostname in scale_smb_node_list diff --git a/roles/smb_install/tasks/zypper/install.yml b/roles/smb_install/tasks/zypper/install.yml index 351f4f05..ea0e031d 100644 --- a/roles/smb_install/tasks/zypper/install.yml +++ b/roles/smb_install/tasks/zypper/install.yml @@ -4,4 +4,4 @@ name: "{{ scale_install_all_packages }}" state: present disable_gpg_check: no - when: ansible_fqdn in scale_smb_node_list + when: inventory_hostname in scale_smb_node_list diff --git a/roles/smb_prepare/tasks/check.yml b/roles/smb_prepare/tasks/check.yml index 63bf7843..a90ac9b3 100644 --- a/roles/smb_prepare/tasks/check.yml +++ b/roles/smb_prepare/tasks/check.yml @@ -5,7 +5,7 @@ - name: check | Collect all smb nodes set_fact: - scale_smb_node_list: "{{ scale_smb_node_list + [hostvars[item]['ansible_fqdn']] }}" + scale_smb_node_list: "{{ scale_smb_node_list + [hostvars[item]['inventory_hostname']] }}" when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" @@ -30,7 +30,7 @@ that: - ansible_facts.services["smb"].state != "running" fail_msg: "Service smb found running on {{ ansible_hostname }}. Which conflicts with the installation of SMB.SUGGESTTED ACTION- Run commands to stop (systemctl stop smb) and disable (systemctl disable smb) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list and ansible_facts.services["smb"].state is defined + when: inventory_hostname in scale_smb_node_list and ansible_facts.services["smb"].state is defined any_errors_fatal: true - name: check | Check if service smbd is running @@ -38,7 +38,7 @@ that: - ansible_facts.services["smbd"].state != "running" fail_msg: "Service smbd found running on {{ ansible_hostname }}. Which conflicts with the installation of SMB.SUGGESTTED ACTION- Run commands to stop (systemctl stop smbd) and disable (systemctl disable smbd) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list and ansible_facts.services["smbd"].state is defined + when: inventory_hostname in scale_smb_node_list and ansible_facts.services["smbd"].state is defined any_errors_fatal: true - name: check | Check if service winbind is running @@ -46,7 +46,7 @@ that: - ansible_facts.services["winbind"].state != "running" fail_msg: "Service smb found running on {{ ansible_hostname }}. 
Which conflicts with the installation of SMB.SUGGESTTED ACTION- Run commands to stop (systemctl stop winbind) and disable (systemctl disable winbind) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list and ansible_facts.services["winbind"].state is defined + when: inventory_hostname in scale_smb_node_list and ansible_facts.services["winbind"].state is defined any_errors_fatal: true - name: check | Check if service winbindd is running @@ -54,7 +54,7 @@ that: - ansible_facts.services["winbindd"].state != "running" fail_msg: "Service winbindd found running on {{ ansible_hostname }}. Which conflicts with the installation of SMB.SUGGESTTED ACTION- Run commands to stop (systemctl stop winbindd) and disable (systemctl disable winbindd) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list and ansible_facts.services["winbindd"].state is defined + when: inventory_hostname in scale_smb_node_list and ansible_facts.services["winbindd"].state is defined any_errors_fatal: true - name: check | Check if service ctdb is running @@ -62,7 +62,7 @@ that: - ansible_facts.services["ctdb"].state != "running" fail_msg: "Service ctdb found running on {{ ansible_hostname }}. Which conflicts with the installation of SMB.SUGGESTTED ACTION- Run commands to stop (systemctl stop ctdb) and disable (systemctl disable ctdb) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list and ansible_facts.services["ctdb"].state is defined + when: inventory_hostname in scale_smb_node_list and ansible_facts.services["ctdb"].state is defined any_errors_fatal: true - name: check | Check if service ctdbd is running @@ -70,7 +70,7 @@ that: - ansible_facts.services["ctdbd"].state != "running" fail_msg: "Service ctdbd found running on {{ ansible_hostname }}. 
Which conflicts with the installation of SMB.SUGGESTTED ACTION- Run commands to stop (systemctl stop ctdbd) and disable (systemctl disable ctdbd) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list and ansible_facts.services["ctdbd"].state is defined + when: inventory_hostname in scale_smb_node_list and ansible_facts.services["ctdbd"].state is defined any_errors_fatal: true - debug: diff --git a/roles/smb_verify/tasks/check.yml b/roles/smb_verify/tasks/check.yml index c665896c..32ff3b09 100644 --- a/roles/smb_verify/tasks/check.yml +++ b/roles/smb_verify/tasks/check.yml @@ -3,7 +3,7 @@ shell: cmd: "{{ scale_command_path }}mmces service list|grep SMB" register: scale_smb_status - when: ansible_fqdn in scale_smb_node_list + when: inventory_hostname in scale_smb_node_list ignore_errors: true failed_when: false @@ -12,4 +12,4 @@ that: - scale_smb_status.rc == 0 fail_msg: "SMB is not active on {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list + when: inventory_hostname in scale_smb_node_list From 18b725df71908ddf23a0ebe1e76cd0b854a4e402 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 18 Nov 2021 05:35:19 +0100 Subject: [PATCH 021/113] [HDFS] Change 'scale_protocol_nodes_list' to use 'inventory_hostname' Signed-off-by: Achim Christ --- roles/hdfs_install/tasks/yum/install.yml | 3 +-- roles/hdfs_prepare/tasks/check.yml | 7 +++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/roles/hdfs_install/tasks/yum/install.yml b/roles/hdfs_install/tasks/yum/install.yml index 654a4d97..a2921e6d 100644 --- a/roles/hdfs_install/tasks/yum/install.yml +++ b/roles/hdfs_install/tasks/yum/install.yml @@ -4,5 +4,4 @@ name: "{{ scale_install_all_packages }}" state: present disable_gpg_check: "{{ scale_disable_gpgcheck }}" - when: ansible_fqdn in scale_hdfs_nodes_list or ansible_fqdn in scale_protocol_nodes_list - + when: ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_protocol_nodes_list diff --git a/roles/hdfs_prepare/tasks/check.yml b/roles/hdfs_prepare/tasks/check.yml index ce06bb9b..1155bfe9 100644 --- a/roles/hdfs_prepare/tasks/check.yml +++ b/roles/hdfs_prepare/tasks/check.yml @@ -1,6 +1,6 @@ --- - include_tasks: prepare_env.yml - + - debug: msg: "transparency_33_enabled: {{ transparency_33_enabled|bool }}" @@ -12,7 +12,7 @@ - name: check | Collect all protocol nodes set_fact: - scale_protocol_nodes_list: "{{ scale_protocol_nodes_list + [hostvars[hosts]['ansible_fqdn']] }}" + scale_protocol_nodes_list: "{{ scale_protocol_nodes_list + [hostvars[hosts]['inventory_hostname']] }}" when: hostvars[hosts]['is_protocol_node'] is defined and hostvars[hosts]['is_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" @@ -68,7 +68,7 @@ - fail: msg: "Not sufficient CESIPs are assigned in export_ip_pool for HDFS clusters, please add more CESIP and retry." 
- when: + when: - hdfs_cluster_length|int > export_cesip_length|int delegate_to: localhost run_once: true @@ -116,4 +116,3 @@ - debug: msg: "HDFS Precheck ok" when: scale_hdfs_clusters|length == 1 - From f4c71158ad084d63d0d82485f270382a27feaec1 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Wed, 24 Nov 2021 09:12:24 +0100 Subject: [PATCH 022/113] [Perfmon] Extract daemon nodename from 'scale_zimon_collectors' Signed-off-by: Achim Christ --- roles/perfmon_configure/tasks/configure.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/perfmon_configure/tasks/configure.yml b/roles/perfmon_configure/tasks/configure.yml index e900ca73..f17566f6 100644 --- a/roles/perfmon_configure/tasks/configure.yml +++ b/roles/perfmon_configure/tasks/configure.yml @@ -60,7 +60,7 @@ #TODO: added a check for output, but are having problems using the ( collector_nodes | join(',') ) to use when adding nodes. - name: configure | Initialize performance collection vars: - collector_nodes: "{{ groups['scale_zimon_collectors'] | list }}" + collector_nodes: "{{ groups['scale_zimon_collectors'] | map('extract', hostvars, 'scale_daemon_nodename') | list }}" command: /usr/lpp/mmfs/bin/mmperfmon config generate --collectors {{ collector_nodes | join(',') }} register: scale_zimon_conf_pmcollector when: @@ -73,7 +73,7 @@ - name: configure | update performance collection for new node vars: - collector_nodes_new: "{{ groups['scale_zimon_collectors'] | list }}" + collector_nodes_new: "{{ groups['scale_zimon_collectors'] | map('extract', hostvars, 'scale_daemon_nodename') | list }}" command: /usr/lpp/mmfs/bin/mmperfmon config update --collectors "{{ collector_nodes_new | join(',') }}" register: scale_zimon_update_pmcollector when: From bf655bd05c0e8ebe30f2b091dd32815419c80189 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Wed, 24 Nov 2021 09:36:26 +0100 Subject: [PATCH 023/113] [OBJ] Use daemon nodename as cluster hostname (temporary) Signed-off-by: Achim Christ --- roles/obj_configure/tasks/configure.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/roles/obj_configure/tasks/configure.yml b/roles/obj_configure/tasks/configure.yml index 3872c807..e969ecf8 100644 --- a/roles/obj_configure/tasks/configure.yml +++ b/roles/obj_configure/tasks/configure.yml @@ -48,21 +48,21 @@ - name: configure | Set configuration parameter to configure OBJ set_fact: - obj_param: "-g {{ scale_protocols.mountpoint }} -o {{ scale_ces_obj.object_fileset }} --cluster-hostname {{ scale_obj_nodes_list.0 }} --pwd-file {{ scale_ces_obj.pwd_file }}" + obj_param: "-g {{ scale_protocols.mountpoint }} -o {{ scale_ces_obj.object_fileset }} --cluster-hostname {{ hostvars[scale_obj_nodes_list.0]['scale_daemon_nodename'] }} --pwd-file {{ scale_ces_obj.pwd_file }}" delegate_to: "{{ scale_obj_nodes_list.0 }}" when: - not scale_ces_dynamic_obj|bool run_once: True -- name: configure | Check local-keystone is defined +- name: configure | Check local-keystone is defined set_fact: - obj_param: "{{ obj_param }} --local-keystone" - when: + obj_param: "{{ obj_param }} --local-keystone" + when: - scale_ces_obj.local_keystone is defined and scale_ces_obj.local_keystone|bool - not scale_ces_dynamic_obj|bool delegate_to: "{{ scale_obj_nodes_list.0 }}" run_once: True - + - name: configure | Check enable-s3 is defined set_fact: obj_param: "{{ obj_param }} --enable-s3" @@ -80,7 +80,7 @@ - not scale_ces_dynamic_obj|bool delegate_to: "{{ scale_obj_nodes_list.0 }}" run_once: true - + # # Configure Object # @@ -104,8 
+104,8 @@ register: scale_ces_enable_obj_service - name: configure | Show OBJ Service is enabled - debug: - var: scale_ces_enable_obj_service.stdout_lines + debug: + var: scale_ces_enable_obj_service.stdout_lines # Start Object on CES - name: configure | Start OBJ Service @@ -124,4 +124,4 @@ when: obj_enabled is defined and not obj_enabled delegate_to: "{{ scale_obj_nodes_list.0 }}" - run_once: true + run_once: true From 88b6d69ccca8bec0a3894547f5e890c48ac6b0ad Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Wed, 24 Nov 2021 16:27:14 +0100 Subject: [PATCH 024/113] Consistently check if scale_install_repository_url is defined when creating YUM/APT/Zypper repositories Signed-off-by: Achim Christ --- roles/afm_cos_install/tasks/install_repository.yml | 3 +++ roles/afm_cos_upgrade/tasks/install_repository.yml | 3 +++ roles/callhome_install/tasks/install_repository.yml | 3 +++ roles/core_configure/tasks/install_gplbin.yml | 3 ++- roles/core_install/tasks/install_gplbin.yml | 3 +++ roles/core_install/tasks/install_repository.yml | 3 +++ roles/core_upgrade/tasks/install_gplbin.yml | 3 +++ roles/core_upgrade/tasks/install_repository.yml | 3 +++ roles/ece_install/tasks/install_repository.yml | 1 + roles/ece_upgrade/tasks/install_repository.yml | 1 + roles/fal_install/tasks/install_repository.yml | 9 ++++++++- roles/fal_upgrade/tasks/install_repository.yml | 9 ++++++++- roles/gui_install/tasks/install_repository.yml | 3 +++ roles/gui_upgrade/tasks/install_repository.yml | 3 +++ roles/hdfs_install/tasks/install_repository.yml | 1 + roles/hdfs_upgrade/tasks/upgrade_repository.yml | 1 + roles/nfs_install/tasks/install_repository.yml | 7 +++++++ roles/nfs_upgrade/tasks/install_repository.yml | 6 ++++++ roles/obj_install/tasks/install_pmswift.yml | 1 + roles/obj_install/tasks/install_repository.yml | 1 + roles/obj_upgrade/tasks/install_repository.yml | 1 + roles/perfmon_install/tasks/install_repository.yml | 6 +++++- roles/perfmon_upgrade/tasks/install_repository.yml | 6 +++++- roles/smb_install/tasks/install_repository.yml | 3 +++ roles/smb_upgrade/tasks/install_repository.yml | 3 +++ 25 files changed, 81 insertions(+), 5 deletions(-) diff --git a/roles/afm_cos_install/tasks/install_repository.yml b/roles/afm_cos_install/tasks/install_repository.yml index de6e4399..069bbf1b 100644 --- a/roles/afm_cos_install/tasks/install_repository.yml +++ b/roles/afm_cos_install/tasks/install_repository.yml @@ -16,6 +16,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure HPT APT repository @@ -29,6 +30,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure HPT repository @@ -41,6 +43,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/afm_cos_upgrade/tasks/install_repository.yml b/roles/afm_cos_upgrade/tasks/install_repository.yml index d01cad31..1b732d47 100644 --- a/roles/afm_cos_upgrade/tasks/install_repository.yml +++ b/roles/afm_cos_upgrade/tasks/install_repository.yml @@ -16,6 +16,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure HPT APT repository @@ -29,6 +30,7 @@ 
mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure HPT repository @@ -41,6 +43,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/callhome_install/tasks/install_repository.yml b/roles/callhome_install/tasks/install_repository.yml index 6e330361..66e31f80 100755 --- a/roles/callhome_install/tasks/install_repository.yml +++ b/roles/callhome_install/tasks/install_repository.yml @@ -16,6 +16,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure Callhome APT repository @@ -29,6 +30,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure Callhome zypper repository @@ -40,6 +42,7 @@ disable_gpg_check: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/core_configure/tasks/install_gplbin.yml b/roles/core_configure/tasks/install_gplbin.yml index 2f149f21..a750e9cc 100644 --- a/roles/core_configure/tasks/install_gplbin.yml +++ b/roles/core_configure/tasks/install_gplbin.yml @@ -13,8 +13,9 @@ state: present notify: yum-clean-metadata when: - - ansible_pkg_mgr == 'yum' + - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' # # Add kernel extension prereqs diff --git a/roles/core_install/tasks/install_gplbin.yml b/roles/core_install/tasks/install_gplbin.yml index a3a5bfbf..620e1f2b 100644 --- a/roles/core_install/tasks/install_gplbin.yml +++ b/roles/core_install/tasks/install_gplbin.yml @@ -15,6 +15,7 @@ when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' - name: install | Configure GPL module repository apt_repository: @@ -26,6 +27,7 @@ when: - ansible_pkg_mgr == 'apt' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' - name: install | Configure GPL module repository zypper_repository: @@ -37,6 +39,7 @@ when: - ansible_pkg_mgr == 'zypper' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' # # Add kernel extension prereqs # diff --git a/roles/core_install/tasks/install_repository.yml b/roles/core_install/tasks/install_repository.yml index bcb0311d..f04f0865 100644 --- a/roles/core_install/tasks/install_repository.yml +++ b/roles/core_install/tasks/install_repository.yml @@ -17,6 +17,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure apt repository @@ -32,6 +33,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure zypper repository @@ -46,6 +48,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/core_upgrade/tasks/install_gplbin.yml 
b/roles/core_upgrade/tasks/install_gplbin.yml index a9660c46..7b43d86c 100644 --- a/roles/core_upgrade/tasks/install_gplbin.yml +++ b/roles/core_upgrade/tasks/install_gplbin.yml @@ -15,6 +15,7 @@ when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' - name: upgrade | Configure GPL module repository apt_repository: @@ -26,6 +27,7 @@ when: - ansible_pkg_mgr == 'apt' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' - name: upgrade | Configure GPL module repository zypper_repository: @@ -37,6 +39,7 @@ when: - ansible_pkg_mgr == 'zypper' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' # # Add kernel extension prereqs # diff --git a/roles/core_upgrade/tasks/install_repository.yml b/roles/core_upgrade/tasks/install_repository.yml index 8cc53aed..131dc5c0 100644 --- a/roles/core_upgrade/tasks/install_repository.yml +++ b/roles/core_upgrade/tasks/install_repository.yml @@ -20,6 +20,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure apt repository @@ -35,6 +36,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure zypper repository @@ -49,6 +51,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/ece_install/tasks/install_repository.yml b/roles/ece_install/tasks/install_repository.yml index 2e952a3c..4cb384cd 100644 --- a/roles/ece_install/tasks/install_repository.yml +++ b/roles/ece_install/tasks/install_repository.yml @@ -20,6 +20,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Add GPFS gnr packages to list diff --git a/roles/ece_upgrade/tasks/install_repository.yml b/roles/ece_upgrade/tasks/install_repository.yml index 44c462c2..a9408e84 100644 --- a/roles/ece_upgrade/tasks/install_repository.yml +++ b/roles/ece_upgrade/tasks/install_repository.yml @@ -20,6 +20,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Add GPFS gnr packages to list diff --git a/roles/fal_install/tasks/install_repository.yml b/roles/fal_install/tasks/install_repository.yml index 8e2e9b39..1e7b9f93 100644 --- a/roles/fal_install/tasks/install_repository.yml +++ b/roles/fal_install/tasks/install_repository.yml @@ -40,6 +40,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure apt repository @@ -55,6 +56,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure zypper repository @@ -69,8 +71,9 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - + - name: install | Configure fal YUM repository yum_repository: name: 
spectrum-scale-fal @@ -83,6 +86,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure fal APT repository @@ -96,6 +100,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure fal repository @@ -107,6 +112,8 @@ state: present when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined + - scale_install_repository_url != 'existing' # # Add FAL packages diff --git a/roles/fal_upgrade/tasks/install_repository.yml b/roles/fal_upgrade/tasks/install_repository.yml index 8420eaa7..49282c20 100644 --- a/roles/fal_upgrade/tasks/install_repository.yml +++ b/roles/fal_upgrade/tasks/install_repository.yml @@ -47,6 +47,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure apt repository @@ -62,6 +63,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure zypper repository @@ -76,8 +78,9 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - + - name: upgrade | Configure fal YUM repository yum_repository: name: spectrum-scale-fal @@ -90,6 +93,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure fal APT repository @@ -103,6 +107,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure fal repository @@ -114,6 +119,8 @@ state: present when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined + - scale_install_repository_url != 'existing' # # Add FAL packages diff --git a/roles/gui_install/tasks/install_repository.yml b/roles/gui_install/tasks/install_repository.yml index 21156435..70396d60 100644 --- a/roles/gui_install/tasks/install_repository.yml +++ b/roles/gui_install/tasks/install_repository.yml @@ -16,6 +16,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure gui APT repository @@ -29,6 +30,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure GUI repository @@ -41,6 +43,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/gui_upgrade/tasks/install_repository.yml b/roles/gui_upgrade/tasks/install_repository.yml index 6b4a50d8..c1f1fb00 100644 --- a/roles/gui_upgrade/tasks/install_repository.yml +++ b/roles/gui_upgrade/tasks/install_repository.yml @@ -16,6 +16,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure gui APT repository @@ -29,6 +30,7 @@ mode: 0777 when: - 
ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure GUI repository @@ -41,6 +43,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/hdfs_install/tasks/install_repository.yml b/roles/hdfs_install/tasks/install_repository.yml index 14bae69c..6ebb0e20 100644 --- a/roles/hdfs_install/tasks/install_repository.yml +++ b/roles/hdfs_install/tasks/install_repository.yml @@ -21,6 +21,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Add GPFS hdfs packages to list diff --git a/roles/hdfs_upgrade/tasks/upgrade_repository.yml b/roles/hdfs_upgrade/tasks/upgrade_repository.yml index e04047e9..e1933fed 100644 --- a/roles/hdfs_upgrade/tasks/upgrade_repository.yml +++ b/roles/hdfs_upgrade/tasks/upgrade_repository.yml @@ -24,6 +24,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Add GPFS hdfs packages to list diff --git a/roles/nfs_install/tasks/install_repository.yml b/roles/nfs_install/tasks/install_repository.yml index 1a1422b1..adedb4e1 100644 --- a/roles/nfs_install/tasks/install_repository.yml +++ b/roles/nfs_install/tasks/install_repository.yml @@ -86,6 +86,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure nfs APT repository @@ -99,6 +100,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure smb APT repository @@ -112,6 +114,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure nfs zypper repository @@ -123,6 +126,7 @@ disable_gpg_check: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install|configure pm-ganesha YUM repository @@ -137,6 +141,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure pm-ganesha APT repository @@ -150,6 +155,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure pm-ganesha zypper repository @@ -162,6 +168,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Add GPFS nfs packages to list diff --git a/roles/nfs_upgrade/tasks/install_repository.yml b/roles/nfs_upgrade/tasks/install_repository.yml index 90a52e57..d77016d2 100644 --- a/roles/nfs_upgrade/tasks/install_repository.yml +++ b/roles/nfs_upgrade/tasks/install_repository.yml @@ -66,6 +66,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: 
upgrade | Configure nfs APT repository @@ -79,6 +80,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure nfs zypper repository @@ -90,6 +92,7 @@ disable_gpg_check: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install|configure pm-ganesha YUM repository @@ -104,6 +107,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure pm-ganesha APT repository @@ -117,6 +121,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure pm-ganesha zypper repository @@ -129,6 +134,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Add GPFS nfs packages to list diff --git a/roles/obj_install/tasks/install_pmswift.yml b/roles/obj_install/tasks/install_pmswift.yml index ec93d44b..9ff116b9 100644 --- a/roles/obj_install/tasks/install_pmswift.yml +++ b/roles/obj_install/tasks/install_pmswift.yml @@ -23,6 +23,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Find pmswift packages diff --git a/roles/obj_install/tasks/install_repository.yml b/roles/obj_install/tasks/install_repository.yml index 92c05e2e..f8281978 100644 --- a/roles/obj_install/tasks/install_repository.yml +++ b/roles/obj_install/tasks/install_repository.yml @@ -16,6 +16,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Add GPFS object packages to list diff --git a/roles/obj_upgrade/tasks/install_repository.yml b/roles/obj_upgrade/tasks/install_repository.yml index 202baa2b..7b3d210e 100644 --- a/roles/obj_upgrade/tasks/install_repository.yml +++ b/roles/obj_upgrade/tasks/install_repository.yml @@ -16,6 +16,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Add GPFS object packages to list diff --git a/roles/perfmon_install/tasks/install_repository.yml b/roles/perfmon_install/tasks/install_repository.yml index 66ec7112..c3ab021e 100644 --- a/roles/perfmon_install/tasks/install_repository.yml +++ b/roles/perfmon_install/tasks/install_repository.yml @@ -55,6 +55,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure zimon APT repository @@ -68,6 +69,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure ZIMon repository @@ -80,12 +82,14 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined + - scale_install_repository_url != 'existing' - name: install | package methods set_fact: scale_zimon_sensors_packages: "{{ 
scale_zimon_sensors_packages }}" scale_zimon_collector_packages: "{{ scale_zimon_collector_packages }}" - when: ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + when: ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' - name: install | package methods set_fact: diff --git a/roles/perfmon_upgrade/tasks/install_repository.yml b/roles/perfmon_upgrade/tasks/install_repository.yml index dacc7c87..abd705c8 100644 --- a/roles/perfmon_upgrade/tasks/install_repository.yml +++ b/roles/perfmon_upgrade/tasks/install_repository.yml @@ -57,6 +57,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure zimon APT repository @@ -70,6 +71,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure ZIMon repository @@ -82,6 +84,8 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined + - scale_install_repository_url != 'existing' - name: upgrade | package methods set_fact: @@ -180,7 +184,7 @@ - pmswiftd when: - scale_pmswift_status.rc is defined and scale_pmswift_status.rc == 0 - when: + when: - (is_scale_pmswift_pkg_installed | bool) - name: upgrade | pmswift packages to list diff --git a/roles/smb_install/tasks/install_repository.yml b/roles/smb_install/tasks/install_repository.yml index 6baa89f7..312cc102 100644 --- a/roles/smb_install/tasks/install_repository.yml +++ b/roles/smb_install/tasks/install_repository.yml @@ -46,6 +46,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure smb zypper repository @@ -57,6 +58,7 @@ disable_gpg_check: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure smb APT repository @@ -70,6 +72,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Add GPFS smb packages to list diff --git a/roles/smb_upgrade/tasks/install_repository.yml b/roles/smb_upgrade/tasks/install_repository.yml index cbfe8dc7..0a9c2412 100644 --- a/roles/smb_upgrade/tasks/install_repository.yml +++ b/roles/smb_upgrade/tasks/install_repository.yml @@ -46,6 +46,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure smb zypper repository @@ -57,6 +58,7 @@ disable_gpg_check: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure smb APT repository @@ -70,6 +72,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Add GPFS smb packages to list From 6f22fc8df1f3e7e689f6041242c3f591d68b85e9 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 25 Nov 2021 15:24:25 +0100 Subject: [PATCH 025/113] Rename 'is_protocol_node' to 'scale_protocol_node' Signed-off-by: Achim Christ --- roles/ces_common/tasks/check.yml | 6 +++--- roles/ece_prepare/tasks/check.yml | 4 ++-- 
roles/hdfs_prepare/tasks/check.yml | 2 +- roles/nfs_prepare/tasks/check.yml | 2 +- roles/obj_prepare/tasks/check.yml | 4 ++-- roles/smb_prepare/tasks/check.yml | 2 +- samples/set_json_variables.yml | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/roles/ces_common/tasks/check.yml b/roles/ces_common/tasks/check.yml index 9e571685..4eaa9737 100644 --- a/roles/ces_common/tasks/check.yml +++ b/roles/ces_common/tasks/check.yml @@ -17,7 +17,7 @@ - name: check | Collect all protocol nodes set_fact: scale_protocol_node_list: "{{ scale_protocol_node_list + [hostvars[item]['scale_daemon_nodename']] }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" delegate_to: localhost @@ -34,7 +34,7 @@ - name: check | Collect all protocol node OS set_fact: scale_os_list: "{{ scale_os_list + [hostvars[item]['ansible_distribution']] }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" delegate_to: localhost @@ -50,7 +50,7 @@ - name: check | Collect all protocol node architecture set_fact: scale_arch_list: "{{ scale_arch_list + [hostvars[item]['ansible_architecture']] }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" delegate_to: localhost diff --git a/roles/ece_prepare/tasks/check.yml b/roles/ece_prepare/tasks/check.yml index 6367d4f3..b4d26571 100644 --- a/roles/ece_prepare/tasks/check.yml +++ b/roles/ece_prepare/tasks/check.yml @@ -22,9 +22,9 @@ - name: check | Check if ece node is not protocol node assert: that: - - not hostvars[item]['is_protocol_node']|bool + - not hostvars[item]['scale_protocol_node']|bool fail_msg: "ECE node cannot be protocol node" - when: hostvars[item]['is_protocol_node'] is defined + when: hostvars[item]['scale_protocol_node'] is defined with_items: - "{{ scale_ece_nodes_list }}" run_once: true diff --git a/roles/hdfs_prepare/tasks/check.yml b/roles/hdfs_prepare/tasks/check.yml index ce06bb9b..e29978bf 100644 --- a/roles/hdfs_prepare/tasks/check.yml +++ b/roles/hdfs_prepare/tasks/check.yml @@ -13,7 +13,7 @@ - name: check | Collect all protocol nodes set_fact: scale_protocol_nodes_list: "{{ scale_protocol_nodes_list + [hostvars[hosts]['ansible_fqdn']] }}" - when: hostvars[hosts]['is_protocol_node'] is defined and hostvars[hosts]['is_protocol_node']|bool + when: hostvars[hosts]['scale_protocol_node'] is defined and hostvars[hosts]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" loop_control: diff --git a/roles/nfs_prepare/tasks/check.yml b/roles/nfs_prepare/tasks/check.yml index 4dd769a8..d61b7cb1 100644 --- a/roles/nfs_prepare/tasks/check.yml +++ b/roles/nfs_prepare/tasks/check.yml @@ -12,7 +12,7 @@ - name: check | Collect all nfs nodes set_fact: scale_nfs_nodes_list: "{{ scale_nfs_nodes_list + [hostvars[item]['ansible_fqdn']] }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" 
delegate_to: localhost diff --git a/roles/obj_prepare/tasks/check.yml b/roles/obj_prepare/tasks/check.yml index fe80e7b1..8678ecea 100644 --- a/roles/obj_prepare/tasks/check.yml +++ b/roles/obj_prepare/tasks/check.yml @@ -13,7 +13,7 @@ - name: check | Collect all object nodes set_fact: scale_obj_nodes_list: "{{ scale_obj_nodes_list + [hostvars[item]['ansible_fqdn']] }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" delegate_to: localhost @@ -34,6 +34,6 @@ Object is only supported for Rhel 8 and higher! with_items: - "{{ ansible_play_hosts }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool delegate_to: localhost run_once: true diff --git a/roles/smb_prepare/tasks/check.yml b/roles/smb_prepare/tasks/check.yml index 63bf7843..3eb6678f 100644 --- a/roles/smb_prepare/tasks/check.yml +++ b/roles/smb_prepare/tasks/check.yml @@ -6,7 +6,7 @@ - name: check | Collect all smb nodes set_fact: scale_smb_node_list: "{{ scale_smb_node_list + [hostvars[item]['ansible_fqdn']] }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" delegate_to: localhost diff --git a/samples/set_json_variables.yml b/samples/set_json_variables.yml index 2b8386af..30a6abad 100644 --- a/samples/set_json_variables.yml +++ b/samples/set_json_variables.yml @@ -58,7 +58,7 @@ scale_remotemount_storage_gui_hostname: "{{ scale_remotemount.storage_gui_hostname | default(omit) }}" scale_remotemount_storage_filesystem_name: "{{ scale_remotemount.storage_filesystem_name | default(omit) }}" scale_sync_replication_config: "{{ scale_cluster.scale_sync_replication_config | default(false) }}" - is_protocol_node: "{{ item.is_protocol_node | default(false) }}" + scale_protocol_node: "{{ item.is_protocol_node | default(false) }}" scale_callhome_params: "{{ scale_callhome_params | default(omit) }}" scale_protocols: "{{ scale_protocols | default(omit) }}" scale_hdfs_cluster: "{{ scale_hdfs_cluster | default(omit) }}" From 1d0c29db5e5ebe71309102d543c605281d75db22 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 25 Nov 2021 15:30:30 +0100 Subject: [PATCH 026/113] Rename 'is_admin_node' to 'scale_admin_node' Signed-off-by: Achim Christ --- roles/core_configure/defaults/main.yml | 4 ++-- roles/core_configure/tasks/check.yml | 11 ++++++----- roles/core_configure/tasks/removenode.yml | 2 +- samples/set_json_variables.yml | 2 +- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/roles/core_configure/defaults/main.yml b/roles/core_configure/defaults/main.yml index c7a23844..7769cc72 100644 --- a/roles/core_configure/defaults/main.yml +++ b/roles/core_configure/defaults/main.yml @@ -57,7 +57,7 @@ scale_storage_filesystem_defaults: defaultDataReplicas: 1 numNodes: 32 automaticMountOption: true - + ## defaultMountPoint will be this prefix, followed by the filesystem name defaultMountPoint_prefix: /mnt/ ## Overwrite existing NSDs - if set to 'true' then disks will *not* be checked @@ -83,4 +83,4 @@ scale_node_role_change: true scale_node_update_check: true ## admin node flag -is_admin_node: false 
+scale_admin_node: false diff --git a/roles/core_configure/tasks/check.yml b/roles/core_configure/tasks/check.yml index d87e881c..b7a2af3f 100644 --- a/roles/core_configure/tasks/check.yml +++ b/roles/core_configure/tasks/check.yml @@ -22,13 +22,14 @@ scale_admin_nodename: "{{ scale_admin_nodename }}" when: hostvars[inventory_hostname].scale_admin_nodename is undefined -- set_fact: is_admin_node=false - when: hostvars[inventory_hostname].is_admin_node is undefined +- set_fact: + scale_admin_node: false + when: hostvars[inventory_hostname].scale_admin_node is undefined - name: check | Assign default admin nodes set_fact: - is_admin_node: true - when: true not in ansible_play_hosts | map('extract', hostvars, 'is_admin_node') | map('bool') | list + scale_admin_node: true + when: true not in ansible_play_hosts | map('extract', hostvars, 'scale_admin_node') | map('bool') | list with_sequence: start=1 end={{ [ ansible_play_hosts | length, 1 ] | min }} run_once: true delegate_to: "{{ ansible_play_hosts[item | int - 1] }}" @@ -38,7 +39,7 @@ add_host: name: "{{ item }}" groups: scale_cluster_admin_nodes - when: hostvars[item].is_admin_node is defined and hostvars[item].is_admin_node | bool + when: hostvars[item].scale_admin_node is defined and hostvars[item].scale_admin_node | bool with_items: "{{ ansible_play_hosts }}" changed_when: false diff --git a/roles/core_configure/tasks/removenode.yml b/roles/core_configure/tasks/removenode.yml index a6aafbf6..5df9c6d7 100644 --- a/roles/core_configure/tasks/removenode.yml +++ b/roles/core_configure/tasks/removenode.yml @@ -22,7 +22,7 @@ groups: scale_cluster_members when: - hostvars[item].scale_state is defined and hostvars[item].scale_state == 'present' - - hostvars[item].is_admin_node is defined and hostvars[item].is_admin_node|bool + - hostvars[item].scale_admin_node is defined and hostvars[item].scale_admin_node|bool - hostvars[item].scale_cluster_clusterId.stdout with_items: "{{ ansible_play_hosts }}" changed_when: false diff --git a/samples/set_json_variables.yml b/samples/set_json_variables.yml index 30a6abad..f4f5f376 100644 --- a/samples/set_json_variables.yml +++ b/samples/set_json_variables.yml @@ -37,7 +37,7 @@ scale_cluster_gui: "{{ item.is_gui_server | default(false) }}" scale_zimon_collector: "{{ item.is_collector_node | default(false) }}" state: "{{ item.state | default('present') }}" - is_admin_node: "{{ item.is_admin_node | default('true') }}" + scale_admin_node: "{{ item.is_admin_node | default('true') }}" scale_nodeclass: "{{ item.scale_nodeclass | default(omit) }}" scale_config: "{{ scale_config | default(omit) }}" ansible_ssh_private_key_file: "{{ item.ansible_ssh_private_key_file | default(omit) }}" From 496e2c07f25124fc305f78eb678410af62171f7b Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 25 Nov 2021 15:36:29 +0100 Subject: [PATCH 027/113] Rename variables in sample inventory file Signed-off-by: Achim Christ --- samples/hosts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/samples/hosts b/samples/hosts index 71e20323..30562743 100644 --- a/samples/hosts +++ b/samples/hosts @@ -1,6 +1,6 @@ # hosts: # Sample host file for deploying IBM Spectrum Scale (GPFS) cluster [cluster01] -host-vm1 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false is_protocol_node=true -host-vm2 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false is_protocol_node=false -host-vm3 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false is_protocol_node=false 
+host-vm1 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false scale_protocol_node=true +host-vm2 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false scale_protocol_node=false +host-vm3 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false scale_protocol_node=false From e51681f03f0d702a4ffa85ad2a79306e0bedffef Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 25 Nov 2021 15:37:10 +0100 Subject: [PATCH 028/113] Rename 'is_nsd_server' to 'scale_nsd_server' Signed-off-by: Achim Christ --- roles/ece_prepare/tasks/check.yml | 5 ++--- samples/set_json_variables.yml | 1 + 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/ece_prepare/tasks/check.yml b/roles/ece_prepare/tasks/check.yml index b4d26571..030b8b3d 100644 --- a/roles/ece_prepare/tasks/check.yml +++ b/roles/ece_prepare/tasks/check.yml @@ -44,11 +44,10 @@ - name: check | Check if ece node is not nsd node assert: that: - - not hostvars[item]['is_nsd_server']|bool + - not hostvars[item]['scale_nsd_server']|bool fail_msg: "ECE node cannot be nsd server" - when: hostvars[item]['is_nsd_server'] is defined + when: hostvars[item]['scale_nsd_server'] is defined with_items: - "{{ scale_ece_nodes_list }}" run_once: true any_errors_fatal: true - diff --git a/samples/set_json_variables.yml b/samples/set_json_variables.yml index f4f5f376..b550d74c 100644 --- a/samples/set_json_variables.yml +++ b/samples/set_json_variables.yml @@ -36,6 +36,7 @@ scale_cluster_manager: "{{ item.is_manager_node | default(false) }}" scale_cluster_gui: "{{ item.is_gui_server | default(false) }}" scale_zimon_collector: "{{ item.is_collector_node | default(false) }}" + scale_nsd_server: "{{ item.is_nsd_server | default(false) }}" state: "{{ item.state | default('present') }}" scale_admin_node: "{{ item.is_admin_node | default('true') }}" scale_nodeclass: "{{ item.scale_nodeclass | default(omit) }}" From 59c95fc7a435691aa31f6461348321b9cbd8ec39 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 25 Nov 2021 15:41:27 +0100 Subject: [PATCH 029/113] Rename 'gpfs_cluster_system_profile' to 'scale_cluster_system_profile' Signed-off-by: Achim Christ --- roles/core_configure/tasks/cluster.yml | 6 +++--- roles/core_configure/vars/main.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/core_configure/tasks/cluster.yml b/roles/core_configure/tasks/cluster.yml index 624211ba..d1030b83 100644 --- a/roles/core_configure/tasks/cluster.yml +++ b/roles/core_configure/tasks/cluster.yml @@ -79,7 +79,7 @@ delegate_to: localhost when: - scale_cluster_profile_name is defined and scale_cluster_profile_name != 'None' - - scale_cluster_profile_name not in gpfs_cluster_system_profile + - scale_cluster_profile_name not in scale_cluster_system_profile - block: - name: cluster | cluster profile name validation @@ -98,7 +98,7 @@ delegate_to: localhost when: - scale_cluster_profile_name is defined and scale_cluster_profile_name != 'None' - - scale_cluster_profile_name not in gpfs_cluster_system_profile + - scale_cluster_profile_name not in scale_cluster_system_profile - block: - name: cluster | Copy user defined profile @@ -108,7 +108,7 @@ mode: '0444' when: - scale_cluster_profile_name is defined and scale_cluster_profile_name != 'None' - - scale_cluster_profile_name not in gpfs_cluster_system_profile + - scale_cluster_profile_name not in scale_cluster_system_profile # # Create new cluster diff --git a/roles/core_configure/vars/main.yml b/roles/core_configure/vars/main.yml index 
7abdb4cc..aaef8ee8 100644 --- a/roles/core_configure/vars/main.yml +++ b/roles/core_configure/vars/main.yml @@ -25,7 +25,7 @@ scale_active_states: - active # scale supported profile -gpfs_cluster_system_profile: +scale_cluster_system_profile: - gpfsprotocoldefaults - gpfsprotocolrandomio From 2af91b514d038acd8c82abd9783418dbc0f2ef58 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 25 Nov 2021 16:05:30 +0100 Subject: [PATCH 030/113] Rename 'scale_fileauditlogging_enable' to 'scale_fal_enable' Signed-off-by: Achim Christ --- roles/fal_configure/defaults/main.yml | 2 +- roles/fal_configure/tasks/main.yml | 4 ++-- roles/fal_install/defaults/main.yml | 2 +- roles/fal_install/tasks/install_dir_pkg.yml | 2 +- roles/fal_install/tasks/install_local_pkg.yml | 2 +- roles/fal_install/tasks/install_remote_pkg.yml | 2 +- roles/fal_install/tasks/install_repository.yml | 2 +- roles/fal_install/tasks/main.yml | 2 +- roles/fal_upgrade/defaults/main.yml | 2 +- roles/fal_upgrade/tasks/install_dir_pkg.yml | 2 +- roles/fal_upgrade/tasks/install_local_pkg.yml | 2 +- roles/fal_upgrade/tasks/install_remote_pkg.yml | 2 +- roles/fal_upgrade/tasks/install_repository.yml | 2 +- roles/fal_upgrade/tasks/main.yml | 2 +- roles/fal_verify/tasks/main.yml | 2 +- 15 files changed, 16 insertions(+), 16 deletions(-) diff --git a/roles/fal_configure/defaults/main.yml b/roles/fal_configure/defaults/main.yml index efa0389d..687fdbd7 100644 --- a/roles/fal_configure/defaults/main.yml +++ b/roles/fal_configure/defaults/main.yml @@ -3,7 +3,7 @@ # either edit this file or define your own variables to override the defaults ## Flag to enable fileauditlogging -scale_fileauditlogging_enable: true +scale_fal_enable: true ## Default filesystem parameters for file audit logging- ## can be overridden for each filesystem individually diff --git a/roles/fal_configure/tasks/main.yml b/roles/fal_configure/tasks/main.yml index 54ba3778..2fda5775 100644 --- a/roles/fal_configure/tasks/main.yml +++ b/roles/fal_configure/tasks/main.yml @@ -3,13 +3,13 @@ - import_tasks: configure.yml tags: configure when: - - scale_fileauditlogging_enable | bool + - scale_fal_enable | bool - scale_filesystem is undefined - scale_storage is defined - import_tasks: configure_fal.yml tags: configure when: - - scale_fileauditlogging_enable | bool + - scale_fal_enable | bool - scale_filesystem is defined - scale_storage is undefined diff --git a/roles/fal_install/defaults/main.yml b/roles/fal_install/defaults/main.yml index efdccd2b..12bfc9f1 100644 --- a/roles/fal_install/defaults/main.yml +++ b/roles/fal_install/defaults/main.yml @@ -19,7 +19,7 @@ scale_auditlogging_packages: - gpfs.java ## Flag to enable fileauditlogging -scale_fileauditlogging_enable: true +scale_fal_enable: true ## To Enabled output from Ansible task in stdout and stderr for some tasks. 
## Run the playbook with -vv diff --git a/roles/fal_install/tasks/install_dir_pkg.yml b/roles/fal_install/tasks/install_dir_pkg.yml index cb880dfe..2b49813d 100644 --- a/roles/fal_install/tasks/install_dir_pkg.yml +++ b/roles/fal_install/tasks/install_dir_pkg.yml @@ -75,4 +75,4 @@ with_items: - "{{ scale_install_gpfs_fal.files.0.path | basename }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/fal_install/tasks/install_local_pkg.yml b/roles/fal_install/tasks/install_local_pkg.yml index 6c221455..ef0fd609 100644 --- a/roles/fal_install/tasks/install_local_pkg.yml +++ b/roles/fal_install/tasks/install_local_pkg.yml @@ -139,4 +139,4 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ current_package ] }}" with_items: - "{{ scale_install_gpfs_fal.files.0.path | basename }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/fal_install/tasks/install_remote_pkg.yml b/roles/fal_install/tasks/install_remote_pkg.yml index ba07df92..173ae206 100644 --- a/roles/fal_install/tasks/install_remote_pkg.yml +++ b/roles/fal_install/tasks/install_remote_pkg.yml @@ -127,4 +127,4 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ current_package ] }}" with_items: - "{{ scale_install_gpfs_fal.files.0.path | basename }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/fal_install/tasks/install_repository.yml b/roles/fal_install/tasks/install_repository.yml index 8e2e9b39..b4e8b4be 100644 --- a/roles/fal_install/tasks/install_repository.yml +++ b/roles/fal_install/tasks/install_repository.yml @@ -116,4 +116,4 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ item ] }}" with_items: - "{{ scale_auditlogging_packages }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/fal_install/tasks/main.yml b/roles/fal_install/tasks/main.yml index ba0074f1..42e3a01d 100644 --- a/roles/fal_install/tasks/main.yml +++ b/roles/fal_install/tasks/main.yml @@ -2,4 +2,4 @@ # tasks file for install - import_tasks: install.yml tags: install - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/fal_upgrade/defaults/main.yml b/roles/fal_upgrade/defaults/main.yml index efdccd2b..12bfc9f1 100644 --- a/roles/fal_upgrade/defaults/main.yml +++ b/roles/fal_upgrade/defaults/main.yml @@ -19,7 +19,7 @@ scale_auditlogging_packages: - gpfs.java ## Flag to enable fileauditlogging -scale_fileauditlogging_enable: true +scale_fal_enable: true ## To Enabled output from Ansible task in stdout and stderr for some tasks. 
## Run the playbook with -vv diff --git a/roles/fal_upgrade/tasks/install_dir_pkg.yml b/roles/fal_upgrade/tasks/install_dir_pkg.yml index b43ca33a..9ed654dd 100644 --- a/roles/fal_upgrade/tasks/install_dir_pkg.yml +++ b/roles/fal_upgrade/tasks/install_dir_pkg.yml @@ -75,4 +75,4 @@ with_items: - "{{ scale_install_gpfs_fal.files.0.path | basename }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/fal_upgrade/tasks/install_local_pkg.yml b/roles/fal_upgrade/tasks/install_local_pkg.yml index 070440fc..dc6c35ea 100644 --- a/roles/fal_upgrade/tasks/install_local_pkg.yml +++ b/roles/fal_upgrade/tasks/install_local_pkg.yml @@ -139,4 +139,4 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ current_package ] }}" with_items: - "{{ scale_install_gpfs_fal.files.0.path | basename }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/fal_upgrade/tasks/install_remote_pkg.yml b/roles/fal_upgrade/tasks/install_remote_pkg.yml index 195bb418..276146da 100644 --- a/roles/fal_upgrade/tasks/install_remote_pkg.yml +++ b/roles/fal_upgrade/tasks/install_remote_pkg.yml @@ -127,4 +127,4 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ current_package ] }}" with_items: - "{{ scale_install_gpfs_fal.files.0.path | basename }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/fal_upgrade/tasks/install_repository.yml b/roles/fal_upgrade/tasks/install_repository.yml index 8420eaa7..125b8001 100644 --- a/roles/fal_upgrade/tasks/install_repository.yml +++ b/roles/fal_upgrade/tasks/install_repository.yml @@ -123,4 +123,4 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ item ] }}" with_items: - "{{ scale_auditlogging_packages }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/fal_upgrade/tasks/main.yml b/roles/fal_upgrade/tasks/main.yml index ba0074f1..42e3a01d 100644 --- a/roles/fal_upgrade/tasks/main.yml +++ b/roles/fal_upgrade/tasks/main.yml @@ -2,4 +2,4 @@ # tasks file for install - import_tasks: install.yml tags: install - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/fal_verify/tasks/main.yml b/roles/fal_verify/tasks/main.yml index 9b15e230..f1fb03f1 100644 --- a/roles/fal_verify/tasks/main.yml +++ b/roles/fal_verify/tasks/main.yml @@ -2,4 +2,4 @@ # tasks file for postcheck - include_tasks: check.yml tags: always - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) From 3328d6002281c9819e2f963007f5ec093c543b37 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 25 Nov 2021 16:23:43 +0100 Subject: [PATCH 031/113] [NFS] Rename 'install_debuginfo' to 'scale_nfs_install_debuginfo' Signed-off-by: Achim Christ --- roles/nfs_install/defaults/main.yml | 4 ++-- roles/nfs_install/tasks/install_dir_pkg.yml | 2 +- roles/nfs_install/tasks/install_local_pkg.yml | 2 +- roles/nfs_install/tasks/install_remote_pkg.yml | 2 +- roles/nfs_upgrade/defaults/main.yml | 2 +- roles/nfs_upgrade/tasks/install_dir_pkg.yml | 2 +- roles/nfs_upgrade/tasks/install_local_pkg.yml | 2 +- roles/nfs_upgrade/tasks/install_remote_pkg.yml | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/roles/nfs_install/defaults/main.yml b/roles/nfs_install/defaults/main.yml index 69b23edd..22279143 100644 --- a/roles/nfs_install/defaults/main.yml +++ b/roles/nfs_install/defaults/main.yml @@ -23,10 +23,10 @@ scale_nfs_debs: 
## pm ganesha package for nfs performance monitoring scale_pm_package: - - gpfs.pm-ganesha + - gpfs.pm-ganesha ## Temporary directory to copy installation package to ## (local package installation method) scale_install_localpkg_tmpdir_path: /tmp ## Flag to install ganesha debug package -install_debuginfo: true +scale_nfs_install_debuginfo: true diff --git a/roles/nfs_install/tasks/install_dir_pkg.yml b/roles/nfs_install/tasks/install_dir_pkg.yml index e8d17207..17ad0e4e 100644 --- a/roles/nfs_install/tasks/install_dir_pkg.yml +++ b/roles/nfs_install/tasks/install_dir_pkg.yml @@ -231,7 +231,7 @@ - name: install | remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_nfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/nfs_install/tasks/install_local_pkg.yml b/roles/nfs_install/tasks/install_local_pkg.yml index 80265dd8..d43568af 100644 --- a/roles/nfs_install/tasks/install_local_pkg.yml +++ b/roles/nfs_install/tasks/install_local_pkg.yml @@ -352,7 +352,7 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_nfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/nfs_install/tasks/install_remote_pkg.yml b/roles/nfs_install/tasks/install_remote_pkg.yml index 60196221..56472bee 100644 --- a/roles/nfs_install/tasks/install_remote_pkg.yml +++ b/roles/nfs_install/tasks/install_remote_pkg.yml @@ -324,7 +324,7 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_nfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - name: List all GPFS package to be installed debug: diff --git a/roles/nfs_upgrade/defaults/main.yml b/roles/nfs_upgrade/defaults/main.yml index a4f272fd..2737c4e8 100644 --- a/roles/nfs_upgrade/defaults/main.yml +++ b/roles/nfs_upgrade/defaults/main.yml @@ -28,4 +28,4 @@ scale_pm_package: scale_install_localpkg_tmpdir_path: /tmp ## Flag to install ganesha debug package -install_debuginfo: true +scale_nfs_install_debuginfo: true diff --git a/roles/nfs_upgrade/tasks/install_dir_pkg.yml b/roles/nfs_upgrade/tasks/install_dir_pkg.yml index 1371923d..37ba19b1 100644 --- a/roles/nfs_upgrade/tasks/install_dir_pkg.yml +++ b/roles/nfs_upgrade/tasks/install_dir_pkg.yml @@ -231,7 +231,7 @@ - name: upgrade | remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_nfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/nfs_upgrade/tasks/install_local_pkg.yml b/roles/nfs_upgrade/tasks/install_local_pkg.yml index 4ddf3c58..d095eb26 100644 --- a/roles/nfs_upgrade/tasks/install_local_pkg.yml +++ b/roles/nfs_upgrade/tasks/install_local_pkg.yml @@ -318,4 
+318,4 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_nfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution diff --git a/roles/nfs_upgrade/tasks/install_remote_pkg.yml b/roles/nfs_upgrade/tasks/install_remote_pkg.yml index a3ee191e..b96c0651 100644 --- a/roles/nfs_upgrade/tasks/install_remote_pkg.yml +++ b/roles/nfs_upgrade/tasks/install_remote_pkg.yml @@ -324,4 +324,4 @@ - name: upgrade | remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_nfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution From 7f4db07db292b221415ff14516e9cf8c1faad428 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 25 Nov 2021 16:27:46 +0100 Subject: [PATCH 032/113] [SMB] Rename 'install_debuginfo' to 'scale_smb_install_debuginfo' Signed-off-by: Achim Christ --- roles/smb_install/defaults/main.yml | 2 +- roles/smb_install/tasks/install_dir_pkg.yml | 2 +- roles/smb_install/tasks/install_local_pkg.yml | 2 +- roles/smb_install/tasks/install_remote_pkg.yml | 2 +- roles/smb_upgrade/defaults/main.yml | 2 +- roles/smb_upgrade/tasks/install_dir_pkg.yml | 2 +- roles/smb_upgrade/tasks/install_local_pkg.yml | 2 +- roles/smb_upgrade/tasks/install_remote_pkg.yml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/roles/smb_install/defaults/main.yml b/roles/smb_install/defaults/main.yml index 4c7c0f48..5d2f651e 100644 --- a/roles/smb_install/defaults/main.yml +++ b/roles/smb_install/defaults/main.yml @@ -16,4 +16,4 @@ scale_smb_packages: scale_install_localpkg_tmpdir_path: /tmp ## Flag to install smb debug package -install_debuginfo: true +scale_smb_install_debuginfo: true diff --git a/roles/smb_install/tasks/install_dir_pkg.yml b/roles/smb_install/tasks/install_dir_pkg.yml index df5431fa..0af856f1 100644 --- a/roles/smb_install/tasks/install_dir_pkg.yml +++ b/roles/smb_install/tasks/install_dir_pkg.yml @@ -101,7 +101,7 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_smb_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/smb_install/tasks/install_local_pkg.yml b/roles/smb_install/tasks/install_local_pkg.yml index d59dcae6..4e7666a0 100644 --- a/roles/smb_install/tasks/install_local_pkg.yml +++ b/roles/smb_install/tasks/install_local_pkg.yml @@ -209,7 +209,7 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_smb_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/smb_install/tasks/install_remote_pkg.yml b/roles/smb_install/tasks/install_remote_pkg.yml index aff4c0eb..3b9ada75 100644 --- a/roles/smb_install/tasks/install_remote_pkg.yml +++ b/roles/smb_install/tasks/install_remote_pkg.yml @@ -181,7 
+181,7 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_smb_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/smb_upgrade/defaults/main.yml b/roles/smb_upgrade/defaults/main.yml index 4c7c0f48..5d2f651e 100644 --- a/roles/smb_upgrade/defaults/main.yml +++ b/roles/smb_upgrade/defaults/main.yml @@ -16,4 +16,4 @@ scale_smb_packages: scale_install_localpkg_tmpdir_path: /tmp ## Flag to install smb debug package -install_debuginfo: true +scale_smb_install_debuginfo: true diff --git a/roles/smb_upgrade/tasks/install_dir_pkg.yml b/roles/smb_upgrade/tasks/install_dir_pkg.yml index 934ab38e..faf89d7b 100644 --- a/roles/smb_upgrade/tasks/install_dir_pkg.yml +++ b/roles/smb_upgrade/tasks/install_dir_pkg.yml @@ -101,7 +101,7 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_smb_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/smb_upgrade/tasks/install_local_pkg.yml b/roles/smb_upgrade/tasks/install_local_pkg.yml index 1698707a..ccb69e82 100644 --- a/roles/smb_upgrade/tasks/install_local_pkg.yml +++ b/roles/smb_upgrade/tasks/install_local_pkg.yml @@ -217,4 +217,4 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_smb_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution diff --git a/roles/smb_upgrade/tasks/install_remote_pkg.yml b/roles/smb_upgrade/tasks/install_remote_pkg.yml index 633213b4..10c8fdc1 100644 --- a/roles/smb_upgrade/tasks/install_remote_pkg.yml +++ b/roles/smb_upgrade/tasks/install_remote_pkg.yml @@ -150,4 +150,4 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_smb_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution From 4f195d10b12748934f1db59702ce13ba47163d55 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 25 Nov 2021 16:32:00 +0100 Subject: [PATCH 033/113] [HDFS] Rename 'install_debuginfo' to 'scale_hdfs_install_debuginfo' Signed-off-by: Achim Christ --- roles/hdfs_install/defaults/main.yml | 2 +- roles/hdfs_install/tasks/install_dir_pkg.yml | 2 +- roles/hdfs_install/tasks/install_local_pkg.yml | 2 +- roles/hdfs_upgrade/defaults/main.yml | 2 +- roles/hdfs_upgrade/tasks/upgrade_dir_pkg.yml | 2 +- roles/hdfs_upgrade/tasks/upgrade_local_pkg.yml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/roles/hdfs_install/defaults/main.yml b/roles/hdfs_install/defaults/main.yml index 054aec8a..bbe09f7c 100644 --- a/roles/hdfs_install/defaults/main.yml +++ b/roles/hdfs_install/defaults/main.yml @@ -12,7 +12,7 @@ scale_hdfs_packages: gpfs.hdfs-protocol scale_install_localpkg_tmpdir_path: /tmp ## Flag to install hdfs debug package -install_debuginfo: true 
+scale_hdfs_install_debuginfo: true # Directory to install 3.1.1.x hdfs package hdfs_rhel_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' diff --git a/roles/hdfs_install/tasks/install_dir_pkg.yml b/roles/hdfs_install/tasks/install_dir_pkg.yml index 035fce33..7cf61453 100644 --- a/roles/hdfs_install/tasks/install_dir_pkg.yml +++ b/roles/hdfs_install/tasks/install_dir_pkg.yml @@ -91,5 +91,5 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_hdfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution diff --git a/roles/hdfs_install/tasks/install_local_pkg.yml b/roles/hdfs_install/tasks/install_local_pkg.yml index be9b38b3..a852ab3e 100644 --- a/roles/hdfs_install/tasks/install_local_pkg.yml +++ b/roles/hdfs_install/tasks/install_local_pkg.yml @@ -155,4 +155,4 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_hdfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution diff --git a/roles/hdfs_upgrade/defaults/main.yml b/roles/hdfs_upgrade/defaults/main.yml index cfdecd12..fe41dc8b 100644 --- a/roles/hdfs_upgrade/defaults/main.yml +++ b/roles/hdfs_upgrade/defaults/main.yml @@ -13,7 +13,7 @@ scale_hdfs_packages: scale_install_localpkg_tmpdir_path: /tmp ## Flag to install hdfs debug package -install_debuginfo: true +scale_hdfs_install_debuginfo: true # Directory to install 3.1.1.x hdfs package hdfs_rhel_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' diff --git a/roles/hdfs_upgrade/tasks/upgrade_dir_pkg.yml b/roles/hdfs_upgrade/tasks/upgrade_dir_pkg.yml index 95af2da2..2bb76618 100644 --- a/roles/hdfs_upgrade/tasks/upgrade_dir_pkg.yml +++ b/roles/hdfs_upgrade/tasks/upgrade_dir_pkg.yml @@ -87,5 +87,5 @@ - name: remove debuginfo from packages set_fact: scale_upgrade_all_packages: "{{ scale_upgrade_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_hdfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution diff --git a/roles/hdfs_upgrade/tasks/upgrade_local_pkg.yml b/roles/hdfs_upgrade/tasks/upgrade_local_pkg.yml index ef1762fd..5e1c089c 100644 --- a/roles/hdfs_upgrade/tasks/upgrade_local_pkg.yml +++ b/roles/hdfs_upgrade/tasks/upgrade_local_pkg.yml @@ -154,5 +154,5 @@ - name: remove debuginfo from packages set_fact: scale_upgrade_all_packages: "{{ scale_upgrade_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_hdfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution From 3c1ff31d162bad0c5fd0e2c79db62314777a73f4 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 25 Nov 2021 16:38:16 +0100 Subject: [PATCH 034/113] Fix role directory name for default variables Signed-off-by: Achim Christ --- roles/obj_prepare/{default => defaults}/main.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename roles/obj_prepare/{default => defaults}/main.yml (100%) diff --git a/roles/obj_prepare/default/main.yml b/roles/obj_prepare/defaults/main.yml similarity index 100% rename from roles/obj_prepare/default/main.yml rename to 
roles/obj_prepare/defaults/main.yml From ceacf0d43e11340e8c3da42f4bc1e827aed1764f Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 25 Nov 2021 16:53:02 +0100 Subject: [PATCH 035/113] Add 'scale_' prefix to 'hdfs_*_version_path' variables Signed-off-by: Achim Christ --- roles/hdfs_install/defaults/main.yml | 8 ++++---- roles/hdfs_install/tasks/install.yml | 18 +++++++++--------- roles/hdfs_upgrade/defaults/main.yml | 8 ++++---- roles/hdfs_upgrade/tasks/upgrade.yml | 4 ++-- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/roles/hdfs_install/defaults/main.yml b/roles/hdfs_install/defaults/main.yml index bbe09f7c..c2c41d9a 100644 --- a/roles/hdfs_install/defaults/main.yml +++ b/roles/hdfs_install/defaults/main.yml @@ -15,13 +15,13 @@ scale_install_localpkg_tmpdir_path: /tmp scale_hdfs_install_debuginfo: true # Directory to install 3.1.1.x hdfs package -hdfs_rhel_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' +scale_hdfs_rhel_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' # Directory to install 3.3.x hdfs package -hdfs_rhel_version_path_33: 'hdfs_rpms/rhel/hdfs_3.3.x/' +scale_hdfs_rhel_version_path_33: 'hdfs_rpms/rhel/hdfs_3.3.x/' # Directory to install 3.1.1.x hdfs package -hdfs_sles_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' +scale_hdfs_sles_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' # Directory to install 3.1.1.x hdfs package -hdfs_ubuntu_version_path: 'hdfs_debs/ubuntu/hdfs_3.1.1.x/' +scale_hdfs_ubuntu_version_path: 'hdfs_debs/ubuntu/hdfs_3.1.1.x/' diff --git a/roles/hdfs_install/tasks/install.yml b/roles/hdfs_install/tasks/install.yml index 5f7589a6..7659b601 100644 --- a/roles/hdfs_install/tasks/install.yml +++ b/roles/hdfs_install/tasks/install.yml @@ -75,12 +75,12 @@ run_once: true delegate_to: localhost -# Run chosen installation method to get list of RPMs +# Run chosen installation method to get list of RPMs - name: install | Set the extracted package directory path set_fact: hdfs_extracted_path: "{{ scale_extracted_path }}" - hdfs_version_path_selection_rhel: "{{ hdfs_rhel_version_path }}" + hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path }}" - name: install | Stat extracted packages directory stat: @@ -90,27 +90,27 @@ - include_tasks: prepare_env.yml - block: - - name: install | Fetch hdfs version + - name: install | Fetch hdfs version set_fact: - hdfs_version_path_selection_rhel: "{{ hdfs_rhel_version_path_33 }}" + hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path_33 }}" when: transparency_33_enabled|bool - name: install | Fetch hdfs rpm dir path for rhel set_fact: - hdfs_rpm_path_rhel: "{{ hdfs_version_path_selection_rhel }}" + hdfs_rpm_path_rhel: "{{ hdfs_version_path_selection_rhel }}" - name: install | Set correct hdfs rpm dir path for scale release lower 5.1.2 set_fact: - hdfs_rpm_path_rhel: "{{ hdfs_rpm_path_rhel | replace('/rhel/','/rhel7/') }}" + hdfs_rpm_path_rhel: "{{ hdfs_rpm_path_rhel | replace('/rhel/','/rhel7/') }}" when: scale_version is version_compare('5.1.2', '<') - name: install | Fetch hdfs rpm dir path for sles set_fact: - hdfs_rpm_path_sles: "{{ hdfs_sles_version_path }}" - + hdfs_rpm_path_sles: "{{ scale_hdfs_sles_version_path }}" + - name: install | Fetch hdfs rpm dir path for ubuntu set_fact: - hdfs_rpm_path_ubuntu: "{{ hdfs_ubuntu_version_path }}" + hdfs_rpm_path_ubuntu: "{{ scale_hdfs_ubuntu_version_path }}" run_once: true delegate_to: localhost diff --git a/roles/hdfs_upgrade/defaults/main.yml b/roles/hdfs_upgrade/defaults/main.yml index fe41dc8b..74f48ac1 100644 --- 
a/roles/hdfs_upgrade/defaults/main.yml +++ b/roles/hdfs_upgrade/defaults/main.yml @@ -16,13 +16,13 @@ scale_install_localpkg_tmpdir_path: /tmp scale_hdfs_install_debuginfo: true # Directory to install 3.1.1.x hdfs package -hdfs_rhel_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' +scale_hdfs_rhel_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' # Directory to install 3.3.x hdfs package -hdfs_rhel_version_path_33: 'hdfs_rpms/rhel/hdfs_3.3.x/' +scale_hdfs_rhel_version_path_33: 'hdfs_rpms/rhel/hdfs_3.3.x/' # Directory to install 3.1.1.x hdfs package -hdfs_sles_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' +scale_hdfs_sles_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' # Directory to install 3.3.x hdfs package -hdfs_ubuntu_version_path: 'hdfs_debs/ubuntu/hdfs_3.1.1.x/' +scale_hdfs_ubuntu_version_path: 'hdfs_debs/ubuntu/hdfs_3.1.1.x/' diff --git a/roles/hdfs_upgrade/tasks/upgrade.yml b/roles/hdfs_upgrade/tasks/upgrade.yml index 3a7b2b71..79d0570a 100644 --- a/roles/hdfs_upgrade/tasks/upgrade.yml +++ b/roles/hdfs_upgrade/tasks/upgrade.yml @@ -51,7 +51,7 @@ - name: upgrade | Set the extracted package directory path set_fact: hdfs_extracted_path: "{{ scale_extracted_path }}" - hdfs_version_path_selection_rhel: "{{ hdfs_rhel_version_path }}" + hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path }}" - name: upgrade | Stat extracted packages directory stat: @@ -63,7 +63,7 @@ - block: - name: set_fact: - hdfs_version_path_selection_rhel: "{{ hdfs_rhel_version_path_33 }}" + hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path_33 }}" when: transparency_33_enabled|bool - name: upgrade | Fetch hdfs rpm dir path for rhel From 9acda81b036ec84cca67228eaefd80fb200d820b Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 25 Nov 2021 18:54:42 +0100 Subject: [PATCH 036/113] Add 'scale_' prefix to HDFS internal variables Signed-off-by: Achim Christ --- roles/hdfs_configure/vars/main.yml | 5 ++--- roles/hdfs_prepare/tasks/check.yml | 2 +- roles/hdfs_prepare/vars/main.yml | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/roles/hdfs_configure/vars/main.yml b/roles/hdfs_configure/vars/main.yml index afc76b28..c4c08015 100644 --- a/roles/hdfs_configure/vars/main.yml +++ b/roles/hdfs_configure/vars/main.yml @@ -6,8 +6,7 @@ scale_command_path: /usr/lpp/mmfs/bin/ # default mm command exection path for hdfs -hdfs_command_path: /usr/lpp/mmfs/hadoop/bin/ +scale_hdfs_command_path: /usr/lpp/mmfs/hadoop/bin/ # default mmhdfs command exection path -mmhdfs_command_path: /usr/lpp/mmfs/hadoop/sbin/ - +scale_mmhdfs_command_path: /usr/lpp/mmfs/hadoop/sbin/ diff --git a/roles/hdfs_prepare/tasks/check.yml b/roles/hdfs_prepare/tasks/check.yml index e29978bf..d1035b3a 100644 --- a/roles/hdfs_prepare/tasks/check.yml +++ b/roles/hdfs_prepare/tasks/check.yml @@ -41,7 +41,7 @@ - fail: msg: "HDFS is not supported on {{ ansible_distribution }} OS." 
- when: ansible_distribution not in hdfs_os_distribution + when: ansible_distribution not in scale_hdfs_os_distribution delegate_to: "{{ server }}" run_once: true diff --git a/roles/hdfs_prepare/vars/main.yml b/roles/hdfs_prepare/vars/main.yml index 9ca79643..77948826 100644 --- a/roles/hdfs_prepare/vars/main.yml +++ b/roles/hdfs_prepare/vars/main.yml @@ -1,6 +1,6 @@ --- # vars file for precheck ## Supported HDFS os distrubution -hdfs_os_distribution: +scale_hdfs_os_distribution: - RedHat - CentOS From bf74f4092fbaefea7d32d1681261c6f1732c5c86 Mon Sep 17 00:00:00 2001 From: Ole Kristian Date: Tue, 30 Nov 2021 15:18:44 +0100 Subject: [PATCH 037/113] Created new variables to document all variables in the project. Signed-off-by: Ole Kristian --- VARIABLESNEW.md | 132 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 VARIABLESNEW.md diff --git a/VARIABLESNEW.md b/VARIABLESNEW.md new file mode 100644 index 00000000..f1e5f6e2 --- /dev/null +++ b/VARIABLESNEW.md @@ -0,0 +1,132 @@ +Variables used by Spectrum Scale (GPFS) Ansible project +======================================================= + +Variables list is dived into each if the Ansible roles. + +**Core** + +| Role | Variables | Default | Options | User Mandatory | Descriptions | +|------|----------------------------------------|----------------------------|------------------------------------------------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Core | scale_architecture: | {{ ansible_architecture }} | x86_64 or ppc64le | no | Specify the Spectrum Scale architecture that you want to install on your nodes. | +| Core | scale_version: | none | 5.x.x.x | yes | Specify the Spectrum Scale version that you want to install on your nodes. With 5.0.5.x. | +| Core | scale_daemon_nodename: | {{ ansible_hostname }} | none | no | Spectrum Scale daemon nodename defaults to nodes hostname | +| Core | scale_admin_nodename: | {{ ansible_hostname }} | none | no | Spectrum Scale admin nodename defaults to nodes hostname | +| Core | scale_state: | present | present,maintenance,absent | no | Desired state of the Spectrum Scale node. present - node will be added to cluster, daemon will be started maintenance - node will be added to cluster, daemon will not be started absent - node will be removed from cluster | +| Core | scale_prepare_disable_selinux | false | true or false | no | Whether or not to disable SELinux. | +| Core | scale_reboot_automatic | false | true or false | no | Whether or not to automatically reboot nodes - if set to false then only a message is printed. If set to true then nodes are automatically rebooted (dangerous!). | +| Core | scale_prepare_enable_ssh_login | false | true or false | no | Whether or not enable SSH root login (PermitRootLogin) and public key authentication (PubkeyAuthentication). | +| Core | scale_prepare_restrict_ssh_address | false | true or false | no | Whether or not to restrict SSH access to the admin nodename (ListenAddress). Requires scale_prepare_enable_ssh_login to be enabled. 
| +| Core | scale_prepare_disable_ssh_hostkeycheck | false | true or false | no | Whether or not to disable SSH hostkey checking (StrictHostKeyChecking). | +| Core | scale_prepare_exchange_keys | false | true or false | no | Path to public SSH key - will be generated (if it does not exist) and exchanged between nodes. Requires scale_prepare_exchange_keys to be enabled, too. | +| Core | scale_prepare_pubkey_path | /root/.ssh/id_rsa.pub | /root/.ssh/gpfskey.pub | no | example: /root/.ssh/gpfskey.pub | +| Core | scale_prepare_disable_firewall | default: false | true or false | no | Whether or not to disable Linux firewalld - if you need to keep firewalld active then change this variable to false and apply your custom firewall rules prior to running this role (e.g. as pre_tasks). | +| Core | scale_install_localpkg_path | none | /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install | yes | Specify the path to the self-extracting Spectrum Scale installation archive on the local system (accessible on Ansible control machine) - it will be copied to your nodes. | +| Core | scale_install_remotepkg_path | none | /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install | yes | Specify the path to Spectrum Scale installation package on the remote system (accessible on Ansible managed node). | +| Core | scale_install_repository_url | none | example: http://server/gpfs/ | yes | Specify the URL of the (existing) Spectrum Scale YUM repository (copy the contents of /usr/lpp/mmfs/{{ scale_version }}/ to a web server in order to build your repository). Note that if this is a URL then a new repository definition will be created. If this variable is set to existing then it is assumed that a repository definition already exists and thus will *not* be created. | +| Core | scale_install_directory_pkg_path | none | example: /tmp/gpfs/ | yes | Specify the path to the user-provided directory, containing all Spectrum Scale packages. Note that for this installation method all packages need to be kept in a single directory. | +| Core | scale_cluster_quorum | false | true or false | no | If you dont specify any quorum nodes then the first seven hosts in your inventory will automatically be assigned the quorum role. even if this variable is false | +| Core | scale_cluster_manager | false | true or false | no | Nodes default manager role - you ll likely want to define per-node roles in your inventory | +| Core | scale_cluster_profile_name: | none | gpfsprotocoldefaults or gpfsprotocolrandomio | no | Specifies a predefined profile of attributes to be applied. System-defined profiles are located in /usr/lpp/mmfs/profiles/. The following system-defined profile names are accepted. gpfsprotocoldefaults and gpfsprotocolrandomio eg. If you want to apply gpfsprotocoldefaults then specify scale_cluster_profile_name: gpfsprotocoldefaults | +| Core | scale_cluster_profile_dir_path | /usr/lpp/mmfs/profiles/ | Path to cluster profile: example: /usr/lpp/mmfs/profiles/ | no | Fixed variable related to mmcrcluster profile. System-defined profiles are located in /usr/lpp/mmfs/profiles/ | +| Core | scale_enable_gpg_check: | true | true or false | no | Enable/disable gpg key flag | +| Core | scale_install_localpkg_tmpdir_path | /tmp | path to folder. | no | Temporary directory to copy installation package to (local package installation method) | +| Core | scale_nodeclass: | none | Name of the nodeclass: example scale nodeclass: - class1 | no | Node classes can be defined on a per-node basis by defining the scale_nodeclass variable. 
| +| Core | scale_config: | none | scale_config:
  - nodeclass: class1
    params:
        - pagepool: 4G
        - autoload: yes
        - ignorePrefetchLunCount: yes | no | Configuration attributes can be defined as variables for *any* host in the play; the host for which you define the configuration attribute is irrelevant. Refer to the mmchconfig man page for a list of available configuration attributes. | +| Core | scale_storage: | none | scale_storage:
     filesystem: gpfs01
     blockSize: 4M
     maxMetadataReplicas: 2
     defaultMetadataReplicas: 2
     maxDataReplicas: 2
     defaultDataReplicas: 2
     numNodes: 16
     automaticMountOption: true
     defaultMountPoint: /mnt/gpfs01
     disks:
          - device: /dev/sdb
            nsd: nsd_1
            servers: scale01
            failureGroup: 10
            usage: metadataOnly
            pool: system
          - device: /dev/sdc
            nsd: nsd_2
            servers: scale01
            failureGroup: 10
            usage: dataOnly
            pool: data | no | Refer to the mmchfs and mmchnsd man pages for a description of these storage parameters. The filesystem parameter is mandatory; the servers and device parameters are mandatory for each of the file system's disks. All other file system and disk parameters are optional. scale_storage *must* be defined using group variables. Do *not* define disk parameters using host variables or inline variables in your playbook. Doing so would apply them to all hosts in the group/play, thus defining the same disk multiple times... | +| Core | scale_protocol_node: | none | true or false | no | Set to true to use the node as a protocol node; set per node as a host variable. | +| Core | scale_admin_node | false | true or false | no | Set the admin flag on the node for Ansible to use. | +| Core | scale_nsd_server | false | true or false | no | Set the NSD flag for installation purposes. | +| GUI | scale_gui_hide_tip_callhome | false | true or false | no | Hide the 'Call Home not enabled' tip in the GUI. | +| GUI | scale_cluster_gui: | false | true or false | no | Install the Spectrum Scale GUI on nodes; set by host variables. | +| GUI | scale_service_gui_start: | true | true or false | no | Whether or not to start the Scale GUI after installation. | +| GUI | scale_gui_admin_user: | none | admin | no | Specify a name for the admin user to be created. | +| GUI | scale_gui_admin_password: | none | Admin@GUI! | no | Password to be set for the admin user. | +| GUI | scale_gui_admin_role: | none | SecurityAdmin,SystemAdmin | no | Role access for the admin user; check the IBM documentation for valid roles. | +| GUI | scale_gui_user_username: | none | SEC | no | Extra Spectrum Scale GUI user, for example a Monitor or RestAPI user. | +| GUI | scale_gui_user_password: | none | Storage@Scale1 | no | Password for the extra user. | +| GUI | scale_gui_user_role: | none | SystemAdmin | no | Role access for the extra user. | +| GUI | scale_gui_admin_hc_vault: | none | N/A | no | HashiCorp Vault - create the local admin user with a password from Vault; cannot be combined with scale_gui_admin_user. | +| GUI | scale_gui_admin_hc_vault_user: | none | admin | no | Create the local admin user and write the password to Vault. | +| GUI | scale_gui_admin_hc_vault_role: | none | SecurityAdmin,SystemAdmin | no | Role access for the admin user; check the IBM documentation for valid roles. | +| GUI | scale_gui_cert_hc_vault: | false | true or false | no | Generate an HTTPS certificate from HashiCorp Vault and import it into the Scale GUI. The Scale host needs to be included in HC Vault and the Ansible playbook needs to have the computed.name variables; normally the playbook is then run from Terraform. | +| GUI | scale_gui_password_policy_change: | false | true or false | no | Change the default GUI user password policy. Change what you need in your inventory files and the rest will use the defaults. | +| GUI | scale_gui_password_policy: | false | scale_gui_password_policy:
  minLength: 6
  maxAge: 900
  minAge: 0
  remember: 3
  minUpperChars: 0
  minLowerChars: 0
  minSpecialChars: 0
  minDigits: 0
  maxRepeat: 0
  minDiff: 1
  rejectOrAllowUserName: --rejectUserName | | Change the default GUI user password policy. Change what you need in your inventory files and the rest will use the defaults.
  scale_gui_password_policy:
  minLength: 6 ## Minimum password length
  maxAge: 900 ## Maximum password age
  minAge: 0 ## Minimum password age
  remember: 3 ## Remember old passwords
  minUpperChars: 0 ## Minimum upper case characters
  minLowerChars: 0 ## Minimum lower case characters
  minSpecialChars: 0 ## Minimum special case characters
  minDigits: 0 ## Minimum digits
  maxRepeat: 0 ## Maximum number of repeat characters
  minDiff: 1 ## Minimum different characters with respect to old password
  rejectOrAllowUserName:
  --rejectUserName ## either --rejectUserName or --allowUserName | +| GUI | scale_gui_ldap_integration: | false | true or false | no | Active Directory information for Managing GUI users in an external AD or LDAP server | +| GUI | scale_gui_ldap: | none | scale_gui_ldap:
  name: 'myad'
  host: 'myad.mydomain.local'
  bindDn: 'CN=servicebind,CN=Users,DC=mydomain,DC=local'
  bindPassword: 'password'
  baseDn: 'CN=Users,DC=mydomain,DC=local'
  port: '389' #Default 389
  type: 'ad' #Default Microsoft Active Directory
  #securekeystore: /tmp/ad.jks #Local on GUI Node
  #secureport: '636' #Default 636 | no | Parameters for managing GUI users in an external AD or LDAP server:
  - **name:** Alias for your LDAP/AD server
  - **host:** The IP address or host name of the LDAP server.
  - **baseDn:** BaseDn string for the repository.
  - **bindDn:** BindDn string for the authentication user.
  - **bindPassword:** Password of the authentication user.
  - **port:** Port number of the LDAP server. Default is 389.
  - **type:** Repository type (ad, ids, domino, secureway, iplanet, netscape, edirectory or custom). Default is ad.
  - **securekeystore:** Location with file name of the keystore file (.jks, .p12 or .pfx).
  - **secureport:** Port number of the LDAP. 636 over SSL. | +| GUI | scale_gui_groups: | none | scale_gui_groups:
  administrator: 'scale-admin'
  securityadmin: 'scale-securityadmin'
  storageadmin: 'scale-storage-administrator'
  snapadmin: 'scale-snapshot-administrator'
  data_access: 'scale-data-access'
  monitor: 'scale-monitor'
  protocoladmin: 'scale-protocoladmin'
  useradmin: 'scale-useradmin' | no | The LDAP/AD groups need to be created in the LDAP server (they do not have to exist before deployment).
You'll likely want to define this in your host inventory.
Add the mappings that you want and replace the **scale-** prefix with your own LDAP group names.

The following are the default user groups:
  - **Administrator** - Manages all functions on the system except those that deal with managing users, user groups, and authentication.
  - **SecurityAdmin** - Manages all functions on the system, including managing users, user groups, and user authentication.
  - **SystemAdmin** - Manages clusters, nodes, alert logs, and authentication.
  - **StorageAdmin** - Manages disks, file systems, pools, filesets, and ILM policies.
  - **SnapAdmin** - Manages snapshots for file systems and filesets.
  - **DataAccess** - Controls access to data. For example, managing access control lists.
  - **Monitor** - Monitors objects and system configuration but cannot configure, modify, or manage the system or its resources.
  - **ProtocolAdmin** - Manages object storage and data export definitions of SMB and NFS protocols.
  - **UserAdmin** - Manages access for GUI users. Users who are part of this group have edit permissions only in the Access pages of the GUI. Check IBM doc for updated list | +| GUI | scale_gui_email_notification: | false | false or true | no | Enable E-mail notifications in Spectrum Scale GUI | +| GUI | scale_gui_email: | none | scale_gui_email:
  name: 'SMTP_1'
  ipaddress: 'emailserverhost'
  ipport: '25'
  replay_email_address: scale-server-test@acme.com
  contact_name: 'scale-contact-person'
  subject: &cluster&message
  sender_login_id:
  password:
  headertext:
  footertext: | no | - The email feature transmits operational and error-related data in the form of an event notification email.
  - Email notifications can be customized by setting a custom header and footer for the emails and customizing the subject by selecting and combining from the following variables: &message, &messageId, &severity, &dateAndTime, &cluster and &component.
  - **name** - Specifies a name for the e-mail server.
  - **address** - Specifies the address of the e-mail server. Enter the SMTP server IP address or host name. For example, 10.45.45.12 or smtp.example.com.
  - **portNumber** - Specifies the port number of the e-mail server. Optional.
  - **reply_email_address/sender_address** - Specifies the sender's email address.
  - **contact_name/sender_name** - Specifies the sender's name.
  - **subject** Notifications can be customized by setting a custom header and footer or with variable like &cluster&message ## Variables: &message &messageId &severity &dateAndTime &cluster&component
  - **sender_login_id** - Login needed to authenticate sender with email server in case the login is different from the sender address (--reply). Optional.
  - **password** - Password used to authenticate the sender address (--reply) or login id (--login) with the email server | +| GUI | scale_gui_email_recipients: | none | scale_gui_email_recipients:
  name: 'name_email_recipient_name'
  address: 'email_recipient_address@email.com'
  components_security_level: 'SCALEMGMT=WARNING,CESNETWORK=WARNING'
  reports: 'DISK,GPFS,AUTH'
  quotaNotification: '--quotaNotification' ## if defined, it enables quota notification
  quotathreshold: '70.0' | no | **Options:**
  - **NAME**: Name of the email Recipients
  - **Address:** userAddress Specifies the address of the e-mail user
  - **Components_security_level**
     - The value scale_gui_email_recipients_components_security_level: needs to contain the **Component** and the **Warning/Security Level**
       - Choose a component like **SCALEMGMT** with a security level of WARNING to get **SCALEMGMT=WARNING**
       - Security level: Choose the lowest severity of an event for which you want to receive an email. For example, selecting Tip includes events with severity Tip, Warning, and Error in the email.
       - The severity levels are as follows: **INFO**, **TIP**, **WARNING**, **ERROR**
     **List of all security levels:**
      AFM=WARNING,AUTH=WARNING,BLOCK=WARNING,CESNETWORK=WARNING,CLOUDGATEWAY=WARNING,CLUSTERSTATE=WARNING,DISK=WARNING,FILEAUDITLOG=WARNING,FILESYSTEM=WARNING,GPFS=WARNING,GUI=WARNING,HADOOPCONNECTOR=WARNING,
      KEYSTONE=WARNING,MSGQUEUE=WARNING,NETWORK=WARNING,NFS=WARNING,OBJECT=WARNING,PERFMON=WARNING,SCALEMGMT=WARNING,SMB=WARNING,CUSTOM=WARNING,AUTH_OBJ=WARNING,CES=WARNING,CESIP=WARNING,NODE=WARNING,THRESHOLD=WARNING,WATCHFOLDER=WARNING,NVME=WARNING,POWERHW=WARNING
  - **Reports** listOfComponents
       - Specifies the components to be reported. The tasks generating reports are scheduled by default to send a report once per day. Optional.
        AFM,AUTH,BLOCK,CESNETWORK,CLOUDGATEWAY,CLUSTERSTATE,DISK,FILEAUDITLOG,FILESYSTEM,GPFS,GUI,HADOOPCONNECTOR,KEYSTONE,MSGQUEUE,NETWORK,NFS,OBJECT,PERFMON,SCALEMGMT,SMB,CUSTOM,AUTH_OBJ,CES,CESIP,NODE,THRESHOLD,WATCHFOLDER,NVME,POWERHW
  - **quotaNotification**
     Enables quota notifications which are sent out if the specified threshold is violated. (See --quotathreshold)
  - **quotathreshold** valueInPercent
     - Sets the threshold (percent of the hard limit) for including quota violations in the quota digest report.
     - The default value is 100. The values -3, -2, -1, and zero have special meaning.
     - Specify the value -2 to include all results, even entries where the hard quota is not set.
     - Specify the value -1 to include all entries where hard quota is set and current usage is greater than or equal to the soft quota.
     - Specify the value -3 to include all entries where hard quota is not set and current usage is greater than or equal to the soft quota only.
     - Specify the value 0 to include all entries where the hard quota is set.
  Using unlisted options can lead to an error | +| GUI | scale_gui_snmp_notification: | false | true or false | no | Enable SNMP notifications in Spectrum Scale GUI | +| GUI | scale_gui_snmp_server: | false | scale_gui_snmp_server:
  ip_adress: 'snmp_server_host'
  ip_port: '162'
  community: 'Public' | no | - To configure SNMP notification:
    - Change the values:
      - scale_gui_snmp_notification: true
       - ip_adress to your SNMP server/host
       - ip_port to your SNMP port
       - community to your SNMP community | + +**Protocol** + +| Role | variables | Default | Options | User Mandatory | Descriptions | +|-------------------|--------------------------|---------|---------------------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Protocol/NFS | scale_install_debuginfo: | true | true or false | no | Flag to install ganesha debug package | +| Protocol/SMB | scale_install_debuginfo: | true | true or false | no | Flag to install smb debug package | +| Protocol | scale_protocols: #IPV4 | none | scale_protocols: #IPV4
  smb: true
  nfs: true
  object: true
  export_ip_pool: [192.168.100.100,192.168.100.101]
  filesystem: cesSharedRoot
  mountpoint: /gpfs/cesSharedRoot | no | To install Spectrum Scale Protocol. Refer to man mmces man pages for a description of these Cluster Export. scale_ces_groups can also be user to group nodes. | +| Protocol | scale_protocols: #Ipv6 | none | scale_protocols: #Ipv6
  smb: true
  nfs: true
  object: true
  interface: [eth0]
  export_ip_pool: [2002:90b:e006:84:250:56ff:feb9:7787]
  filesystem: cesSharedRoot
  mountpoint: /gpfs/cesSharedRoot | no | For enabling Cluster Export Services in an IPv6 environment one also needs to define an interface parameter: scale_ces_groups can also be user to group nodes. | +| Protocol/Object | scale_ces_obj: | none | scale_ces_obj:
  dynamic_url: False
  enable_s3: False
  local_keystone: True
  enable_file_access: False
  endpoint_hostname: scale-11
  object_fileset: Object_Fileset
  pwd_file: obj_passwd.j2
  admin_user: admin
  admin_pwd: admin001
  database_pwd: admin001 | no | Missing descriptions. + + + +**HDFS** + +| Role | variables | Default | Options | User Mandatory | Descriptions | +|------|----------------------|---------|-----------------------------|----------------|---------------------------------------------------------| +| hdfs | ha_enabled: | false | true or false | no | HA for namenode in HDFS? | +| hdfs | scale_hdfs_clusters: | none | - name: mycluster
  filesystem: gpfs1
  namenodes: ['host-vm1.test.net', 'host-vm2.test.net']
  datanodes: ['host-vm3.test.net', 'host-vm4.test.net', 'host-vm5.test.net']
  datadir: datadir | no | Install IBM Spectrum Scale (HDFS), "Document more" + + +**Performance Monitoring - zimon** + +| Role | variables | Default | Options | User Mandatory | Descriptions | +|-------|------------------------|---------|---------------|----------------|-------------------------------------------------------------------------------| +| zimon | scale_zimon_collector: | false | true or false | no | Nodes default GUI collector role, its install the collector on all GUI nodes. | +| zimon | scale_cluster_gui | false | true or false | no | Install Spectrum Scale GUI on nodes, set by host variables. | +| zimon | scale_cluster_zimon | false | true or false | no | Install up zimon enabled | + + +**FileAudit Logging** + +| Role | variables | Default | Options | User Mandatory | Descriptions | +|-------------------------------|------------------------|---------|----------------|----------------|------------------------------------------------------------------------------| +|fal | scale_fal_enable | true | true or false | no | Flag to enable fileauditlogging | + +**CallHome** + +| Role | variables | Default | Options | User Mandatory | Descriptions | +|----------------------------------:|------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------|----------------|--------------| +| callhome |scale_callhome_params: | None | scale_callhome_params:
    is_enabled: true
    customer_name: abc
    customer_email: abc@abc.com
    customer_id: 12345
    customer_country: IN
    proxy_ip:
    proxy_port:
    proxy_user:
    proxy_password:
    proxy_location:
    callhome_server: scale01
    callhome_group1: [scale01,scale02,scale03,scale04]
    callhome_schedule: [daily,weekly]
    is_enabled: true
    customer_name: abc
    customer_email: abc@abc.com
    customer_id: 12345
    customer_country: IN
    proxy_ip:
    proxy_port:
    proxy_user:
    proxy_password:
    proxy_location:
    callhome_server: scale01 ## server that has callhome installed and can reach out to IBM
    callhome_group1: [scale01,scale02,scale03,scale04]
    callhome_schedule: [daily,weekly] | no | Refer to man mmcallhome man pages for a description of these Call Homes
**scale_callhome_params**:
  is_enabled: true
  customer_name: abc
  customer_email: abc@abc.com
  customer_id: 12345
  customer_country: IN
  proxy_ip:
  proxy_port:
  proxy_user:
  proxy_password:
  proxy_location:
  callhome_server: scale01 ## server that has callhome installed and can reach out to IBM
  callhome_group1: [scale01,scale02,scale03,scale04]
  callhome_schedule: [daily,weekly] | + + +**Remote Mount Filesystem** + +| Role | variables | Default | Options | User Mandatory | Descriptions | +|-------------|-------------------------------------------------|-------------------------------------------------|-------------------------|----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| remotemount | scale_remotemount_client_gui_username: | none | username, example admin | yes | Scale User with Administrator or ContainerOperator role/rights | +| remotemount | scale_remotemount_client_gui_password: | none | password for user | yes | Password for Scale User with Administrator or ContainerOperator role/rights | +| remotemount | scale_remotemount_client_gui_hostname: | none | 10.10.10.1 | yes | IP or Hostname to Client GUI Node | +| remotemount | scale_remotemount_storage_gui_username: | none | | yes | Scale User with Administrator or ContainerOperator role/rights | +| remotemount | scale_remotemount_storage_gui_password: | none | | yes | Password for Scale User with Administrator or ContainerOperator role/rights | +| remotemount | scale_remotemount_storage_gui_hostname: | none | | yes | IP or Hostname to Storage GUI Node | +| remotemount | scale_remotemount_storage_adminnodename: | false | true or false | no | Spectrum Scale uses the Deamon node name and the IP Attach to connect and run cluster traffic on. In most cases the admin network and deamon network is the same. In case you have different AdminNode address and DeamonNode address and for some reason you want to use admin network, then you can set the variable to true | +| remotemount | scale_remotemount_filesystem_name | none | scale_remotemount_filesystem_name
  scale_remotemount_client_filesystem_name:
  scale_remotemount_client_remotemount_path:
  scale_remotemount_storage_filesystem_name:
  scale_remotemount_access_mount_attributes:
  scale_remotemount_client_mount_fs:
  scale_remotemount_client_mount_priority: 0 | yes | The values need to be provided as a list, since mounting multiple filesystems is supported.

  - Local filesystem name of the remote mounted filesystem, so the storage cluster and remote cluster can have different names.
  - Path to where the filesystem should be mounted: /gpfs01/fs1
  - Storage Cluster filesystem you want to mount: gpfs01
  - The filesystem can be mounted with different access modes: RW or RO
  - Indicates when the file system is to be mounted: options are yes, no, automount (When the file system is first accessed.)
  - File systems with higher Priority numbers are mounted after file systems with lower numbers. File systems that do not have mount priorities are mounted last. A value of zero indicates no priority. valid values: 0 | +| remotemount | scale_remotemount_client_filesystem_name: | none | fs1 | yes | Local Filesystem Name of the remote mounted filesystem, So the storage cluster and remote cluster can have different names. | +| remotemount | scale_remotemount_client_remotemount_path: | none | /gpfs01/fs1 | yes | Path to where the filesystem shoul be Mounted. | +| remotemount | scale_remotemount_storage_filesystem_name: | none | gpfs01 | yes | Storage Cluster filesystem you want to mount | +| remotemount | scale_remotemount_access_mount_attributes: | rw | RW, RO | no | Filesystem can be mounted in different access mount: RW or RO | +| remotemount | scale_remotemount_client_mount_fs: | yes | yes, no , automount | no | Indicates when the file system is to be mounted:** options are yes, no, automount (When the file system is first accessed.) | +| remotemount | scale_remotemount_client_mount_priority: 0 | 0 | 0 - x | no | File systems with higher Priority numbers are mounted after file systems with lower numbers. File systems that do not have mount priorities are mounted last. A value of zero indicates no priority. | +| remotemount | scale_remotemount_client_no_gui: | false | true or false | no | If Accessing/Client Cluster dont have GUI, it will use CLI/SSH against Client Cluster | +| remotemount | scale_remotemount_storage_pub_key_location: | Defaults to /tmp/storage_cluster_public_key.pub | path to pubkey | no | Client Cluster (Access) is downloading the pubkey from Owning cluster and importing it | +| remotemount | scale_remotemount_cleanup_remote_mount: | false | true or false | no | Unmounts, remove the filesystem, and the connection between Accessing/Client cluster and Owner/Storage Cluster. This now works on clusters that not have GUI/RESTAPI interface on Client Cluster | +| remotemount | scale_remotemount_debug: | false | true or false | no | Outputs debug information after tasks | +| remotemount | scale_remotemount_forceRun: | false | true or false | no | If scale_remotemount_forceRun is passed in, then the playbook is attempting to run remote_mount role regardless of whether the filesystem is configured | +| remotemount | scale_remotemount_storage_pub_key_location: | /tmp/storage_cluster_public_key.pub | path to | no | Client Cluster (Access) pubkey that is changed from json to right format and then used when creating connection | +| remotemount | scale_remotemount_storage_pub_key_location_json | /tmp/storage_cluster_public_key_json.pub | path to | no | Client Cluster (Access) is downloading the pubkey as JSON from Owning cluster | +| remotemount | scale_remotemount_storage_pub_key_delete | true | true or false | no | Delete both temporary pubkey after the connection have been established | +| + From a44b441d94d439f024f1bb3e05e5c2e78275b2ea Mon Sep 17 00:00:00 2001 From: Ole Kristian Date: Thu, 9 Dec 2021 18:46:21 +0100 Subject: [PATCH 038/113] - Fixed som logic when remotemount is hafeway setup between client and storage. - When adding the storage Cluster as a remotecluster in client cluster we need to specify what nodes should be used as contact node, and in normal cases all nodes would be fine. In case we have AFM Gateway nodes, or Cloud nodes TFCT, we want to use the RESTAPI filter to remove those nodes, so they are not used. 
- Checks that GPFS deamon is started on GUI node, it will check the first server in NodeClass GUI_MGMT_SERVERS, this is the same flag to check when trying to mount up filesystems on all nodes. Check can be disabled with changing the flag to false. - Default it will try to mount the filesystem on all client cluster (accessing) nodes, added function to add comma separated list of servers. example: scale1-test,scale2-test - Minor fixes in role. Signed-off-by: Ole Kristian --- docs/README.REMOTEMOUNT.md | 38 +++++- roles/remotemount_configure/defaults/main.yml | 30 ++++- roles/remotemount_configure/tasks/main.yml | 109 +++++++++++++++++- .../tasks/mount_filesystem_api_cli.yml | 17 ++- .../tasks/mount_filesystems.yml | 52 ++++++++- .../tasks/remotecluster.yml | 16 +-- .../tasks/remotecluster_api_cli.yml | 64 ++++++++-- 7 files changed, 292 insertions(+), 34 deletions(-) diff --git a/docs/README.REMOTEMOUNT.md b/docs/README.REMOTEMOUNT.md index 729d61a2..3d896aaa 100644 --- a/docs/README.REMOTEMOUNT.md +++ b/docs/README.REMOTEMOUNT.md @@ -3,7 +3,7 @@ IBM Spectrum Scale (GPFS) Remote Cluster and Mount Role Role Definition ------------------------------- -- Role name: **remote_mount** +- Role name: **remotemount_configure** - Definition: - This role adds support for consumers of the playbook to remote mount a IBM Spectrum Scale filesystem from a Storage cluster. The roles leverage the Spectrum Scale REST API , meaning 5.0.5.2 or later versions of Scale contains the endpoints. @@ -74,6 +74,35 @@ The following variables would need to be defined by the user, either as vars to - ``scale_remotemount_storage_adminnodename: true `` (Default to: false) **Spectrum Scale uses the Deamon node name and the IP Attach to connect and run cluster traffic on. In most cases the admin network and deamon network is the same. In case you have different AdminNode address and DeamonNode address and for some reason you want to use admin network, then you can set the variable to true** + +- ``scale_remotemount_gpfsdemon_check: true ``(Default to: true) **Checks that GPFS deamon is started on GUI node, it will check the first server in NodeClass GUI_MGMT_SERVERS, this is the same flag to check when trying to mount up filesystems on all nodes. Check can be disabled with changing the flag to false.** + +- ``scale_remotemount_client_mount_on_nodes: all``(Default to: all) **Default it will try to mount the filesystem on all client cluster (accessing) nodes, here you can replace this with a comma separated list of servers. example: scale1-test,scale2-test** + + +- ``scale_remotemount_storage_contactnodes_filter: '?fields=roles.gatewayNode%2Cnetwork.daemonNodeName&filter=roles.gatewayNode%3Dfalse' `` + - When adding the storage Cluster as a remotecluster in client cluster we need to specify what nodes should be used as contact node, and in normal cases **all** nodes would be fine. In case we have AFM Gateway nodes, or Cloud nodes TFCT, we want to use the RESTAPI filter to remove those nodes, so they are not used. + + - **Example**: + - Default is only list all servers that have (AFM) gatewayNode=false. ``scale_remotemount_storage_contactnodes_filter: '?fields=roles.gatewayNode%2Cnetwork.daemonNodeName&filter=roles.gatewayNode%3Dfalse'`` + - No AFM and CloudGateway: ``?fields=roles.gatewayNode%2Cnetwork.daemonNodeName%2Croles.cloudGatewayNode&filter=roles.gatewayNode%3Dfalse%2Croles.cloudGatewayNode%3Dfalse`` + - To create your own filter, go to the API Explorer on Spectrum Scale GUI. 
https://IP-TO-GUI-NODE/ibm/api/explorer/#!/Spectrum_Scale_REST_API_v2/nodesGetv2 + + Roles in version 5.1.1.3 + + ```json + "roles": { + "cesNode": false, + "cloudGatewayNode": false, + "cnfsNode": false, + "designation": "quorum", + "gatewayNode": false, + "managerNode": false, + "otherNodeRoles": "perfmonNode", + "quorumNode": true, + "snmpNode": false + ``` + Example Playbook's ------------------------------- @@ -82,7 +111,7 @@ There is also example playbook's in samples folder. ### Playbook: Storage Cluster and Client Cluster have GUI You can use localhost, then all RestAPI call will occur over https to Storage and Client Cluster locally from where you run the Ansible playbook - +```yaml - hosts: localhost vars: scale_remotemount_client_gui_username: admin @@ -96,6 +125,7 @@ You can use localhost, then all RestAPI call will occur over https to Storage an - { scale_remotemount_client_filesystem_name: "fs3", scale_remotemount_client_remotemount_path: "/gpfs/fs3", scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } roles: - remote_mount +``` ``ansible-playbook -i hosts remotmount.yml`` @@ -105,7 +135,7 @@ You can use localhost, then all RestAPI call will occur over https to Storage an Following example will connect up to the first host in your ansible host file, and then run the playbook and do API Call to Storage Cluster. So in this case the Client Cluster node needs access on https/443 to Storage Cluster GUI Node. - +```yaml - hosts: scale-client-cluster-node-1 gather_facts: false vars: @@ -118,7 +148,7 @@ So in this case the Client Cluster node needs access on https/443 to Storage Clu - { scale_remotemount_client_filesystem_name: "fs3", scale_remotemount_client_remotemount_path: "/gpfs/fs3", scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } roles: - remote_mount - +``` Firewall recommendations for communication among cluster's -------- diff --git a/roles/remotemount_configure/defaults/main.yml b/roles/remotemount_configure/defaults/main.yml index faa2dc7e..24996686 100644 --- a/roles/remotemount_configure/defaults/main.yml +++ b/roles/remotemount_configure/defaults/main.yml @@ -58,4 +58,32 @@ scale_remotemount_cleanup_remote_mount: false # Spectrum Scale uses the Deamon Node Name and the IP Attach to connect and run Cluster traffic. in most cases the admin network and deamon network is the same. # In case you have different AdminNode address and DeamonNode address and for some reason you want to use admin network, then you can set the variable: scale_remotemount_storage_adminnodename: true # Default = DeamonNodeName -#scale_remotemount_storage_adminnodename: false \ No newline at end of file +scale_remotemount_storage_adminnodename: false + + +# Added check that GPFS deamon is started on GUI node, it will check the first server in NodeClass GUI_MGMT_SERVERS +# Check can be disabled with changing the flag to false. +scale_remotemount_gpfsdemon_check: true + +# Default it will try to mount the filesystem on all client cluster (accessing) nodes, here you can replace the this with a comma seperated list of servers. +# scale1-test,scale2-test +# scale_remotemount_client_mount_on_nodes: all + +# When we are adding the storage Cluster in client cluster we need to spesify what nodes should be used. 
and in normal cases all nodes would be fine. +# In cases we have AFM Gateway nodes, or Cloud nodes TFCT, we want to use the RESTAPI filter to remove those nodes so they are not used. +# Example and the default below is to only list all servers that have (AFM) gatewayNode=false. +scale_remotemount_storage_contactnodes_filter: '?fields=roles.gatewayNode%2Cnetwork.daemonNodeName&filter=roles.gatewayNode%3Dfalse' +# Examples: +# NO AFM and CloudGateway: ?fields=roles.gatewayNode%2Cnetwork.daemonNodeName%2Croles.cloudGatewayNode&filter=roles.gatewayNode%3Dfalse%2Croles.cloudGatewayNode%3Dfalse +# to create your own filter, go to the API Explorer on Spectrum Scale GUI. https://IP-TO-GUI-NODE/ibm/api/explorer/#!/Spectrum_Scale_REST_API_v2/nodesGetv2 +# Roles in version 5.1.1.3 +# "roles": { +# "cesNode": false, +# "cloudGatewayNode": false, +# "cnfsNode": false, +# "designation": "quorum", +# "gatewayNode": false, +# "managerNode": false, +# "otherNodeRoles": "perfmonNode", +# "quorumNode": true, +# "snmpNode": false \ No newline at end of file diff --git a/roles/remotemount_configure/tasks/main.yml b/roles/remotemount_configure/tasks/main.yml index fb30c85f..b4a52b4c 100644 --- a/roles/remotemount_configure/tasks/main.yml +++ b/roles/remotemount_configure/tasks/main.yml @@ -108,6 +108,76 @@ when: - access_cluster_status.status == 401 + - name: Main | Client Cluster (access) | Check status of GPFS deamon (Nodeclass GUI_MGMT_SERVERS) + uri: + validate_certs: "{{ validate_certs_uri }}" + force_basic_auth: true + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes/GUI_MGMT_SERVERS/health/states?fields=component%2Cstate&filter=component%3DGPFS%2C + method: GET + user: "{{ scale_remotemount_client_gui_username }}" + password: "{{ scale_remotemount_client_gui_password }}" + body_format: json + return_content: yes + status_code: + - 200 + register: clientcluster_gpfs_deamon_status + run_once: True + when: scale_remotemount_gpfsdemon_check | bool + + - name: Main | Client Cluster (access) | Print status of GPFS deamon on GUI_MGMT_SERVERS - Debug + run_once: True + ignore_errors: true + debug: + msg: "{{ clientcluster_gpfs_deamon_status.json.states[0].state }}" + when: + - scale_remotemount_gpfsdemon_check | bool + - scale_remotemount_debug is defined + - scale_remotemount_debug | bool + + - name: Main | Client Cluster (access) | GPFS Deamon on Client Cluster is down (Nodeclass GUI_MGMT_SERVERS) + run_once: True + assert: + that: + - "'HEALTHY' or 'DEGRADED' in storagecluster_gpfs_deamon_status.json.states[0].state" + fail_msg: "'GPFS Deamon is NOT started on NodeClass GUI_MGMT_SERVERS" + success_msg: "'GPFS Deamon is started on NodeClass GUI_MGMT_SERVERS" + when: scale_remotemount_gpfsdemon_check | bool + + - name: Main | Storage Cluster (owning) | Check status of gpfs deamon on GUI_MGMT_SERVERS + uri: + validate_certs: "{{ validate_certs_uri }}" + force_basic_auth: true + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes/GUI_MGMT_SERVERS/health/states?fields=component%2Cstate&filter=component%3DGPFS%2C + method: GET + user: "{{ scale_remotemount_storage_gui_username }}" + password: "{{ scale_remotemount_storage_gui_password }}" + body_format: json + return_content: yes + status_code: + - 200 + register: storagecluster_gpfs_deamon_status + run_once: True + when: scale_remotemount_gpfsdemon_check | bool + + - name: Main | storage Cluster (owning) | Print status of GPFS 
deamon on GUI_MGMT_SERVERS - Debug + run_once: True + ignore_errors: true + debug: + msg: "{{ storagecluster_gpfs_deamon_status.json.states[0].state }}" + when: + - scale_remotemount_gpfsdemon_check | bool + - scale_remotemount_debug is defined + - scale_remotemount_debug | bool + + - name: Main | Storage Cluster (owning) | GPFS Deamon on Storage Cluster is down (Nodeclass GUI_MGMT_SERVERS) + run_once: True + assert: + that: + - "'HEALTHY' or 'DEGRADED' in storagecluster_gpfs_deamon_status.json.states[0].state" + fail_msg: "'GPFS Deamon is NOT started on NodeClass GUI_MGMT_SERVERS" + success_msg: "'GPFS Deamon is started on NodeClass GUI_MGMT_SERVERS" + when: scale_remotemount_gpfsdemon_check | bool + - name: msg debug: msg: "Force Run was passed in, attempting to run remote_mount role regardless of whether the filesystem is configured." @@ -160,16 +230,51 @@ when: - storage_cluster_status.status == 401 + - name: Main | API-CLI | Storage Cluster (owning) | Check status of gpfs deamon on GUI_MGMT_SERVERS + uri: + validate_certs: "{{ validate_certs_uri }}" + force_basic_auth: true + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes/GUI_MGMT_SERVERS/health/states?fields=component%2Cstate&filter=component%3DGPFS%2C + method: GET + user: "{{ scale_remotemount_storage_gui_username }}" + password: "{{ scale_remotemount_storage_gui_password }}" + body_format: json + return_content: yes + status_code: + - 200 + register: storagecluster_gpfs_deamon_status + run_once: True + when: scale_remotemount_gpfsdemon_check | bool + + - name: Main | API-CLI | Storage Cluster (owning) | Print status of GPFS deamon on GUI_MGMT_SERVERS - Debug + run_once: True + ignore_errors: true + debug: + msg: "{{ storagecluster_gpfs_deamon_status.json.states[0].state }}" + when: + - scale_remotemount_gpfsdemon_check | bool + - scale_remotemount_debug is defined + - scale_remotemount_debug | bool + + - name: Main | API-CLI | Storage Cluster (owning) | GPFS Deamon on Storage Cluster is down (Nodeclass GUI_MGMT_SERVERS) + run_once: True + assert: + that: + - "'HEALTHY' or 'DEGRADED' in storagecluster_gpfs_deamon_status.json.states[0].state" + fail_msg: "'GPFS Deamon is NOT started on NodeClass GUI_MGMT_SERVERS" + success_msg: "'GPFS Deamon is started on NodeClass GUI_MGMT_SERVERS" + when: scale_remotemount_gpfsdemon_check | bool + - name: Main | API-CLI | Force Run debug: - msg: "Force Run was passed in, attempting to run remote_mount role regardless of whether the filesystem is configured." 
+ msg: "Force Run was passed in, attempting to run remote_mount role regardless of whether the filesystem is configured" when: scale_remotemount_forceRun | bool - name: Main | API-CLI | Configure Remote Cluster include_tasks: remotecluster_api_cli.yml run_once: True - - name: Main | API-CLI | Remote mount the filesystem's + - name: Main | API-CLI | Remote Mount the filesystems include_tasks: mount_filesystem_api_cli.yml run_once: True diff --git a/roles/remotemount_configure/tasks/mount_filesystem_api_cli.yml b/roles/remotemount_configure/tasks/mount_filesystem_api_cli.yml index 2d90caa0..ee2aa3c8 100644 --- a/roles/remotemount_configure/tasks/mount_filesystem_api_cli.yml +++ b/roles/remotemount_configure/tasks/mount_filesystem_api_cli.yml @@ -9,7 +9,7 @@ - name: Mount Filesystem - Rest-API | Storage Cluster (owner) | GET the Cluster Information uri: - validate_certs: no + validate_certs: "{{ validate_certs_uri }}" force_basic_auth: yes url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster method: GET @@ -57,7 +57,7 @@ - name: "Mount Filesystem - Rest-API | Storage Cluster (owner) | Check if filesystems is allready accessible for Client Cluster ('{{ access_cluster_name }}')" uri: - validate_certs: no + validate_certs: "{{ validate_certs_uri }}" force_basic_auth: yes url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: GET @@ -92,7 +92,7 @@ - name: Mount Filesystem - Rest-API | Storage Cluster (owning) | Allow and Set the client cluster filesystem access attributes on the Storage Cluster uri: - validate_certs: no + validate_certs: "{{ validate_certs_uri }}" force_basic_auth: true url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }}/access/{{ item.scale_remotemount_storage_filesystem_name }} method: POST @@ -121,7 +121,7 @@ - name: Mount Filesystem - Rest-API | Storage Cluster (owning) | Check the result of setting the access attributes on the Storage Cluster ##"{{ completed_check.json.jobs[0].jobId }}" uri: - validate_certs: no + validate_certs: "{{ validate_certs_uri }}" force_basic_auth: true url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ item.json.jobs.0['jobId'] }} method: GET @@ -168,7 +168,7 @@ debug: msg: "Add the remotefileystem and mount it on the Client Side" -- name: Mount Filesystem - Rest-API | Client Cluster (access) | Add remote filesystem +- name: Mount Filesystem - Rest-API | Client Cluster (access) | Add remote filesystem - Output is from check. run_once: True shell: | /usr/lpp/mmfs/bin/mmremotefs add {{ item.item.scale_remotemount_client_filesystem_name }} -f {{ item.item.scale_remotemount_storage_filesystem_name }} -C {{ owning_cluster_name }} -T {{ item.item.scale_remotemount_client_remotemount_path }} -o {{ item.item.scale_remotemount_access_mount_attributes | default ('rw') }} -A {{ item.item.scale_remotemount_client_mount_fs | default ('yes') }} --mount-priority {{ item.item.scale_remotemount_client_mount_priority | default ('0') }} @@ -212,14 +212,13 @@ fail: msg: "Scale/GPFS deamon is NOT running on one or serveral of your client cluster node. 
Check and run mmount manually" when: "'down' in gpfs_deamon_state.stdout" - ignore_errors: true run_once: true # Not adding any check here, run only when when mmremotefs add task is also run. -- name: Client Cluster (access) | Mount remote filesystem on all client nodes +- name: Client Cluster (access) | Mount remote filesystem on all client nodes - Output is from previous task, check if the filesystem's is allready mounted run_once: True - command: /usr/lpp/mmfs/bin/mmmount {{ item.item.scale_remotemount_client_filesystem_name }} -N {{ accessing_nodes_name }} + command: /usr/lpp/mmfs/bin/mmmount {{ item.item.scale_remotemount_client_filesystem_name }} -N {{ scale_remotemount_client_mount_on_nodes | default('all') }} loop: "{{ remote_filesystem_results_cli.results }}" when: - item.rc != 0 or scale_remotemount_forceRun | bool @@ -231,7 +230,7 @@ # Adding a stdout from previous as the stdout from the loop abow can be confusing when several loops. -- name: Client Cluster (access) | Mount remote filesystem on all client nodes - Show stdout from the previous task. +- name: Client Cluster (access) | Mount remote filesystem on all client nodes - Shows stdout from the previous task. debug: msg: "Message from mount remote filesystem task: {{ item }}" loop: "{{ client_cluster_mount_remotefs | json_query('results[*].stdout') }}" diff --git a/roles/remotemount_configure/tasks/mount_filesystems.yml b/roles/remotemount_configure/tasks/mount_filesystems.yml index 793fc9b2..c1d80092 100644 --- a/roles/remotemount_configure/tasks/mount_filesystems.yml +++ b/roles/remotemount_configure/tasks/mount_filesystems.yml @@ -1,5 +1,51 @@ --- -- name: Step 7 - Configure and Mount filesystems + + +- name: Step 7 - Check status of GPFS deamon on all nodes before mounting filesystem. + debug: + msg: "Check status of GPFS deamon on all nodes before mounting filesystem " + run_once: True +# +# Cheking that GPFS deamon is started on all nodes, else the adding and mounting of filesystem fails. +# RestAPI filters for GPFS deamon on all nodes with the state FAILED. +# +- name: Client Cluster (access) | Check status of GPFS deamon on all nodes before mounting filesystem. + uri: + validate_certs: "{{ validate_certs_uri }}" + force_basic_auth: true + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes/%3Aall%3A/health/states?fields=component%2Cstate&filter=component%3DGPFS%2Cstate%3DFAILED + method: GET + user: "{{ scale_remotemount_client_gui_username }}" + password: "{{ scale_remotemount_client_gui_password }}" + body_format: json + return_content: yes + status_code: + - 200 + register: clientcluster_gpfs_deamon_all_status + run_once: True + when: scale_remotemount_gpfsdemon_check | bool + +- name: Client Cluster (access) | Print status of GPFS deamon - Debug + run_once: True + ignore_errors: true + debug: + msg: "{{ clientcluster_gpfs_deamon_all_status.json.states }}" + when: + - scale_remotemount_gpfsdemon_check | bool + - scale_remotemount_debug is defined + - scale_remotemount_debug | bool + +- name: Client Cluster (access) | One or more GPFS Deamon on Client Cluster is down. 
+ run_once: True + assert: + that: + - "{{ clientcluster_gpfs_deamon_all_status.json.states|length == 0 }}" + fail_msg: "'GPFS Deamon is NOT started on all nodes, so mounting of filesystem will fail " + success_msg: "'GPFS Deamon is started on all nodes" + when: + - scale_remotemount_gpfsdemon_check | bool + +- name: Step 8 - Configure and Mount filesystems debug: msg: "Check if remotefileystem '{{ filesystem_loop.scale_remotemount_client_filesystem_name }}' is already defined on Client Cluster" run_once: True @@ -21,7 +67,7 @@ - name: block block: - - name: Step 8 + - name: Step 9 debug: msg: "Add the remotefileystem '{{ filesystem_loop.scale_remotemount_client_filesystem_name }}' and mount it on the Client Cluster (access)" run_once: True @@ -43,7 +89,7 @@ "remoteMountPath": "{{ filesystem_loop.scale_remotemount_client_remotemount_path | realpath }}", "mountOptions": "{{ filesystem_loop.scale_remotemount_access_mount_attributes | default('rw') }}", "automount": "{{ filesystem_loop.scale_remotemount_client_mount_fs | default('yes') }}", - "mountOnNodes": "all" + "mountOnNodes": "{{ scale_remotemount_client_mount_on_nodes | default('all') }}" } status_code: - 202 diff --git a/roles/remotemount_configure/tasks/remotecluster.yml b/roles/remotemount_configure/tasks/remotecluster.yml index bc2367d4..5b83405c 100644 --- a/roles/remotemount_configure/tasks/remotecluster.yml +++ b/roles/remotemount_configure/tasks/remotecluster.yml @@ -90,7 +90,9 @@ run_once: True # -# TODO: there is no Check if the Storage Cluster (Owner) is allready defined on Client Cluster +# TODO: there is no Check if the Storage Cluster (Owner) is allready defined on Client Cluster, so in some cases where storage cluster have connection to client cluster (mmauth) but the client cluster don't have, the playbook will fail +# as the owningcluster is in a array, we need to loop over or make list of the array to be able to use when: +# - name: Client Cluster (access) | List the remote cluster already defined uri: validate_certs: "{{ validate_certs_uri }}" @@ -266,7 +268,7 @@ uri: validate_certs: "{{ validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes{{ scale_remotemount_storage_contactnodes_filter }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -315,7 +317,7 @@ # # adminNodeName section # - - name: scale_remotemount_debug | Print out the array storing the nodes in the Storage Cluster (owning) + - name: scale_remotemount_debug | Print out the array storing the adminNodeNames from the Storage Cluster (owning) debug: msg: "{{ owning_nodes_name }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -359,7 +361,7 @@ # # deamonNodeName section # - - name: scale_remotemount_debug | Print out the array storing the nodes in the Storage Cluster (owning) + - name: scale_remotemount_debug | Print out the array storing the DeamonNodeNames from the Storage Cluster (owning) debug: msg: "{{ owning_daemon_nodes_name }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -384,7 +386,7 @@ - 202 register: daemonnodesname_uri_result run_once: True - when: scale_remotemount_storage_adminnodename is not true + when: not scale_remotemount_storage_adminnodename - name: "Client Cluster 
(access) | Check the result of adding the remote Storage Cluster with DeamonNodeName (JOB: {{ daemonnodesname_uri_result.json.jobs[0].jobId }})" uri: @@ -399,7 +401,7 @@ retries: "{{ restapi_retries_count }}" delay: "{{ restapi_retries_delay }}" run_once: True - when: scale_remotemount_storage_adminnodename is not true + when: not scale_remotemount_storage_adminnodename when: - (remote_clusters_results.status == 400) or (scale_remotemount_forceRun | bool) @@ -474,7 +476,7 @@ when: - 'item.item.scale_remotemount_storage_filesystem_name not in current_scale_remotemount_storage_filesystem_name' -- name: Mount Filesystem | Storage Cluster (owning) | Check the result of setting the access attributes on the Storage Cluster "{{ item.json.jobs.0['jobId'] }}" +- name: Mount Filesystem | Storage Cluster (owning) | Check the result of setting the access attributes on the Storage Cluster ##"{{ item.json.jobs.0['jobId'] }}" uri: validate_certs: "{{ validate_certs_uri }}" force_basic_auth: true diff --git a/roles/remotemount_configure/tasks/remotecluster_api_cli.yml b/roles/remotemount_configure/tasks/remotecluster_api_cli.yml index 214cf149..02303b6f 100644 --- a/roles/remotemount_configure/tasks/remotecluster_api_cli.yml +++ b/roles/remotemount_configure/tasks/remotecluster_api_cli.yml @@ -21,7 +21,7 @@ register: owning_cluster_info run_once: True -- name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Storage Cluster (owner) | Print the Cluster Information +- name: Remote Cluster Config - API-CLI | Storage Cluster (owner) | scale_remotemount_debug | Print the Cluster Information debug: msg: "{{ owning_cluster_info }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -34,7 +34,7 @@ failed_when: false run_once: True -- name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Client Cluster (access) | Print the Cluster Information +- name: Remote Cluster Config - API-CLI | Client Cluster (access) | scale_remotemount_debug | Print the Cluster Information debug: msg: "{{ access_cluster_info }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -116,31 +116,75 @@ when: - (remote_clusters_results.status == 200) or (scale_remotemount_forceRun | bool) +# Get node names and check if gpfs deamon is running. +- name: Remote Cluster Config - API-CLI | Client Cluster (access) | GET the cluster nodes name information + shell: /usr/lpp/mmfs/bin/mmlscluster -Y | grep -v HEADER | grep clusterNode | cut -d ':' -f 8 + register: access_node_names + changed_when: false + failed_when: false + run_once: True + +- name: Remote Cluster Config - API-CLI | Client Cluster (access) | scale_remotemount_debug | Print the Cluster Information + debug: + msg: "{{ access_node_names.stdout_lines | join(',') }}" + when: scale_remotemount_debug is defined and scale_remotemount_debug | bool + run_once: True + +- set_fact: + accessing_nodes_name: [] + run_once: True + +- set_fact: + accessing_nodes_name: "{{ access_node_names.stdout_lines | join(',') }}" + run_once: True + +- name: Remote Cluster Config - API-CLI | Client Cluster (access) | Check if GPFS deamon is started + shell: /usr/lpp/mmfs/bin/mmgetstate -Y -N {{ accessing_nodes_name }} | grep -v HEADER | cut -d ':' -f 9 + register: gpfs_deamon_state + changed_when: false + run_once: true + +- name: Remote Cluster Config - API-CLI | Client Cluster (access) | Fail if GPFS deamon is not started + fail: + msg: "Scale/GPFS deamon is NOT running on one or serveral of your client cluster node. 
Check and run mmount manually" + when: "'down' in gpfs_deamon_state.stdout" + ignore_errors: true + run_once: true + +# +# Section for doing the configuration of remote cluster. +# - name: Remote Cluster Config - API-CLI | Exchange the keys between Storage and Client Clusters (access) block: - name: Step 3 - Remote Cluster Config - API-CLI debug: msg: "Configure remote Cluster connection between Storage Cluster (owner) and Client Cluster (access)" run_once: True + when: + - (remote_clusters_results.status == 400) or (scale_remotemount_forceRun | bool) - name: Remote Cluster Config - API-CLI | Remote Cluster connection status debug: msg: "Remote Cluster connection to ('{{ access_cluster_name }}') is not configured, procceding with configuration" run_once: True + when: + - (remote_clusters_results.status == 400) or (scale_remotemount_forceRun | bool) - name: Remote Cluster Config - API-CLI | Client Cluster (Access) | Get the Public key from CLI and register shell: "cat /var/mmfs/ssl/id_rsa_committed.pub" register: accesskey_result run_once: True + when: + - (remote_clusters_results.status == 400) or (scale_remotemount_forceRun | bool) - - name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Print out the Client Cluster (access) Public Key results + - name: Remote Cluster Config - API-CLI | Client Cluster (accesing) | scale_remotemount_debug | Print out the Client Cluster (access) Public Key results debug: msg: "{{ accesskey_result }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool run_once: True - - name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Print out the Client Cluster (access) Public Key results to file ("{{ scale_remote_mount_client_access_key }}") + - name: Remote Cluster Config - API-CLI | Client Cluster (accesing) | scale_remotemount_debug | Print out the Client Cluster (access) Public Key results to file ("{{ scale_remote_mount_client_access_key }}") copy: dest: "{{ scale_remote_mount_client_access_key }}" content: "{{ accesskey_result }}\n" @@ -169,6 +213,8 @@ - 202 register: send_key run_once: True + when: + - (remote_clusters_results.status == 400) or (scale_remotemount_forceRun | bool) - name: "Remote Cluster Config - API-CLI | Storage Cluster (owner) | Check the result of adding the Client Cluster {{ send_key.json.jobs[0].jobId }}" uri: @@ -183,6 +229,8 @@ retries: "{{ restapi_retries_count }}" delay: "{{ restapi_retries_delay }}" run_once: True + when: + - (remote_clusters_results.status == 400) or (scale_remotemount_forceRun | bool) - name: Remote Cluster Config - API-CLI | Storage Cluster (owner) | Get the Public Key uri: @@ -229,7 +277,7 @@ uri: validate_certs: "{{ validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes{{ scale_remotemount_storage_contactnodes_filter }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -280,7 +328,7 @@ # # adminNodeName section # - - name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Print out the array storing the nodes in the Storage Cluster (owning) + - name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Print out the array storing the AdminNodeNames from the Storage Cluster (owning) debug: msg: "{{ owning_nodes_name }}" when: 
scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -297,7 +345,7 @@ # # deamonNodeName section # - - name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Print out the array storing the nodes in the Storage Cluster (owning) + - name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Print out the array storing the DeamonNodeNames from the Storage Cluster (owning) debug: msg: "{{ owning_daemon_nodes_name }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -310,7 +358,7 @@ register: remote_cluster_add_ssh failed_when: - "remote_cluster_add_ssh.rc != 0 and 'is already defined' not in remote_cluster_add_ssh.stderr" - when: scale_remotemount_storage_adminnodename is not true + when: not scale_remotemount_storage_adminnodename - name: Remote Cluster Config - API-CLI | Client Cluster (Access) | Cleanup temporary keys. file: From b824a5e25b66ad07ad471983275a9b79b3356928 Mon Sep 17 00:00:00 2001 From: Ole Kristian Date: Mon, 3 Jan 2022 14:36:10 +0100 Subject: [PATCH 039/113] Updated the format and test in VARIABLESNEW.md file. Signed-off-by: Ole Kristian --- VARIABLESNEW.md | 262 +++++++++++++++++++++++++----------------------- 1 file changed, 135 insertions(+), 127 deletions(-) diff --git a/VARIABLESNEW.md b/VARIABLESNEW.md index f1e5f6e2..ba11f64b 100644 --- a/VARIABLESNEW.md +++ b/VARIABLESNEW.md @@ -3,130 +3,138 @@ Variables used by Spectrum Scale (GPFS) Ansible project Variables list is dived into each if the Ansible roles. -**Core** - -| Role | Variables | Default | Options | User Mandatory | Descriptions | -|------|----------------------------------------|----------------------------|------------------------------------------------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Core | scale_architecture: | {{ ansible_architecture }} | x86_64 or ppc64le | no | Specify the Spectrum Scale architecture that you want to install on your nodes. | -| Core | scale_version: | none | 5.x.x.x | yes | Specify the Spectrum Scale version that you want to install on your nodes. With 5.0.5.x. | -| Core | scale_daemon_nodename: | {{ ansible_hostname }} | none | no | Spectrum Scale daemon nodename defaults to nodes hostname | -| Core | scale_admin_nodename: | {{ ansible_hostname }} | none | no | Spectrum Scale admin nodename defaults to nodes hostname | -| Core | scale_state: | present | present,maintenance,absent | no | Desired state of the Spectrum Scale node. present - node will be added to cluster, daemon will be started maintenance - node will be added to cluster, daemon will not be started absent - node will be removed from cluster | -| Core | scale_prepare_disable_selinux | false | true or false | no | Whether or not to disable SELinux. | -| Core | scale_reboot_automatic | false | true or false | no | Whether or not to automatically reboot nodes - if set to false then only a message is printed. If set to true then nodes are automatically rebooted (dangerous!). | -| Core | scale_prepare_enable_ssh_login | false | true or false | no | Whether or not enable SSH root login (PermitRootLogin) and public key authentication (PubkeyAuthentication). 
| -| Core | scale_prepare_restrict_ssh_address | false | true or false | no | Whether or not to restrict SSH access to the admin nodename (ListenAddress). Requires scale_prepare_enable_ssh_login to be enabled. | -| Core | scale_prepare_disable_ssh_hostkeycheck | false | true or false | no | Whether or not to disable SSH hostkey checking (StrictHostKeyChecking). | -| Core | scale_prepare_exchange_keys | false | true or false | no | Path to public SSH key - will be generated (if it does not exist) and exchanged between nodes. Requires scale_prepare_exchange_keys to be enabled, too. | -| Core | scale_prepare_pubkey_path | /root/.ssh/id_rsa.pub | /root/.ssh/gpfskey.pub | no | example: /root/.ssh/gpfskey.pub | -| Core | scale_prepare_disable_firewall | default: false | true or false | no | Whether or not to disable Linux firewalld - if you need to keep firewalld active then change this variable to false and apply your custom firewall rules prior to running this role (e.g. as pre_tasks). | -| Core | scale_install_localpkg_path | none | /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install | yes | Specify the path to the self-extracting Spectrum Scale installation archive on the local system (accessible on Ansible control machine) - it will be copied to your nodes. | -| Core | scale_install_remotepkg_path | none | /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install | yes | Specify the path to Spectrum Scale installation package on the remote system (accessible on Ansible managed node). | -| Core | scale_install_repository_url | none | example: http://server/gpfs/ | yes | Specify the URL of the (existing) Spectrum Scale YUM repository (copy the contents of /usr/lpp/mmfs/{{ scale_version }}/ to a web server in order to build your repository). Note that if this is a URL then a new repository definition will be created. If this variable is set to existing then it is assumed that a repository definition already exists and thus will *not* be created. | -| Core | scale_install_directory_pkg_path | none | example: /tmp/gpfs/ | yes | Specify the path to the user-provided directory, containing all Spectrum Scale packages. Note that for this installation method all packages need to be kept in a single directory. | -| Core | scale_cluster_quorum | false | true or false | no | If you dont specify any quorum nodes then the first seven hosts in your inventory will automatically be assigned the quorum role. even if this variable is false | -| Core | scale_cluster_manager | false | true or false | no | Nodes default manager role - you ll likely want to define per-node roles in your inventory | -| Core | scale_cluster_profile_name: | none | gpfsprotocoldefaults or gpfsprotocolrandomio | no | Specifies a predefined profile of attributes to be applied. System-defined profiles are located in /usr/lpp/mmfs/profiles/. The following system-defined profile names are accepted. gpfsprotocoldefaults and gpfsprotocolrandomio eg. If you want to apply gpfsprotocoldefaults then specify scale_cluster_profile_name: gpfsprotocoldefaults | -| Core | scale_cluster_profile_dir_path | /usr/lpp/mmfs/profiles/ | Path to cluster profile: example: /usr/lpp/mmfs/profiles/ | no | Fixed variable related to mmcrcluster profile. System-defined profiles are located in /usr/lpp/mmfs/profiles/ | -| Core | scale_enable_gpg_check: | true | true or false | no | Enable/disable gpg key flag | -| Core | scale_install_localpkg_tmpdir_path | /tmp | path to folder. 
| no | Temporary directory to copy installation package to (local package installation method) | -| Core | scale_nodeclass: | none | Name of the nodeclass: example scale nodeclass: - class1 | no | Node classes can be defined on a per-node basis by defining the scale_nodeclass variable. | -| Core | scale_config: | none | scale_config:
  - nodeclass: class1
    params:
        - pagepool: 4G
        - autoload: yes
        - ignorePrefetchLunCount: yes | no | Configuration attributes can be defined as variables for *any* host in the play — the host for which you define the configuration attribute is irrelevant. Refer to the man mmchconfig man page for a list of available configuration attributes. | -| Core | scale_storage: | none | scale_storage:
     filesystem: gpfs01
     blockSize: 4M
     maxMetadataReplicas: 2
     defaultMetadataReplicas: 2
     maxDataReplicas: 2
     defaultDataReplicas: 2
     numNodes: 16
     automaticMountOption: true
     defaultMountPoint: /mnt/gpfs01
     disks:
          - device: /dev/sdb
            nsd: nsd_1
            servers: scale01
            failureGroup: 10
            usage: metadataOnly
            pool: system
          - device: /dev/sdc
            nsd: nsd_2
            servers: scale01
            failureGroup: 10
            usage: dataOnly
            pool: data | no | Refer to man mmchfs and man mmchnsd man pages for a description of these storage parameters. The filesystem parameter is mandatory, servers, and the device parameter is mandatory for each of the file systems disks. All other file system and disk parameters are optional. scale_storage *must* be define using group variables. Do *not* define disk parameters using host variables or inline variables in your playbook. Doing sowould apply them to all hosts in the group/play, thus defining the same disk multiple times... | -| Core | scale_protocol_node: | none | true or false | no | Enable to set node to uses as Protcol Node, by host variable. | -| Core | scale_admin_node | false | true or false | no | Set admin flag on node for Ansible to use. | -| Core | scale_nsd_server | scale_nsd_server | true or false | no | Set nsd flag for installation purpose | -| GUI | scale_gui_hide_tip_callhome | false | true or false | no | Hide the Callhome not enabled tip on gui | -| GUI | scale_cluster_gui: | false | true or false | no | Install Spectrum Scale GUI on nodes, set by host variables. | -| GUI | scale_service_gui_start: | true | true or false | no | Wheter or not to start the Scale GUI after installation. | -| GUI | scale_gui_admin_user: | none | admin | no | Spesify a name for the admin user to be created. | -| GUI | scale_gui_admin_password: | none | Admin@GUI! | no | Password to be set on the admin user | -| GUI | scale_gui_admin_role: | none | SecurityAdmin,SystemAdmin | no | Role access for the admin user, check IBM doc for valid roles. | -| GUI | scale_gui_user_username: | none | SEC | no | Ekstra Spectrum Scale GUI user. example: Monitor or RestAPI. | -| GUI | scale_gui_user_password: | none | Storage@Scale1 | no | Password for extra user | -| GUI | scale_gui_user_role: | none | SystemAdmin | no | Role access for the extra user. | -| GUI | scale_gui_admin_hc_vault: | none | N/A | no | HasiCorp - Create local Admin user with password from vault, cant be combined with the scale_gui_admin_user | -| GUI | scale_gui_admin_hc_vault_user: | noen | admin | no | Create local admin user and write password to Vault | -| GUI | scale_gui_admin_hc_vault_role: | none | SecurityAdmin,SystemAdmin | no | Role access for the admin user, check IBM doc for valid roles. | -| GUI | scale_gui_cert_hc_vault: | false | true or false | no | Generate https Certificate from HasiCorp Vault and import it to Scale GUI. The Scale host need to be included in HC Vault and the Ansible playbook need to have the computed.name variables, normaly the playbook is then run from Terraform.## The Scale host need to be included in HC Vault and the Ansible playbook need to have the computed.name variables, normaly the playbook is then run from Terraform. | -| GUI | scale_gui_password_policy_change: | false | true or false | no | Change Default GUI User Password Policy Change what you need in your inventory files and rest wil use default | | -| GUI | scale_gui_password_policy: | false | scale_gui_password_policy:
  minLength: 6
  maxAge: 900
  minAge: 0
  remember: 3
  minUpperChars: 0
  minLowerChars: 0
  minSpecialChars: 0
  minDigits: 0
  maxRepeat: 0
  minDiff: 1
  rejectOrAllowUserName: --rejectUserName | | Change Default GUI User Password Policy Change what you need in your inventory files and rest wil use default.
  scale_gui_password_policy:
  minLength: 6 ## Minimum password length
  maxAge: 900 ## Maximum password age
  minAge: 0 ## Minimum password age
  remember: 3 ## Remember old passwords
  minUpperChars: 0 ## Minimum upper case characters
  minLowerChars: 0 ## Minimum lower case characters
  minSpecialChars: 0 ## Minimum special case characters
  minDigits: 0 ## Minimum digits
  maxRepeat: 0 ## Maximum number of repeat characters
  minDiff: 1 ## Minimum different characters with respect to old password
  rejectOrAllowUserName:
  --rejectUserName ## either --rejectUserName or --allowUserName | -| GUI | scale_gui_ldap_integration: | false | true or false | no | Active Directory information for Managing GUI users in an external AD or LDAP server | -| GUI | scale_gui_ldap: | none | scale_gui_ldap:
  name: 'myad'
  host: 'myad.mydomain.local'
  bindDn: 'CN=servicebind,CN=Users,DC=mydomain,DC=local'
  bindPassword: 'password'
  baseDn: 'CN=Users,DC=mydomain,DC=local'
  port: '389' #Default 389
  type: 'ad' #Default Microsoft Active Directory
  #securekeystore: /tmp/ad.jks #Local on GUI Node
  #secureport: '636' #Default 636 | no | Managing GUI users in an external AD or LDAP Parameters
 Parameter Description
  - **name:** Alias for your LDAP/AD server
  - **host:** The IP address or host name of the LDAP server.
  - **baseDn:** BasedDn string for the repository.
  - **bindDn:** BindDn string for the authentication user.
  - **bindPassword:** Password of the authentication user.
  - **port:** Port number of the LDAP. Default is 389
  - **type:** Repository type (ad, ids, domino, secureway, iplanet, netscape, edirectory or custom). Default is ad.
  - **securekeystore:** Location with file name of the keystore file (.jks, .p12 or .pfx).
  - **secureport:** Port number of the LDAP. 636 over SSL. | -| GUI | scale_gui_groups: | none | scale_gui_groups:
  administrator: 'scale-admin'
  securityadmin: 'scale-securityadmin'
  storageadmin: 'scale-storage-administrator'
  snapadmin: 'scale-snapshot-administrator'
  data_access: 'scale-data-access'
  monitor: 'scale-monitor'
  protocoladmin: 'scale-protocoladmin'
  useradmin: 'scale-useradmin' | no | The LDAP/AD Groups needs to be create in the LDAP. (You don't need created before deployment.)
You'll likely want to define this in your host inventory
Add the mappings that you want and replace the **scale-** with your ldap groups.

The following are the default user groups:
  - **Administrator** - Manages all functions on the system except those deals with managing users, user groups, and authentication.
  - **SecurityAdmin** - Manages all functions on the system, including managing users, user groups, and user authentication.
  - **SystemAdmin** - Manages clusters, nodes, alert logs, and authentication.
  - **StorageAdmin** - Manages disks, file systems, pools, filesets, and ILM policies.
  - **SnapAdmin** - Manages snapshots for file systems and filesets.
  - **DataAccess** - Controls access to data. For example, managing access control lists.
  - **Monitor** - Monitors objects and system configuration but cannot configure, modify, or manage the system or its resources.
  - **ProtocolAdmin** - Manages object storage and data export definitions of SMB and NFS protocols.
  - **UserAdmin** - Manages access for GUI users. Users who are part of this group have edit permissions only in the Access pages of the GUI. Check IBM doc for updated list | -| GUI | scale_gui_email_notification: | false | false or true | no | Enable E-mail notifications in Spectrum Scale GUI | -| GUI | scale_gui_email: | none | scale_gui_email:
  name: 'SMTP_1'
  ipaddress: 'emailserverhost'
  ipport: '25'
  replay_email_address: scale-server-test@acme.com
  contact_name: 'scale-contact-person'
  subject: &cluster&message
  sender_login_id:
  password:
  headertext:
  footertext: | no | - The email feature transmits operational and error-related data in the form of an event notification email.
  - Email notifications can be customized by setting a custom header and footer for the emails and customizing the subject by selecting and combining from the following variables: &message, &messageId, &severity, &dateAndTime, &cluster and &component.
  - **name** - Specifies a name for the e-mail server.
  - **address** - Specifies the address of the e-mail server. Enter the SMTP server IP address or host name. For example, 10.45.45.12 or smtp.example.com.
  - **portNumber** - Specifies the port number of the e-mail server. Optional.
  - **reply_email_address/sender_address** - Specifies the sender's email address.
  - **contact_name/sender_name** - Specifies the sender's name.
  - **subject** Notifications can be customized by setting a custom header and footer or with variable like &cluster&message ## Variables: &message &messageId &severity &dateAndTime &cluster&component
  - **sender_login_id** - Login needed to authenticate sender with email server in case the login is different from the sender address (--reply). Optional.
  - **password** - Password used to authenticate sender address (--reply) or login id (--login) with the email sever | -| GUI | scale_gui_email_recipients: | none | scale_gui_email_recipients:
  name: 'name_email_recipient_name':
  address: 'email_recipient_address@email.com':
  components_security_level: 'SCALEMGMT=WARNING,CESNETWORK=WARNING':
  reports: 'DISK,GPFS,AUTH':
  quotaNotification: '--quotaNotification' ##if defined it enabled quota Notification:
  quotathreshold: '70.0' | no | **Options:**
  - **NAME**: Name of the email Recipients
  - **Address:** userAddress Specifies the address of the e-mail user
  - **Components_security_level**
     - The value scale_gui_email_recipients_components_security_level: Need to contain the **Component** and the **Warning/Security Level**
       - Chose component like **SCALEMGMT** and the security_level of WARNING wil be **SCALEMGMT=ERROR**
       - Security level: Chose the lowest severity of an event for which you want to receive and email. Example, selectin Tip includes events with severity Tip, Warning, and Error in the email.
       - The Severity level is as follows: : **INFO**, **TIP**, **WARNING**, **ERROR**
     **List of all security levels:**
      AFM=WARNING,AUTH=WARNING,BLOCK=WARNING,CESNETWORK=WARNING,CLOUDGATEWAY=WARNING,CLUSTERSTATE=WARNING,DISK=WARNING,FILEAUDITLOG=WARNING,FILESYSTEM=WARNING,GPFS=WARNING,GUI=WARNING,HADOOPCONNECTOR=WARNING,
      KEYSTONE=WARNING,MSGQUEUE=WARNING,NETWORK=WARNING,NFS=WARNING,OBJECT=WARNING,PERFMON=WARNING,SCALEMGMT=WARNING,SMB=WARNING,CUSTOM=WARNING,AUTH_OBJ=WARNING,CES=WARNING,CESIP=WARNING,NODE=WARNING,THRESHOLD=WARNING,WATCHFOLDER=WARNING,NVME=WARNING,POWERHW=WARNING
  - **Reports** listOfComponents
       - Specifies the components to be reported. The tasks generating reports are scheduled by default to send a report once per day. Optional.
        AFM,AUTH,BLOCK,CESNETWORK,CLOUDGATEWAY,CLUSTERSTATE,DISK,FILEAUDITLOG,FILESYSTEM,GPFS,GUI,HADOOPCONNECTOR,KEYSTONE,MSGQUEUE,NETWORK,NFS,OBJECT,PERFMON,SCALEMGMT,SMB,CUSTOM,AUTH_OBJ,CES,CESIP,NODE,THRESHOLD,WATCHFOLDER,NVME,POWERHW
  - **quotaNotification**
     Enables quota notifications which are sent out if the specified threshold is violated. (See --quotathreshold)
  - **quotathreshold** valueInPercent
     - Sets the threshold(percent of the hard limit)for including quota violations in the quota digest report.
     - The default value is 100. The values -3, -2, -1, and zero have special meaning.
     - Specify the value -2 to include all results, even entries where the hard quota not set.
     - Specify the value -1 to include all entries where hard quota is set and current usage is greater than or equal to the soft quota.
     - Specify the value -3 to include all entries where hard quota is not set and current usage is greater than or equal to the soft quota only.
     - Specify the value 0 to include all entries where the hard quota is set.
  Using unlisted options can lead to an error | -| GUI | scale_gui_snmp_notification: | false | true or false | no | Enable SNMP notifications in Spectrum Scale GUI | -| GUI | scale_gui_snmp_server: | false | scale_gui_snmp_server:
  ip_adress: 'snmp_server_host'
  ip_port: '162'
  community: 'Public' | no | - To Configure SNMP Notification.
    - Change the Value:
      - scale_gui_snmp_notification: true
       - ip_adress to your SNMP server/host
       - ip_port to your SNMP port
       - community to your SNMP community | - -**Protocol** - -| Role | variables | Default | Options | User Mandatory | Descriptions | -|-------------------|--------------------------|---------|---------------------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Protocol/NFS | scale_install_debuginfo: | true | true or false | no | Flag to install ganesha debug package | -| Protocol/SMB | scale_install_debuginfo: | true | true or false | no | Flag to install smb debug package | -| Protocol | scale_protocols: #IPV4 | none | scale_protocols: #IPV4
  smb: true
  nfs: true
  object: true
  export_ip_pool: [192.168.100.100,192.168.100.101]
  filesystem: cesSharedRoot
  mountpoint: /gpfs/cesSharedRoot | no | To install Spectrum Scale Protocol. Refer to man mmces man pages for a description of these Cluster Export. scale_ces_groups can also be user to group nodes. | -| Protocol | scale_protocols: #Ipv6 | none | scale_protocols: #Ipv6
  smb: true
  nfs: true
  object: true
  interface: [eth0]
  export_ip_pool: [2002:90b:e006:84:250:56ff:feb9:7787]
  filesystem: cesSharedRoot
  mountpoint: /gpfs/cesSharedRoot | no | For enabling Cluster Export Services in an IPv6 environment one also needs to define an interface parameter: scale_ces_groups can also be user to group nodes. | -| Protocol/Object | scale_ces_obj: | none | scale_ces_obj:
  dynamic_url: False
  enable_s3: False
  local_keystone: True
  enable_file_access: False
  endpoint_hostname: scale-11
  object_fileset: Object_Fileset
  pwd_file: obj_passwd.j2
  admin_user: admin
  admin_pwd: admin001
  database_pwd: admin001 | no | Missing descriptions. - - - -**HDFS** - -| Role | variables | Default | Options | User Mandatory | Descriptions | -|------|----------------------|---------|-----------------------------|----------------|---------------------------------------------------------| -| hdfs | ha_enabled: | false | true or false | no | HA for namenode in HDFS? | -| hdfs | scale_hdfs_clusters: | none | - name: mycluster
  filesystem: gpfs1
  namenodes: ['host-vm1.test.net', 'host-vm2.test.net']
  datanodes: ['host-vm3.test.net', 'host-vm4.test.net', 'host-vm5.test.net']
  datadir: datadir | no | Install IBM Spectrum Scale (HDFS), "Document more" - - -**Performance Monitoring - zimon** - -| Role | variables | Default | Options | User Mandatory | Descriptions | -|-------|------------------------|---------|---------------|----------------|-------------------------------------------------------------------------------| -| zimon | scale_zimon_collector: | false | true or false | no | Nodes default GUI collector role, its install the collector on all GUI nodes. | -| zimon | scale_cluster_gui | false | true or false | no | Install Spectrum Scale GUI on nodes, set by host variables. | -| zimon | scale_cluster_zimon | false | true or false | no | Install up zimon enabled | - - -**FileAudit Logging** - -| Role | variables | Default | Options | User Mandatory | Descriptions | -|-------------------------------|------------------------|---------|----------------|----------------|------------------------------------------------------------------------------| -|fal | scale_fal_enable | true | true or false | no | Flag to enable fileauditlogging | - -**CallHome** - -| Role | variables | Default | Options | User Mandatory | Descriptions | -|----------------------------------:|------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------|----------------|--------------| -| callhome |scale_callhome_params: | None | scale_callhome_params:
    is_enabled: true
    customer_name: abc
    customer_email: abc@abc.com
    customer_id: 12345
    customer_country: IN
    proxy_ip:
    proxy_port:
    proxy_user:
    proxy_password:
    proxy_location:
    callhome_server: scale01
    callhome_group1: [scale01,scale02,scale03,scale04]
    callhome_schedule: [daily,weekly]
    is_enabled: true
    customer_name: abc
    customer_email: abc@abc.com
    customer_id: 12345
    customer_country: IN
    proxy_ip:
    proxy_port:
    proxy_user:
    proxy_password:
    proxy_location:
    callhome_server: scale01. ## server that have callhome installed on and can reach out to IBM
    callhome_group1: [scale01,scale02,scale03,scale04]
    callhome_schedule: [daily,weekly] | no | Refer to man mmcallhome man pages for a description of these Call Homes
**scale_callhome_params**:
  is_enabled: true
  customer_name: abc
  customer_email: abc@abc.com
  customer_id: 12345
  customer_country: IN
  proxy_ip:
  proxy_port:
  proxy_user:
  proxy_password:
  proxy_location:
  callhome_server: scale01. ## server that have callhome installed on and can reach out to IBM
  callhome_group1: [scale01,scale02,scale03,scale04]
  callhome_schedule: [daily,weekly] | - - -**Remote Mount Filesystem** - -| Role | variables | Default | Options | User Mandatory | Descriptions | -|-------------|-------------------------------------------------|-------------------------------------------------|-------------------------|----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| remotemount | scale_remotemount_client_gui_username: | none | username, example admin | yes | Scale User with Administrator or ContainerOperator role/rights | -| remotemount | scale_remotemount_client_gui_password: | none | password for user | yes | Password for Scale User with Administrator or ContainerOperator role/rights | -| remotemount | scale_remotemount_client_gui_hostname: | none | 10.10.10.1 | yes | IP or Hostname to Client GUI Node | -| remotemount | scale_remotemount_storage_gui_username: | none | | yes | Scale User with Administrator or ContainerOperator role/rights | -| remotemount | scale_remotemount_storage_gui_password: | none | | yes | Password for Scale User with Administrator or ContainerOperator role/rights | -| remotemount | scale_remotemount_storage_gui_hostname: | none | | yes | IP or Hostname to Storage GUI Node | -| remotemount | scale_remotemount_storage_adminnodename: | false | true or false | no | Spectrum Scale uses the Deamon node name and the IP Attach to connect and run cluster traffic on. In most cases the admin network and deamon network is the same. In case you have different AdminNode address and DeamonNode address and for some reason you want to use admin network, then you can set the variable to true | -| remotemount | scale_remotemount_filesystem_name | none | scale_remotemount_filesystem_name
  scale_remotemount_client_filesystem_name:
  scale_remotemount_client_remotemount_path:
  scale_remotemount_storage_filesystem_name:
  scale_remotemount_access_mount_attributes:
  scale_remotemount_client_mount_fs:
  scale_remotemount_client_mount_priority: 0 | yes | The Values in the list needs to be in list, as we support to mount up more filesystem.

  - Local Filesystem Name of the remote mounted filesystem, So the storage cluster and remote cluster can have different names.
  - Path to where the filesystem shoul be Mounted: /gpfs01/fs1
  - Storage Cluster filesystem you want to mount: gpfs01
  - Filesystem can be mounted in different access mount: RW or RO
  - Indicates when the file system is to be mounted: options are yes, no, automount (When the file system is first accessed.)
  - File systems with higher Priority numbers are mounted after file systems with lower numbers. File systems that do not have mount priorities are mounted last. A value of zero indicates no priority. valid values: 0 | -| remotemount | scale_remotemount_client_filesystem_name: | none | fs1 | yes | Local Filesystem Name of the remote mounted filesystem, So the storage cluster and remote cluster can have different names. | -| remotemount | scale_remotemount_client_remotemount_path: | none | /gpfs01/fs1 | yes | Path to where the filesystem shoul be Mounted. | -| remotemount | scale_remotemount_storage_filesystem_name: | none | gpfs01 | yes | Storage Cluster filesystem you want to mount | -| remotemount | scale_remotemount_access_mount_attributes: | rw | RW, RO | no | Filesystem can be mounted in different access mount: RW or RO | -| remotemount | scale_remotemount_client_mount_fs: | yes | yes, no , automount | no | Indicates when the file system is to be mounted:** options are yes, no, automount (When the file system is first accessed.) | -| remotemount | scale_remotemount_client_mount_priority: 0 | 0 | 0 - x | no | File systems with higher Priority numbers are mounted after file systems with lower numbers. File systems that do not have mount priorities are mounted last. A value of zero indicates no priority. | -| remotemount | scale_remotemount_client_no_gui: | false | true or false | no | If Accessing/Client Cluster dont have GUI, it will use CLI/SSH against Client Cluster | -| remotemount | scale_remotemount_storage_pub_key_location: | Defaults to /tmp/storage_cluster_public_key.pub | path to pubkey | no | Client Cluster (Access) is downloading the pubkey from Owning cluster and importing it | -| remotemount | scale_remotemount_cleanup_remote_mount: | false | true or false | no | Unmounts, remove the filesystem, and the connection between Accessing/Client cluster and Owner/Storage Cluster. 
This now works on clusters that not have GUI/RESTAPI interface on Client Cluster | -| remotemount | scale_remotemount_debug: | false | true or false | no | Outputs debug information after tasks | -| remotemount | scale_remotemount_forceRun: | false | true or false | no | If scale_remotemount_forceRun is passed in, then the playbook is attempting to run remote_mount role regardless of whether the filesystem is configured | -| remotemount | scale_remotemount_storage_pub_key_location: | /tmp/storage_cluster_public_key.pub | path to | no | Client Cluster (Access) pubkey that is changed from json to right format and then used when creating connection | -| remotemount | scale_remotemount_storage_pub_key_location_json | /tmp/storage_cluster_public_key_json.pub | path to | no | Client Cluster (Access) is downloading the pubkey as JSON from Owning cluster | -| remotemount | scale_remotemount_storage_pub_key_delete | true | true or false | no | Delete both temporary pubkey after the connection have been established | -| - +**Role: Core - Core Spectrum Scale installation and configuration** + +| Variables | Default | Options | User Mandatory | Descriptions | +|----------------------------------------|----------------------------|------------------------------------------------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| scale_architecture: | {{ansible_architecture}} | x86_64 or ppc64le | no | This ansible_architecture is gather from ansible get_facts module, Spectrum Scale architecture that you want to install on your nodes. | +| scale_version: | none | 5.x.x.x | yes | Specify the Spectrum Scale version that you want to install on your nodes. With 5.0.5.x. | +| scale_daemon_nodename: | {{ansible_hostname}} | none | no | Spectrum Scale daemon nodename defaults to nodes hostname | +| scale_admin_nodename: | {{ansible_hostname}} | none | no | Spectrum Scale admin nodename defaults to nodes hostname | +| scale_state: | present | present,maintenance,absent | no | Desired state of the Spectrum Scale node. present - node will be added to cluster, daemon will be started maintenance
node will be added to cluster, daemon will not be started absent - node will be removed from cluster | +| scale_prepare_disable_selinux | false | true or false | no | Whether or not to disable SELinux. | +| scale_reboot_automatic | false | true or false | no | Whether or not to automatically reboot nodes - if set to false then only a message is printed. If set to true then nodes are automatically rebooted (dangerous!). | +| scale_prepare_enable_ssh_login | false | true or false | no | Whether or not enable SSH root login (PermitRootLogin) and public key authentication (PubkeyAuthentication). | +| scale_prepare_restrict_ssh_address | false | true or false | no | Whether or not to restrict SSH access to the admin nodename (ListenAddress). Requires scale_prepare_enable_ssh_login to be enabled. | +| scale_prepare_disable_ssh_hostkeycheck | false | true or false | no | Whether or not to disable SSH hostkey checking (StrictHostKeyChecking). | +| scale_prepare_exchange_keys | false | true or false | no | Path to public SSH key - will be generated (if it does not exist) and exchanged between nodes. Requires scale_prepare_exchange_keys to be enabled, too. | +| scale_prepare_pubkey_path | /root/.ssh/id_rsa.pub | /root/.ssh/gpfskey.pub | no | example: /root/.ssh/gpfskey.pub | +| scale_prepare_disable_firewall | default: false | true or false | no | Whether or not to disable Linux firewalld - if you need to keep firewalld active then change this variable to false and apply your custom firewall rules prior to running this role (e.g. as pre_tasks). | +| scale_install_localpkg_path | none | /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install | yes | Specify the path to the self-extracting Spectrum Scale installation archive on the local system (accessible on Ansible control machine) - it will be copied to your nodes. | +| scale_install_remotepkg_path | none | /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install | yes | Specify the path to Spectrum Scale installation package on the remote system (accessible on Ansible managed node). | +| scale_install_repository_url | none | example: http://server/gpfs/ | yes | Specify the URL of the (existing) Spectrum Scale YUM repository (copy the contents of /usr/lpp/mmfs/{{ scale_version }}/ to a web server in order to build your repository).
Note that if this is a URL then a new repository definition will be created. If this variable is set to existing then it is assumed that a repository definition already exists and thus will *not* be created. | +| scale_install_directory_pkg_path | none | example: /tmp/gpfs/ | yes | Specify the path to the user-provided directory, containing all Spectrum Scale packages. Note that for this installation method all packages need to be kept in a single directory. | +| scale_cluster_quorum | false | true or false | no | If you dont specify any quorum nodes then the first seven hosts in your inventory will automatically be assigned the quorum role. even if this variable is false | +| scale_cluster_manager | false | true or false | no | Nodes default manager role - you ll likely want to define per-node roles in your inventory | +| scale_cluster_profile_name: | none | gpfsprotocoldefaults or gpfsprotocolrandomio | no | Specifies a predefined profile of attributes to be applied. System-defined profiles are located in /usr/lpp/mmfs/profiles/
The following system-defined profile names are accepted: gpfsprotocoldefaults and gpfsprotocolrandomio.
eg. If you want to apply gpfsprotocoldefaults then specify scale_cluster_profile_name: gpfsprotocoldefaults | +| scale_cluster_profile_dir_path | /usr/lpp/mmfs/profiles/ | Path to cluster profile: example: /usr/lpp/mmfs/profiles/ | no | Fixed variable related to mmcrcluster profile. System-defined profiles are located in /usr/lpp/mmfs/profiles/ | +| scale_enable_gpg_check: | true | true or false | no | Enable/disable gpg key flag | +| scale_install_localpkg_tmpdir_path | /tmp | path to folder. | no | Temporary directory to copy installation package to (local package installation method) | +| scale_nodeclass: | none | Name of the nodeclass: example scale nodeclass: - class1 | no | Node classes can be defined on a per-node basis by defining the scale_nodeclass variable. | +| scale_config: | none | scale_config:
  - nodeclass: class1
    params:
        - pagepool: 4G
        - autoload: yes
        - ignorePrefetchLunCount: yes | no | Configuration attributes can be defined as variables for *any* host in the play
The host for which you define the configuration attribute is irrelevant. Refer to the man mmchconfig man page for a list of available configuration attributes. | +| scale_storage: | none | scale_storage:
     filesystem: gpfs01
     blockSize: 4M
     maxMetadataReplicas: 2
     defaultMetadataReplicas: 2
     maxDataReplicas: 2
     defaultDataReplicas: 2
     numNodes: 16
     automaticMountOption: true
     defaultMountPoint: /mnt/gpfs01
     disks:
          - device: /dev/sdb
            nsd: nsd_1
            servers: scale01
            failureGroup: 10
            usage: metadataOnly
            pool: system
          - device: /dev/sdc
            nsd: nsd_2
            servers: scale01
            failureGroup: 10
            usage: dataOnly
             pool: data | no | Refer to the mmchfs and mmchnsd man pages for a description of these storage parameters.
The filesystem parameter is mandatory; the servers and device parameters are mandatory for each of the file system's disks.
All other file system and disk parameters are optional. scale_storage *must* be defined using group variables.
Do *not* define disk parameters using host variables or inline variables in your playbook.
Doing so would apply them to all hosts in the group/play, thus defining the same disk multiple times... | +| scale_admin_node | false | true or false | no | Set admin flag on node for Ansible to use. | +| scale_nsd_server | scale_nsd_server | true or false | no | Set nsd flag for installation purpose | + +**Role: GUI - GUI for Management of Spectrum Scale Cluster** + +| Variables | Default | Options | User Mandatory | Descriptions | +|-----------------------------------|----------------------------|------------------------------------------------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| scale_gui_hide_tip_callhome | false | true or false | no | Hide the Callhome not enabled tip on gui | +| scale_cluster_gui: | false | true or false | no | Install Spectrum Scale GUI on nodes, set by host variables. | +| scale_service_gui_start: | true | true or false | no | Wheter or not to start the Scale GUI after installation. | +| scale_gui_admin_user: | none | admin | no | Spesify a name for the admin user to be created. | +| scale_gui_admin_password: | none | Admin@GUI! | no | Password to be set on the admin user | +| scale_gui_admin_role: | none | SecurityAdmin,SystemAdmin | no | Role access for the admin user, check IBM doc for valid roles. | +| scale_gui_user_username: | none | SEC | no | Ekstra Spectrum Scale GUI user. example: Monitor or RestAPI. | +| scale_gui_user_password: | none | Storage@Scale1 | no | Password for extra user | +| scale_gui_user_role: | none | SystemAdmin | no | Role access for the extra user. | +| scale_gui_admin_hc_vault: | none | N/A | no | HasiCorp - Create local Admin user with password from vault, cant be combined with the scale_gui_admin_user | +| scale_gui_admin_hc_vault_user: | noen | admin | no | Create local admin user and write password to Vault | +| scale_gui_admin_hc_vault_role: | none | SecurityAdmin,SystemAdmin | no | Role access for the admin user, check IBM doc for valid roles. | +| scale_gui_cert_hc_vault: | false | true or false | no | Generate https Certificate from HasiCorp Vault and import it to Scale GUI.
The Scale host need to be included in HC Vault and the Ansible playbook need to have the computed.name variables, normally the playbook is then run from Terraform. | +| scale_gui_password_policy_change: | false | true or false | no | Change default GUI User Password Policy change what you need in your inventory files and rest wil use default, used with **scale_gui_password_policy:** | | +| scale_gui_password_policy: | false | scale_gui_password_policy:
  minLength: 6
  maxAge: 900
  minAge: 0
  remember: 3
  minUpperChars: 0
  minLowerChars: 0
  minSpecialChars: 0
  minDigits: 0
  maxRepeat: 0
  minDiff: 1
  rejectOrAllowUserName: --rejectUserName | | Change the default GUI user password policy. Change what you need in your inventory files and the rest will use the defaults.
  
 scale_gui_password_policy:
  minLength: 6 ## Minimum password length
  maxAge: 900 ## Maximum password age
  minAge: 0 ## Minimum password age
  remember: 3 ## Remember old passwords
  minUpperChars: 0 ## Minimum upper case characters
  minLowerChars: 0 ## Minimum lower case characters
  minSpecialChars: 0 ## Minimum special case characters
  minDigits: 0 ## Minimum digits
  maxRepeat: 0 ## Maximum number of repeat characters
  minDiff: 1 ## Minimum different characters with respect to old password
  rejectOrAllowUserName: --rejectUserName ## either --rejectUserName or --allowUserName | +| scale_gui_ldap_integration: | false | true or false | no | Active Directory information for Managing GUI users in an external AD or LDAP server | +| scale_gui_ldap: | none | scale_gui_ldap:
  name: 'myad'
  host: 'myad.mydomain.local'
  bindDn: 'CN=servicebind,CN=Users,DC=mydomain,DC=local'
  bindPassword: 'password'
  baseDn: 'CN=Users,DC=mydomain,DC=local'
  port: '389' #Default 389
  type: 'ad' #Default Microsoft Active Directory
  #securekeystore: /tmp/ad.jks #Local on GUI Node
  #secureport: '636' #Default 636 | no | Managing GUI users in an external AD or LDAP Parameters
 
 Parameter Description
  - **name:** Alias for your LDAP/AD server
  - **host:** The IP address or host name of the LDAP server.
  - **baseDn:** BaseDn string for the repository.
  - **bindDn:** BindDn string for the authentication user.
  - **bindPassword:** Password of the authentication user.
  - **port:** Port number of the LDAP. Default is 389
  - **type:** Repository type (ad, ids, domino, secureway, iplanet, netscape, edirectory or custom). Default is ad.
  - **securekeystore:** Location with file name of the keystore file (.jks, .p12 or .pfx).
  - **secureport:** Port number of the LDAP. 636 over SSL. | +| scale_gui_groups: | none | scale_gui_groups:
  administrator: 'scale-admin'
  securityadmin: 'scale-securityadmin'
  storageadmin: 'scale-storage-administrator'
  snapadmin: 'scale-snapshot-administrator'
  data_access: 'scale-data-access'
  monitor: 'scale-monitor'
  protocoladmin: 'scale-protocoladmin'
  useradmin: 'scale-useradmin' | no | The LDAP/AD groups need to be created in the LDAP server. (They do not have to exist before the deployment.)
You'll likely want to define this in your host inventory.
Add the mappings that you want and replace the **scale-** prefix with your LDAP group names.

The following are the default user groups:
  - **Administrator** - Manages all functions on the system except those dealing with managing users, user groups, and authentication.
  - **SecurityAdmin** - Manages all functions on the system, including managing users, user groups, and user authentication.
  - **SystemAdmin** - Manages clusters, nodes, alert logs, and authentication.
  - **StorageAdmin** - Manages disks, file systems, pools, filesets, and ILM policies.
  - **SnapAdmin** - Manages snapshots for file systems and filesets.
  - **DataAccess** - Controls access to data. For example, managing access control lists.
  - **Monitor** - Monitors objects and system configuration but cannot configure, modify, or manage the system or its resources.
  - **ProtocolAdmin** - Manages object storage and data export definitions of SMB and NFS protocols.
  - **UserAdmin** - Manages access for GUI users. Users who are part of this group have edit permissions only in the Access pages of the GUI. Check IBM doc for updated list | +| scale_gui_email_notification: | false | false or true | no | Enable E-mail notifications in Spectrum Scale GUI | +| scale_gui_email: | none | scale_gui_email:
  name: 'SMTP_1'
  ipaddress: 'emailserverhost'
  ipport: '25'
  replay_email_address: scale-server-test@acme.com
  contact_name: 'scale-contact-person'
  subject: &cluster&message
  sender_login_id:
  password:
  headertext:
  footertext: | no | - The email feature transmits operational and error-related data in the form of an event notification email.
  - Email notifications can be customized by setting a custom header and footer for the emails and customizing the subject by selecting and combining from the following variables:
    &message, &messageId, &severity, &dateAndTime, &cluster and &component.
  
  - **name** - Specifies a name for the e-mail server.
  - **address** - Specifies the address of the e-mail server. Enter the SMTP server IP address or host name. For example, 10.45.45.12 or smtp.example.com.
  - **portNumber** - Specifies the port number of the e-mail server. Optional.
  - **reply_email_address/sender_address** - Specifies the sender's email address.
  - **contact_name/sender_name** - Specifies the sender's name.
  - **subject** Notifications can be customized by setting a custom header and footer or with variable like &cluster&message ## Variables: &message &messageId &severity &dateAndTime &cluster&component
  - **sender_login_id** - Login needed to authenticate sender with email server in case the login is different from the sender address (--reply). Optional.
  - **password** - Password used to authenticate sender address (--reply) or login id (--login) with the email sever | +| scale_gui_email_recipients: | none | scale_gui_email_recipients:
  name: 'name_email_recipient_name'
  address: 'email_recipient_address@email.com'
  components_security_level: 'SCALEMGMT=WARNING,CESNETWORK=WARNING'
  reports: 'DISK,GPFS,AUTH'
  quotaNotification: '--quotaNotification' ## if defined, it enables quota notification
  quotathreshold: '70.0' | no | **Options:**
  - **NAME**: Name of the email Recipients
  - **Address:** userAddress Specifies the address of the e-mail user
  - **Components_security_level**
     - The value scale_gui_email_recipients_components_security_level: needs to contain the **Component** and the **Warning/Security Level**
       - Choose a component like **SCALEMGMT** with a security level of WARNING, which becomes **SCALEMGMT=WARNING**
       - Security level: Choose the lowest severity of an event for which you want to receive an email. For example, selecting Tip includes events with severity Tip, Warning, and Error in the email.
       - The severity levels are: **INFO**, **TIP**, **WARNING**, **ERROR**
     **List of all security levels:**
      AFM=WARNING,AUTH=WARNING,BLOCK=WARNING,CESNETWORK=WARNING,CLOUDGATEWAY=WARNING,CLUSTERSTATE=WARNING,DISK=WARNING,FILEAUDITLOG=WARNING,
      FILESYSTEM=WARNING,GPFS=WARNING,GUI=WARNING,HADOOPCONNECTOR=WARNING,KEYSTONE=WARNING,MSGQUEUE=WARNING,NETWORK=WARNING,NFS=WARNING,
      OBJECT=WARNING,PERFMON=WARNING,SCALEMGMT=WARNING,SMB=WARNING,CUSTOM=WARNING,AUTH_OBJ=WARNING,CES=WARNING,CESIP=WARNING,NODE=WARNING,
      THRESHOLD=WARNING,WATCHFOLDER=WARNING,NVME=WARNING,POWERHW=WARNING
  - **Reports** listOfComponents
       - Specifies the components to be reported. The tasks generating reports are scheduled by default to send a report once per day. Optional.
       AFM,AUTH,BLOCK,CESNETWORK,CLOUDGATEWAY,CLUSTERSTATE,DISK,FILEAUDITLOG,FILESYSTEM,GPFS,GUI,HADOOPCONNECTOR,
       KEYSTONE,MSGQUEUE,NETWORK,NFS,OBJECT,PERFMON,SCALEMGMT,SMB,CUSTOM,AUTH_OBJ,CES,CESIP,NODE,THRESHOLD,WATCHFOLDER,NVME,POWERHW
  - **quotaNotification**
     Enables quota notifications which are sent out if the specified threshold is violated. (See --quotathreshold)
  - **quotathreshold** valueInPercent
      - Sets the threshold (percent of the hard limit) for including quota violations in the quota digest report.
      - The default value is 100. The values -3, -2, -1, and zero have special meaning.
      - Specify the value -2 to include all results, even entries where the hard quota is not set.
     - Specify the value -1 to include all entries where hard quota is set and current usage is greater than or equal to the soft quota.
     - Specify the value -3 to include all entries where hard quota is not set and current usage is greater than or equal to the soft quota only.
     - Specify the value 0 to include all entries where the hard quota is set.
  Using unlisted options can lead to an error | +| scale_gui_snmp_notification: | false | true or false | no | Enable SNMP notifications in Spectrum Scale GUI | +| scale_gui_snmp_server: | false | scale_gui_snmp_server:
  ip_adress: 'snmp_server_host'
  ip_port: '162'
  community: 'Public' | no | - To configure SNMP notifications:
    - Change the values:
      - scale_gui_snmp_notification: true
       - ip_adress to your SNMP server/host
       - ip_port to your SNMP port
       - community to your SNMP community | + +**Role: NFS,SMB,OBJ - Protocol** + +| variables | Default | Options | User Mandatory | Descriptions | +|--------------------------|---------|---------------------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| scale_install_debuginfo: | true | true or false | no | Flag to install ganesha/nfs debug package | +| scale_install_debuginfo: | true | true or false | no | Flag to install smb debug package | +| scale_protocol_node: | none | true or false | no | Enable to set node to uses as Protcol Node, by host variable. | +| scale_protocols: #IPV4 | none | scale_protocols: #IPV4
  smb: true
  nfs: true
  object: true
  export_ip_pool: [192.168.100.100,192.168.100.101]
  filesystem: cesSharedRoot
  mountpoint: /gpfs/cesSharedRoot | no | To install Spectrum Scale Protocol. Refer to man mmces man pages for a description of these Cluster Export. scale_ces_groups can also be user to group nodes. | +| scale_protocols: #Ipv6 | none | scale_protocols: #Ipv6
  smb: true
  nfs: true
  object: true
  interface: [eth0]
  export_ip_pool: [2002:90b:e006:84:250:56ff:feb9:7787]
  filesystem: cesSharedRoot
  mountpoint: /gpfs/cesSharedRoot | no | For enabling Cluster Export Services in an IPv6 environment one also needs to define an interface parameter: scale_ces_groups can also be user to group nodes. | +| scale_ces_obj: | none | scale_ces_obj:
  dynamic_url: False
  enable_s3: False
  local_keystone: True
  enable_file_access: False
  endpoint_hostname: scale-11
  object_fileset: Object_Fileset
  pwd_file: obj_passwd.j2
  admin_user: admin
  admin_pwd: admin001
  database_pwd: admin001 | no | Missing descriptions. + + + +**Role: HDFS - Hadoop** + + +| variables | Default | Options | User Mandatory | Descriptions | +|----------------------|---------|-----------------------------|----------------|---------------------------------------------------------| +| ha_enabled: | false | true or false | no | HA for namenode in HDFS? | +| scale_hdfs_clusters: | none | - name: mycluster
  filesystem: gpfs1
  namenodes: ['host-vm1.test.net', 'host-vm2.test.net']
  datanodes: ['host-vm3.test.net', 'host-vm4.test.net', 'host-vm5.test.net']
  datadir: datadir | no | Install IBM Spectrum Scale (HDFS), "Document more" + + +**Role: zimon - Performance Monitoring** + +| variables | Default | Options | User Mandatory | Descriptions | +|------------------------|---------|---------------|----------------|-------------------------------------------------------------------------------| +| scale_zimon_collector: | false | true or false | no | Nodes default GUI collector role, its install the collector on all GUI nodes. | +| scale_cluster_gui | false | true or false | no | Install Spectrum Scale GUI on nodes, set by host variables. | +| scale_cluster_zimon | false | true or false | no | Install up zimon enabled | + + +**Role: Fal - FileAudit Logging** + +| variables | Default | Options | User Mandatory | Descriptions | +|------------------------|---------|----------------|----------------|------------------------------------------------------------------------------| +| scale_fal_enable | true | true or false | no | Flag to enable fileauditlogging | + +**Role: CallHome** + +| variables | Default | Options | User Mandatory | Descriptions | +|------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------|----------------|--------------| +|scale_callhome_params: | None | scale_callhome_params:
    is_enabled: true
    customer_name: abc
    customer_email: abc@abc.com
    customer_id: 12345
    customer_country: IN
    proxy_ip:
    proxy_port:
    proxy_user:
    proxy_password:
    proxy_location:
     callhome_server: scale01 ## server that has callhome installed and can reach out to IBM
    callhome_group1: [scale01,scale02,scale03,scale04]
     callhome_schedule: [daily,weekly] | no | Refer to the mmcallhome man page for a description of these call home parameters
**scale_callhome_params**:
  is_enabled: true
  customer_name: abc
  customer_email: abc@abc.com
  customer_id: 12345
  customer_country: IN
  proxy_ip:
  proxy_port:
  proxy_user:
  proxy_password:
  proxy_location:
  callhome_server: scale01 ## server that has callhome installed and can reach out to IBM
  callhome_group1: [scale01,scale02,scale03,scale04]
  callhome_schedule: [daily,weekly] | + + +**Role: remotemount_configure - Enabled and Configure Remote Mounting of Filesystem** + +| variables | Default | Options | User Mandatory | Descriptions | +|-------------------------------------------------|-------------------------------------------------|-------------------------|----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| scale_remotemount_client_gui_username: | none | username, example admin | yes | Scale User with Administrator or ContainerOperator role/rights | +| scale_remotemount_client_gui_password: | none | password for user | yes | Password for Scale User with Administrator or ContainerOperator role/rights | +| scale_remotemount_client_gui_hostname: | none | 10.10.10.1 | yes | IP or Hostname to Client GUI Node | +| scale_remotemount_storage_gui_username: | none | | yes | Scale User with Administrator or ContainerOperator role/rights | +| scale_remotemount_storage_gui_password: | none | | yes | Password for Scale User with Administrator or ContainerOperator role/rights | +| scale_remotemount_storage_gui_hostname: | none | | yes | IP or Hostname to Storage GUI Node | +| scale_remotemount_storage_adminnodename: | false | true or false | no | Spectrum Scale uses the Deamon node name and the IP Attach to connect and run cluster traffic on. In most cases the admin network and deamon network is the same.
   In case you have different AdminNode address and DeamonNode address and for some reason you want to use admin network, then you can set the variable to true | +| scale_remotemount_filesystem_name | none | scale_remotemount_filesystem_name
  scale_remotemount_client_filesystem_name:
  scale_remotemount_client_remotemount_path:
  scale_remotemount_storage_filesystem_name:
  scale_remotemount_access_mount_attributes:
  scale_remotemount_client_mount_fs:
  scale_remotemount_client_mount_priority: 0 | yes | The variables need to be provided as a list, as mounting multiple filesystems is now supported.

  - Local filesystem name of the remote mounted filesystem, so the storage cluster and remote cluster can have different names.
  - Path to where the filesystem should be mounted: /gpfs01/fs1
  - Storage cluster filesystem you want to mount: gpfs01
  - Filesystem can be mounted with different access modes: RW or RO
  - Indicates when the file system is to be mounted: options are yes, no, automount (When the file system is first accessed.)
  - File systems with higher Priority numbers are mounted after file systems with lower numbers. File systems that do not have mount priorities are mounted last.
   A value of zero indicates no priority. valid values: 0 - x | +| -   scale_remotemount_client_filesystem_name: | none | fs1 | yes | Local Filesystem Name of the remote mounted filesystem, So the storage cluster and remote cluster can have different names. | +| -   scale_remotemount_client_remotemount_path: | none | /gpfs01/fs1 | yes | Path to where the filesystem shoul be Mounted. | +| -   scale_remotemount_storage_filesystem_name: | none | gpfs01 | yes | Storage Cluster filesystem you want to mount | +| -   scale_remotemount_access_mount_attributes: | rw | RW, RO | no | Filesystem can be mounted in different access mount: RW or RO | +| -   scale_remotemount_client_mount_fs: | yes | yes, no, automount | no | Indicates when the file system is to be mounted:** options are yes, no, automount (When the file system is first accessed.) | +| -   scale_remotemount_client_mount_priority: 0 | 0 | 0 - x | no | File systems with higher Priority numbers are mounted after file systems with lower numbers. File systems that do not have mount priorities are mounted last. A value of zero indicates no priority. | +| - | | | | | +| scale_remotemount_client_no_gui: | false | true or false | no | If Accessing/Client Cluster dont have GUI, it will use CLI/SSH against Client Cluster | +| scale_remotemount_storage_pub_key_location: | /tmp/storage_cluster_public_key.pub | path to pubkey | no | Client Cluster (Access) is downloading the pubkey from Owning cluster and importing it | +| scale_remotemount_cleanup_remote_mount: | false | true or false | no | Unmounts, remove the filesystem, and the connection between Accessing/Client cluster and Owner/Storage Cluster. This now works on clusters that not have GUI/RESTAPI interface on Client Cluster | +| scale_remotemount_debug: | false | true or false | no | Outputs debug information after tasks | +| scale_remotemount_forceRun: | false | true or false | no | If scale_remotemount_forceRun is passed in, then the playbook is attempting to run remote_mount role regardless of whether the filesystem is configured | +| scale_remotemount_storage_pub_key_location: | /tmp/storage_cluster_public_key.pub | path to | no | Client Cluster (Access) pubkey that is changed from json to right format and then used when creating connection | +| scale_remotemount_storage_pub_key_location_json | /tmp/storage_cluster_public_key_json.pub | path to | no | Client Cluster (Access) is downloading the pubkey as JSON from Owning cluster | +| scale_remotemount_storage_pub_key_delete | true | true or false | no | Delete both temporary pubkey after the connection have been established | +| scale_remotemount_remotecluster_chipers: | AUTHONLY | AES128-SHA
AES256-SHA
AUTHONLY | no | Sets the security mode for communications between the current cluster and the remote cluster
Encryption can have a performance impact and increase CPU usage
run **mmauth show ciphers** to check supported ciphers +| scale_remotemount_storage_pub_key_delete | true | true or false | no | Delete both temporary pubkey after the connection have been established | +| scale_remotemount_validate_certs_uri: | no | no | no | If Ansible URI module should validate https certificate for Spectrum Scale RestAPI interface. From 0afcabc3dde88696311196c7f3de1ec4f3e16442 Mon Sep 17 00:00:00 2001 From: Ole Kristian Date: Tue, 4 Jan 2022 15:17:36 +0100 Subject: [PATCH 040/113] - Updated Variabled to start with scale_remotemount* - Added option for encrypting traffic between remote cluster and storage cluster - changed some when: to read restapi error code insted of failed task. - Updated tasks that do check to not output error 400 and 200. - Added message to user when there is notting to clean up, like cluster connection and filesystem. - Cleaned up some more typo and text wording Signed-off-by: Ole Kristian --- docs/README.REMOTEMOUNT.md | 73 ++++++----- roles/remotemount_configure/defaults/main.yml | 19 +-- .../tasks/cleanup_filesystems.yml | 42 ++++--- .../tasks/cleanup_remote_mount.yml | 47 +++++--- .../tasks/cleanup_remote_mount_api_cli.yml | 34 ++++-- .../tasks/delete_remote_cluster.yml | 12 +- roles/remotemount_configure/tasks/main.yml | 34 +++--- .../tasks/mount_filesystem_api_cli.yml | 30 ++--- .../tasks/mount_filesystems.yml | 21 ++-- .../tasks/remotecluster.yml | 114 +++++++++--------- .../tasks/remotecluster_api_cli.yml | 59 ++++----- 11 files changed, 265 insertions(+), 220 deletions(-) diff --git a/docs/README.REMOTEMOUNT.md b/docs/README.REMOTEMOUNT.md index 3d896aaa..0cd7f628 100644 --- a/docs/README.REMOTEMOUNT.md +++ b/docs/README.REMOTEMOUNT.md @@ -15,12 +15,15 @@ Role Definition Features ----------------------------- -- Remote Mounts FS with API calls to Clusters Storage and Client -- Remote Mounts FS with API calls to Storage Clusters and CLI to Client/Accessing Cluster +- Remote Mounts FS with API calls to Cluster Storage and Client +- Remote Mounts FS with API calls to Storage Cluster and CLI to Client/Accessing Cluster - Cleanup Remote Mount from Client and Storage Servers - Remote Mount several filesystems in same ansible play. - Check's and add Remote Filesystems if not already there. -- Check if remote cluster is already defined. +- Check's if remote cluster is already defined. +- Added option for Security mode for communications between the current cluster and the remote cluster (Encryption) +- Mount filesystem on desired client cluster nodes. +- Option to specify either Deamon or Admin node name for cluster traffic. Limitation @@ -72,8 +75,18 @@ The following variables would need to be defined by the user, either as vars to - ``scale_remotemount_storage_pub_key_location_json:`` (Defaults to : "/tmp/storage_cluster_public_key_json.pub") **Client Cluster (Access) is downloading the pubkey as JSON from Owning cluster** - ``scale_remotemount_storage_pub_key_delete:`` (Default to: true) **delete both temporary pubkey after the connection have been established** -- ``scale_remotemount_storage_adminnodename: true `` (Default to: false) **Spectrum Scale uses the Deamon node name and the IP Attach to connect and run cluster traffic on. In most cases the admin network and deamon network is the same. 
In case you have different AdminNode address and DeamonNode address and for some reason you want to use admin network, then you can set the variable to true** +- ``scale_remotemount_storage_adminnodename: `` (Default to: false) **Spectrum Scale uses the Deamon node name and the IP Attach to connect and run cluster traffic on. In most cases the admin network and deamon network is the same. In case you have different AdminNode address and DeamonNode address and for some reason you want to use admin network, then you can set the variable to true** +- ``scale_remotemount_remotecluster_chipers: `` (Default to: AUTHONLY) **Sets the security mode for communications between the current cluster and the remote cluster Encyption can have performance effect and increased CPU usage** + - Run the follwing command to check the supported ciphers: mmauth show ciphers + + ```console + Supported ciphers for nistCompliance=SP800-131A: + AES128-SHA + AES128-SHA256 + AES256-SHA + AES256-SHA256 + ``` - ``scale_remotemount_gpfsdemon_check: true ``(Default to: true) **Checks that GPFS deamon is started on GUI node, it will check the first server in NodeClass GUI_MGMT_SERVERS, this is the same flag to check when trying to mount up filesystems on all nodes. Check can be disabled with changing the flag to false.** @@ -112,19 +125,19 @@ There is also example playbook's in samples folder. You can use localhost, then all RestAPI call will occur over https to Storage and Client Cluster locally from where you run the Ansible playbook ```yaml - - hosts: localhost - vars: - scale_remotemount_client_gui_username: admin - scale_remotemount_client_gui_password: Admin@GUI - scale_remotemount_client_gui_hostname: 10.10.10.10 - scale_remotemount_storage_gui_username: admin - scale_remotemount_storage_gui_password: Admin@GUI - scale_remotemount_storage_gui_hostname: 10.10.10.20 - scale_remotemount_filesystem_name: - - { scale_remotemount_client_filesystem_name: "fs2", scale_remotemount_client_remotemount_path: "/gpfs/fs2", scale_remotemount_storage_filesystem_name: "gpfs01", } # Minimum variables - - { scale_remotemount_client_filesystem_name: "fs3", scale_remotemount_client_remotemount_path: "/gpfs/fs3", scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } - roles: - - remote_mount +- hosts: localhost + vars: + scale_remotemount_client_gui_username: admin + scale_remotemount_client_gui_password: Admin@GUI + scale_remotemount_client_gui_hostname: 10.10.10.10 + scale_remotemount_storage_gui_username: admin + scale_remotemount_storage_gui_password: Admin@GUI + scale_remotemount_storage_gui_hostname: 10.10.10.20 + scale_remotemount_filesystem_name: + - { scale_remotemount_client_filesystem_name: "fs2", scale_remotemount_client_remotemount_path: "/gpfs/fs2", scale_remotemount_storage_filesystem_name: "gpfs01", } # Minimum variables + - { scale_remotemount_client_filesystem_name: "fs3", scale_remotemount_client_remotemount_path: "/gpfs/fs3", scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } + roles: + - remote_mount ``` ``ansible-playbook -i hosts remotmount.yml`` @@ -136,18 +149,18 @@ You can use localhost, then all RestAPI call will occur over https to Storage an Following example will connect up to the first host in your ansible host file, and then run the 
playbook and do API Call to Storage Cluster. So in this case the Client Cluster node needs access on https/443 to Storage Cluster GUI Node. ```yaml - - hosts: scale-client-cluster-node-1 - gather_facts: false - vars: - scale_remotemount_storage_gui_username: admin - scale_remotemount_storage_gui_password: Admin@GUI - scale_remotemount_storage_gui_hostname: 10.10.10.20 - scale_remotemount_client_no_gui: true - scale_remotemount_filesystem_name: - - { scale_remotemount_client_filesystem_name: "fs2", scale_remotemount_client_remotemount_path: "/gpfs/fs2", scale_remotemount_storage_filesystem_name: "gpfs01", } # Minimum variables - - { scale_remotemount_client_filesystem_name: "fs3", scale_remotemount_client_remotemount_path: "/gpfs/fs3", scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } - roles: - - remote_mount +- hosts: scale-client-cluster-node-1 + gather_facts: false + vars: + scale_remotemount_storage_gui_username: admin + scale_remotemount_storage_gui_password: Admin@GUI + scale_remotemount_storage_gui_hostname: 10.10.10.20 + scale_remotemount_client_no_gui: true + scale_remotemount_filesystem_name: + - { scale_remotemount_client_filesystem_name: "fs2", scale_remotemount_client_remotemount_path: "/gpfs/fs2", scale_remotemount_storage_filesystem_name: "gpfs01", } # Minimum variables + - { scale_remotemount_client_filesystem_name: "fs3", scale_remotemount_client_remotemount_path: "/gpfs/fs3", scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } + roles: + - remote_mount ``` Firewall recommendations for communication among cluster's -------- @@ -175,7 +188,7 @@ to set the tscCmdPortRange configuration variable: Troubleshooting ------------------------ -- If you get **401 - Unauthorized** - Check that your user is working with a Curl, and that is have the correct Role. +- If you get **401 - Unauthorized** - Check that your user is working with a Curl, and that the user have the correct Role/Permission. ``-k`` will use insecure. 
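+
+As a quick check of connectivity and credentials against a GUI/RestAPI node, a curl call along these lines should return JSON rather than a **401**. The hostname, user, password and filesystem name are placeholders taken from the examples in this document, and the endpoint is the same remotefilesystems endpoint the role calls:
+
+```console
+curl -k -u admin:Admin@GUI https://10.10.10.10:443/scalemgmt/v2/remotemount/remotefilesystems/fs2
+```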
diff --git a/roles/remotemount_configure/defaults/main.yml b/roles/remotemount_configure/defaults/main.yml index 24996686..75c0da42 100644 --- a/roles/remotemount_configure/defaults/main.yml +++ b/roles/remotemount_configure/defaults/main.yml @@ -4,19 +4,20 @@ scale_remotemount_debug: false scale_remotemount_forceRun: false # retries - 2 minutes (40 x 3 seconds) -restapi_retries_count: 40 -restapi_retries_delay: 3 +scale_remotemount_restapi_retries_count: 40 +scale_remotemount_restapi_retries_delay: 3 -client_cluster_gui_port: 443 -storage_cluster_gui_port: 443 +scale_remotemount_client_cluster_gui_port: 443 +scale_remotemount_storage_cluster_gui_port: 443 -scalemgmt_endpoint: "scalemgmt/v2" -remote_mount_endpoint: "{{ scalemgmt_endpoint }}/remotemount" +scale_remotemount_scalemgmt_endpoint: "scalemgmt/v2" +scale_remotemount_endpoint: "{{ scale_remotemount_scalemgmt_endpoint }}/remotemount" -validate_certs_uri: 'no' + +scale_remotemount_validate_certs_uri: 'no' # Temporary Storage for Public Key, Only used when debuging -scale_remote_mount_client_access_key: /tmp/client_cluster.pub +scale_remotemount_client_access_key: /tmp/client_cluster.pub # Sets the security mode for communications between the current cluster and the remote cluster # Encyption can have performance effect and increased CPU usage @@ -28,7 +29,7 @@ scale_remote_mount_client_access_key: /tmp/client_cluster.pub # AES256-SHA256 # AES128-SHA', 'AES256-SHA' , AUTHONLY -remotecluster_chipers: "AUTHONLY" +scale_remotemount_remotecluster_chipers: "AUTHONLY" # Storage filesystem # scale_remotemount_access_mount_attributes: "rw" diff --git a/roles/remotemount_configure/tasks/cleanup_filesystems.yml b/roles/remotemount_configure/tasks/cleanup_filesystems.yml index 203851ff..b26b6789 100644 --- a/roles/remotemount_configure/tasks/cleanup_filesystems.yml +++ b/roles/remotemount_configure/tasks/cleanup_filesystems.yml @@ -1,26 +1,27 @@ --- - name: "Cleanup | Client Cluster (access) | Check if the remotefilesystem is already defined {{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/remotefilesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remotefilesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }} method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" body_format: json status_code: - 200 + - 400 register: remote_filesystem_results ignore_errors: true run_once: True - name: "Cleanup | Client Cluster (access) | Remove defined filesystem {{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}" block: - - name: "Client Cluster (access) | Unmount the filesystem | PUT {{ scalemgmt_endpoint }}/filesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}/unmount" + - name: "Client Cluster (access) | Unmount the filesystem | PUT {{ scale_remotemount_scalemgmt_endpoint }}/filesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}/unmount" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" 
force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/filesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}/unmount + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/filesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}/unmount method: PUT user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -36,22 +37,22 @@ - name: "Checking results from the job: {{ umount_call.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ umount_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ umount_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" - - name: "Client Cluster (access) | Delete the filesystem | DELETE {{ remote_mount_endpoint }}/remotefilesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}?force=yes" + - name: "Client Cluster (access) | Delete the filesystem | DELETE {{ scale_remotemount_endpoint }}/remotefilesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}?force=yes" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/remotefilesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}?force=yes + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remotefilesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}?force=yes method: DELETE user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -61,15 +62,22 @@ - name: "Checking results from the job: {{ delete_call.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" - 
when: not remote_filesystem_results.failed + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" + when: remote_filesystem_results.json.status.code == 200 run_once: True + +- name: "Cleanup | Client Cluster (access) | Output from remove defined filesystem {{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}" + run_once: True + debug: + msg: "The is no filesystem named ({{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}) - Message from Restapi: {{ remote_filesystem_results.json.status.message }}" + when: + - remote_filesystem_results.json.status.code == 400 \ No newline at end of file diff --git a/roles/remotemount_configure/tasks/cleanup_remote_mount.yml b/roles/remotemount_configure/tasks/cleanup_remote_mount.yml index 351e0b06..3275549c 100644 --- a/roles/remotemount_configure/tasks/cleanup_remote_mount.yml +++ b/roles/remotemount_configure/tasks/cleanup_remote_mount.yml @@ -3,9 +3,9 @@ # - name: Cleanup | Storage Cluster (owner) | GET the Cluster Information uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -23,9 +23,9 @@ - name: Cleanup | Client Cluster (access) | GET the Cluster Information uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -69,16 +69,16 @@ - name: Cleanup | Client Cluster (access) | List the remote cluster already defined uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/owningclusters + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/owningclusters method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" register: remote_clusters_result run_once: True -- name: Cleanup | scale_remotemount_debug | Print out the remote clusters +- name: Cleanup | Client Cluster (access) | scale_remotemount_debug | Print out the remote clusters message code from RestAPI. debug: msg: "{{ remote_clusters_result.json }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -86,7 +86,7 @@ # The remote_clusters_results is in an array, so looping here incase there are multiple remote clusters # We want to delete the one where the owningCluster name matches what we are trying to do a remote mount on -- name: Cleanup | Delete the clusters on a loop... 
+- name: Cleanup | Client Cluster (access) | Delete the Remote Mount/clusters connection on a loop. include_tasks: delete_remote_cluster.yml when: item.owningCluster == owning_cluster_name loop: "{{ remote_clusters_result.json.owningClusters }}" @@ -98,26 +98,27 @@ - name: "Cleanup | Storage Cluster (owner) | Check if the Client Cluster ('{{ access_cluster_name }}') is already defined" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" body_format: json status_code: - 200 + - 400 register: remote_clusters_results ignore_errors: true run_once: True - name: Cleanup | Storage Cluster (owner) | Delete the Client Cluster, if it exists block: - - name: "DELETE: {{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }}" + - name: "DELETE: {{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: DELETE user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -127,15 +128,23 @@ - name: "Cleanup | Checking results from the job: {{ delete_call.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" - when: not remote_clusters_results.failed - run_once: True \ No newline at end of file + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" + #when: not remote_clusters_results.failed + when: remote_clusters_results.json.status.code == 200 + run_once: True + +- name: "Cleanup | Storage Cluster (owner) | Output from delete the Client Cluster, ('{{ access_cluster_name }}')" + run_once: True + debug: + msg: "The is no Client/Accessing cluster named: ({{ access_cluster_name }}) - Message from RestAPI: {{ remote_clusters_results.json.status.message }}" + when: + - remote_clusters_results.json.status.code == 400 \ No 
newline at end of file diff --git a/roles/remotemount_configure/tasks/cleanup_remote_mount_api_cli.yml b/roles/remotemount_configure/tasks/cleanup_remote_mount_api_cli.yml index a72791f8..cc880850 100644 --- a/roles/remotemount_configure/tasks/cleanup_remote_mount_api_cli.yml +++ b/roles/remotemount_configure/tasks/cleanup_remote_mount_api_cli.yml @@ -3,9 +3,9 @@ # - name: Cleanup Remote Mount - API-CLI | Storage Cluster (owner) | GET the Owning Cluster Information uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -125,15 +125,16 @@ - name: "Cleanup Remote Mount - API-CLI | Storage Cluster (owner) | Check if the Client Cluster ('{{ access_cluster_name }}') is already defined" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" body_format: json status_code: - 200 + - 400 register: remote_clusters_results ignore_errors: true run_once: True @@ -141,11 +142,11 @@ - name: Cleanup Remote Mount - API-CLI | Storage Cluster (owner) | Delete the Client Cluster, if it exists. 
block: - - name: "DELETE: {{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }}" + - name: "DELETE: {{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: DELETE user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -155,15 +156,22 @@ - name: "Cleanup Remote Mount - API-CLI | Checking results from the job: {{ delete_call.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" - when: not remote_clusters_results.failed - run_once: True \ No newline at end of file + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" + when: remote_clusters_results.json.status.code == 200 + run_once: True + +- name: "Cleanup Remote Mount - API-CLI | Storage Cluster (owner) | Output from delete the Client Cluster, ('{{ access_cluster_name }}')" + run_once: True + debug: + msg: "The is no Client/Accessing cluster named: ({{ access_cluster_name }}) - Message from RestAPI: {{ remote_clusters_results.json.status.message }}" + when: + - remote_clusters_results.json.status.code == 400 \ No newline at end of file diff --git a/roles/remotemount_configure/tasks/delete_remote_cluster.yml b/roles/remotemount_configure/tasks/delete_remote_cluster.yml index 3c81da4a..2979ed13 100644 --- a/roles/remotemount_configure/tasks/delete_remote_cluster.yml +++ b/roles/remotemount_configure/tasks/delete_remote_cluster.yml @@ -4,9 +4,9 @@ # Only users with role 'Administrator' or 'CNSS Operator' have permission to for this REST endpoint. Read also the documentation of CLI command 'mmremotecluster delete'. - name: Client Cluster (access) | DELETE The remotecluster {{ owning_cluster_name }} ... 
uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/owningclusters/{{ owning_cluster_name }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/owningclusters/{{ owning_cluster_name }} method: DELETE user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -16,13 +16,13 @@ - name: Client Cluster (access) | Check the results from the DELETE uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" diff --git a/roles/remotemount_configure/tasks/main.yml b/roles/remotemount_configure/tasks/main.yml index b4a52b4c..17519f26 100644 --- a/roles/remotemount_configure/tasks/main.yml +++ b/roles/remotemount_configure/tasks/main.yml @@ -36,9 +36,9 @@ - block: # RESTAPI - when: scale_remotemount_client_no_gui == false - name: Main | Storage Cluster (owner) | Check Connectivity to Storage Cluster GUI uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -73,9 +73,9 @@ - name: Main | Client Cluster (access) | Check Connectivity to Client Cluster GUI uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -110,9 +110,9 @@ - name: Main | Client Cluster (access) | Check status of GPFS deamon (Nodeclass GUI_MGMT_SERVERS) uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint 
}}/nodes/GUI_MGMT_SERVERS/health/states?fields=component%2Cstate&filter=component%3DGPFS%2C + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes/GUI_MGMT_SERVERS/health/states?fields=component%2Cstate&filter=component%3DGPFS%2C method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -120,6 +120,7 @@ return_content: yes status_code: - 200 + - 400 register: clientcluster_gpfs_deamon_status run_once: True when: scale_remotemount_gpfsdemon_check | bool @@ -128,26 +129,26 @@ run_once: True ignore_errors: true debug: - msg: "{{ clientcluster_gpfs_deamon_status.json.states[0].state }}" + msg: "Status of GPFS Deamon: {{ clientcluster_gpfs_deamon_status.json.states[0].state }} - Rest API status message: {{ clientcluster_gpfs_deamon_status.json.status.message }}" when: - scale_remotemount_gpfsdemon_check | bool - scale_remotemount_debug is defined - scale_remotemount_debug | bool - - name: Main | Client Cluster (access) | GPFS Deamon on Client Cluster is down (Nodeclass GUI_MGMT_SERVERS) + - name: Main | Client Cluster (access) | GPFS Deamon on Client Cluster (Nodeclass GUI_MGMT_SERVERS) run_once: True assert: that: - - "'HEALTHY' or 'DEGRADED' in storagecluster_gpfs_deamon_status.json.states[0].state" + - "'HEALTHY' or 'DEGRADED' in clientcluster_gpfs_deamon_status.json.states[0].state" fail_msg: "'GPFS Deamon is NOT started on NodeClass GUI_MGMT_SERVERS" success_msg: "'GPFS Deamon is started on NodeClass GUI_MGMT_SERVERS" when: scale_remotemount_gpfsdemon_check | bool - name: Main | Storage Cluster (owning) | Check status of gpfs deamon on GUI_MGMT_SERVERS uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes/GUI_MGMT_SERVERS/health/states?fields=component%2Cstate&filter=component%3DGPFS%2C + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes/GUI_MGMT_SERVERS/health/states?fields=component%2Cstate&filter=component%3DGPFS%2C method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -155,6 +156,7 @@ return_content: yes status_code: - 200 + - 400 register: storagecluster_gpfs_deamon_status run_once: True when: scale_remotemount_gpfsdemon_check | bool @@ -169,7 +171,7 @@ - scale_remotemount_debug is defined - scale_remotemount_debug | bool - - name: Main | Storage Cluster (owning) | GPFS Deamon on Storage Cluster is down (Nodeclass GUI_MGMT_SERVERS) + - name: Main | Storage Cluster (owning) | GPFS Deamon on Storage Cluster (Nodeclass GUI_MGMT_SERVERS) run_once: True assert: that: @@ -195,9 +197,9 @@ - block: # RESTAPI-CLI when: scale_remotemount_client_no_gui == true - name: Main | API-CLI | Storage Cluster (owner) | Check Connectivity to Storage Cluster GUI uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint 
}}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -232,9 +234,9 @@ - name: Main | API-CLI | Storage Cluster (owning) | Check status of gpfs deamon on GUI_MGMT_SERVERS uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes/GUI_MGMT_SERVERS/health/states?fields=component%2Cstate&filter=component%3DGPFS%2C + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes/GUI_MGMT_SERVERS/health/states?fields=component%2Cstate&filter=component%3DGPFS%2C method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" diff --git a/roles/remotemount_configure/tasks/mount_filesystem_api_cli.yml b/roles/remotemount_configure/tasks/mount_filesystem_api_cli.yml index ee2aa3c8..aef036b6 100644 --- a/roles/remotemount_configure/tasks/mount_filesystem_api_cli.yml +++ b/roles/remotemount_configure/tasks/mount_filesystem_api_cli.yml @@ -9,9 +9,9 @@ - name: Mount Filesystem - Rest-API | Storage Cluster (owner) | GET the Cluster Information uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -52,14 +52,14 @@ - name: Step 5 - Mount Filesystem - Rest-API debug: - msg: "On Storage Cluster, Check if filesystems is allready accessible for Client Cluster" + msg: "On Storage Cluster, Check if filesystems is already accessible for Client Cluster" run_once: True -- name: "Mount Filesystem - Rest-API | Storage Cluster (owner) | Check if filesystems is allready accessible for Client Cluster ('{{ access_cluster_name }}')" +- name: "Mount Filesystem - Rest-API | Storage Cluster (owner) | Check if filesystems is already accessible for Client Cluster ('{{ access_cluster_name }}')" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -92,9 +92,9 @@ - name: Mount Filesystem - Rest-API | Storage Cluster (owning) | Allow and Set the client cluster filesystem access attributes on the Storage Cluster uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ 
access_cluster_name }}/access/{{ item.scale_remotemount_storage_filesystem_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }}/access/{{ item.scale_remotemount_storage_filesystem_name }} method: POST user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -121,16 +121,16 @@ - name: Mount Filesystem - Rest-API | Storage Cluster (owning) | Check the result of setting the access attributes on the Storage Cluster ##"{{ completed_check.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ item.json.jobs.0['jobId'] }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ item.json.jobs.0['jobId'] }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True loop: "{{ uri_result.results }}" when: @@ -214,9 +214,9 @@ when: "'down' in gpfs_deamon_state.stdout" run_once: true -# Not adding any check here, run only when when mmremotefs add task is also run. +# Not adding any check here, runs only when when mmremotefs add task is also run. -- name: Client Cluster (access) | Mount remote filesystem on all client nodes - Output is from previous task, check if the filesystem's is allready mounted +- name: Client Cluster (access) | Mount remote filesystem on all client nodes - Output is from previous task, checks if the filesystem's is already mounted run_once: True command: /usr/lpp/mmfs/bin/mmmount {{ item.item.scale_remotemount_client_filesystem_name }} -N {{ scale_remotemount_client_mount_on_nodes | default('all') }} loop: "{{ remote_filesystem_results_cli.results }}" @@ -230,7 +230,7 @@ # Adding a stdout from previous as the stdout from the loop abow can be confusing when several loops. -- name: Client Cluster (access) | Mount remote filesystem on all client nodes - Shows stdout from the previous task. +- name: Client Cluster (access) | Mount remote filesystem on all client nodes - shows stdout from the previous task. debug: msg: "Message from mount remote filesystem task: {{ item }}" loop: "{{ client_cluster_mount_remotefs | json_query('results[*].stdout') }}" diff --git a/roles/remotemount_configure/tasks/mount_filesystems.yml b/roles/remotemount_configure/tasks/mount_filesystems.yml index c1d80092..a36c2317 100644 --- a/roles/remotemount_configure/tasks/mount_filesystems.yml +++ b/roles/remotemount_configure/tasks/mount_filesystems.yml @@ -11,9 +11,9 @@ # - name: Client Cluster (access) | Check status of GPFS deamon on all nodes before mounting filesystem. 
uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes/%3Aall%3A/health/states?fields=component%2Cstate&filter=component%3DGPFS%2Cstate%3DFAILED + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes/%3Aall%3A/health/states?fields=component%2Cstate&filter=component%3DGPFS%2Cstate%3DFAILED method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -52,15 +52,16 @@ - name: Client Cluster (access) | Check if the remotefilesystem is already defined uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/remotefilesystems/{{ filesystem_loop.scale_remotemount_client_filesystem_name }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remotefilesystems/{{ filesystem_loop.scale_remotemount_client_filesystem_name }} method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" body_format: json status_code: - 200 + - 400 register: remote_filesystem_results ignore_errors: true run_once: True @@ -74,9 +75,9 @@ - name: Client Cluster (access) | Create the remotefs and then mount the filesystem uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/remotefilesystems + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remotefilesystems method: POST user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -98,16 +99,16 @@ - name: "Client Cluster (access) | Check the result of adding the remotefs and mounting the filesystem (JOB: {{ send_key.json.jobs[0].jobId }})" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ send_key.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ send_key.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True when: (remote_filesystem_results.status == 400) \ No newline at end of file diff --git a/roles/remotemount_configure/tasks/remotecluster.yml b/roles/remotemount_configure/tasks/remotecluster.yml index 5b83405c..4da9dba6 100644 --- 
a/roles/remotemount_configure/tasks/remotecluster.yml +++ b/roles/remotemount_configure/tasks/remotecluster.yml @@ -9,9 +9,9 @@ - name: Storage Cluster (owner) | GET the Cluster Information uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -29,9 +29,9 @@ - name: Client Cluster (access) | GET the Cluster Information uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -76,28 +76,29 @@ # - name: "Storage Cluster (owner) | Check if the Client Cluster ('{{ access_cluster_name }}') is already defined" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" body_format: json status_code: - 200 + - 400 register: remote_clusters_results ignore_errors: true run_once: True # -# TODO: there is no Check if the Storage Cluster (Owner) is allready defined on Client Cluster, so in some cases where storage cluster have connection to client cluster (mmauth) but the client cluster don't have, the playbook will fail +# TODO: there is no Check if the Storage Cluster (Owner) is already defined on Client Cluster, so in some cases where storage cluster have connection to client cluster (mmauth) but the client cluster don't have, the playbook will fail # as the owningcluster is in a array, we need to loop over or make list of the array to be able to use when: # - name: Client Cluster (access) | List the remote cluster already defined uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/owningclusters + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/owningclusters method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -126,11 +127,11 @@ - name: Storage Cluster (owner) | Delete the Client Cluster, if it exists block: - - name: "DELETE: {{ remote_mount_endpoint 
}}/remoteclusters/{{ access_cluster_name }}" + - name: "DELETE: {{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: DELETE user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -140,16 +141,16 @@ - name: "Checking results from the job: {{ delete_call.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" when: not remote_clusters_results.failed and scale_remotemount_forceRun | bool run_once: True @@ -162,9 +163,9 @@ - name: Client Cluster (access) | Get the Public Key uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/authenticationkey + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/authenticationkey method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -182,9 +183,9 @@ - name: Storage Cluster (owner) | Send the Public Key of the Client Cluster (access) uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters method: POST user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -192,6 +193,7 @@ body: | { "remoteCluster": "{{ access_cluster_name }}", + "ciphers": ["{{ scale_remotemount_remotecluster_chipers }}"], "key": {{ accesskey_result.json.key | trim | replace(", ", ",") }} } status_code: @@ -201,16 +203,16 @@ - name: "Storage Cluster (owner) | Check the result of adding the Client Cluster {{ send_key.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ 
scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ send_key.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ send_key.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status != "FAILED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True # @@ -219,9 +221,9 @@ - name: Storage Cluster (owner) | Get the Public Key uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/authenticationkey + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/authenticationkey method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -244,9 +246,9 @@ - name: Client Cluster (access) | List the remote cluster already defined uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/owningclusters + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/owningclusters method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -264,11 +266,11 @@ # # This section is to gather the nodenames and adminNodeName # - - name: "Storage Cluster (owning) | GET AdminNodeNames Info - GET {{ scalemgmt_endpoint }}/nodes" + - name: "Storage Cluster (owning) | GET AdminNodeNames Info - GET {{ scale_remotemount_scalemgmt_endpoint }}/nodes" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes{{ scale_remotemount_storage_contactnodes_filter }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes{{ scale_remotemount_storage_contactnodes_filter }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -290,11 +292,11 @@ # # This Section is when using daemonNodeName # - - name: "Storage Cluster (owner) | GET daemonNodeName Info - GET {{ scalemgmt_endpoint }}/nodes/" + - name: "Storage Cluster (owner) | GET daemonNodeName Info - GET {{ scale_remotemount_scalemgmt_endpoint }}/nodes/" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ 
scalemgmt_endpoint }}/nodes/{{item}} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes/{{item}} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -325,9 +327,9 @@ - name: Client Cluster (access) | Add Storage Cluster as a Remote Cluster with adminNodeName uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/owningclusters + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/owningclusters method: POST user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -346,16 +348,16 @@ - name: "Client Cluster (access) | Check the result of adding the remote Storage Cluster with adminNodeName (JOB: {{ adminnode_uri_result.json.jobs[0].jobId }})" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ adminnode_uri_result.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ adminnode_uri_result.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True when: scale_remotemount_storage_adminnodename is defined and scale_remotemount_storage_adminnodename | bool # @@ -369,9 +371,9 @@ - name: Client Cluster (access) | Add Storage Cluster as a Remote Cluster with DeamonNodeName uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/owningclusters + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/owningclusters method: POST user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -390,16 +392,16 @@ - name: "Client Cluster (access) | Check the result of adding the remote Storage Cluster with DeamonNodeName (JOB: {{ daemonnodesname_uri_result.json.jobs[0].jobId }})" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ daemonnodesname_uri_result.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ daemonnodesname_uri_result.json.jobs[0].jobId 
}} method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True when: not scale_remotemount_storage_adminnodename when: @@ -407,14 +409,14 @@ - name: Step 5 - Configure and Mount filesystems debug: - msg: "On Storage Cluster, Check if filesystems is allready accessible for Client Cluster" + msg: "On Storage Cluster, Check if filesystems is already accessible for Client Cluster" run_once: True -- name: "Mount Filesystem | Storage Cluster (owner) | Check if filesystems is allready accessible for Client Cluster ('{{ access_cluster_name }}')" +- name: "Mount Filesystem | Storage Cluster (owner) | Check if filesystems is already accessible for Client Cluster ('{{ access_cluster_name }}')" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -449,9 +451,9 @@ - name: Mount Filesystem| Storage Cluster (owning) | Allow and Set the client cluster filesystem access attributes on the Storage Cluster uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }}/access/{{ item.scale_remotemount_storage_filesystem_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }}/access/{{ item.scale_remotemount_storage_filesystem_name }} method: POST user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -478,16 +480,16 @@ - name: Mount Filesystem | Storage Cluster (owning) | Check the result of setting the access attributes on the Storage Cluster ##"{{ item.json.jobs.0['jobId'] }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ item.json.jobs.0['jobId'] }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ item.json.jobs.0['jobId'] }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ 
scale_remotemount_restapi_retries_delay }}" run_once: True loop: "{{ uri_result.results }}" when: diff --git a/roles/remotemount_configure/tasks/remotecluster_api_cli.yml b/roles/remotemount_configure/tasks/remotecluster_api_cli.yml index 02303b6f..c7a14c53 100644 --- a/roles/remotemount_configure/tasks/remotecluster_api_cli.yml +++ b/roles/remotemount_configure/tasks/remotecluster_api_cli.yml @@ -9,9 +9,9 @@ - name: Remote Cluster Config - API-CLI | Storage Cluster (owner) | GET the Cluster Information uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -53,19 +53,20 @@ - name: Step 2 - Remote Cluster Config - API-CLI debug: - msg: "Check if the Remote Cluster is allready configured" + msg: "Check if the Remote Cluster is already configured" - name: "Remote Cluster Config - API-CLI | Storage Cluster (owner) | Check if the Client Cluster ('{{ access_cluster_name }}') is already defined" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" body_format: json status_code: - 200 + - 400 register: remote_clusters_results ignore_errors: true run_once: true @@ -80,11 +81,11 @@ - name: Remote Cluster Config - API-CLI | Storage Cluster (owner) | Delete the Client Cluster, if it exists block: - - name: "DELETE: {{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }}" + - name: "DELETE: {{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: DELETE user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -95,16 +96,16 @@ - name: "Checking results from the job: {{ delete_call.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ 
scale_remotemount_scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True when: - not remote_clusters_results.failed and scale_remotemount_forceRun | bool @@ -184,9 +185,9 @@ when: scale_remotemount_debug is defined and scale_remotemount_debug | bool run_once: True - - name: Remote Cluster Config - API-CLI | Client Cluster (accesing) | scale_remotemount_debug | Print out the Client Cluster (access) Public Key results to file ("{{ scale_remote_mount_client_access_key }}") + - name: Remote Cluster Config - API-CLI | Client Cluster (accesing) | scale_remotemount_debug | Print out the Client Cluster (access) Public Key results to file ("{{ scale_remotemount_client_access_key }}") copy: - dest: "{{ scale_remote_mount_client_access_key }}" + dest: "{{ scale_remotemount_client_access_key }}" content: "{{ accesskey_result }}\n" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool run_once: True @@ -196,9 +197,9 @@ - name: Remote Cluster Config - API-CLI | Storage Cluster (owner) | Send the Public Key of the Client Cluster (access) to Storage Cluster (Owner) uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters method: POST user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -206,7 +207,7 @@ body: | { "remoteCluster": "{{ access_cluster_name }}", - "ciphers": ["{{ remotecluster_chipers }}"], + "ciphers": ["{{ scale_remotemount_remotecluster_chipers }}"], "key": {{ accesskey_result.stdout_lines }} } status_code: @@ -218,25 +219,25 @@ - name: "Remote Cluster Config - API-CLI | Storage Cluster (owner) | Check the result of adding the Client Cluster {{ send_key.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ send_key.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ send_key.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status != "FAILED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True when: - (remote_clusters_results.status == 400) or (scale_remotemount_forceRun | bool) - name: Remote Cluster Config - API-CLI | Storage Cluster (owner) | Get the Public Key uri: 
- validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/authenticationkey/ + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/authenticationkey/ method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -273,11 +274,11 @@ # # This Section is gather the nodenames and adminNodeName # - - name: "Remote Cluster Config - API-CLI | Storage Cluster (owner) | GET adminNodeName Info - GET {{ scalemgmt_endpoint }}/nodes" + - name: "Remote Cluster Config - API-CLI | Storage Cluster (owner) | GET adminNodeName Info - GET {{ scale_remotemount_scalemgmt_endpoint }}/nodes" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes{{ scale_remotemount_storage_contactnodes_filter }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes{{ scale_remotemount_storage_contactnodes_filter }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -299,11 +300,11 @@ # # This Section is when using daemonNodeName # - - name: "Remote Cluster Config - API-CLI | Storage Cluster (owner) | GET daemonNodeName Info - GET {{ scalemgmt_endpoint }}/nodes/" + - name: "Remote Cluster Config - API-CLI | Storage Cluster (owner) | GET daemonNodeName Info - GET {{ scale_remotemount_scalemgmt_endpoint }}/nodes/" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes/{{item}} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes/{{item}} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" From 2fd0a1a4616dfef2b37f848c9dcee71f5ac7ecd0 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Tue, 11 Jan 2022 09:03:28 +0100 Subject: [PATCH 041/113] Add README.md (symlink) in each role directory Signed-off-by: Achim Christ --- roles/afm_cos_install/README.md | 1 + roles/afm_cos_prepare/README.md | 1 + roles/afm_cos_upgrade/README.md | 1 + roles/afm_cos_verify/README.md | 1 + roles/auth_upgrade/README.md | 1 + roles/callhome_configure/README.md | 1 + roles/callhome_install/README.md | 1 + roles/callhome_prepare/README.md | 1 + roles/callhome_verify/README.md | 1 + roles/ces_common/README.md | 1 + roles/core_common/README.md | 1 + roles/core_configure/README.md | 1 + roles/core_install/README.md | 1 + roles/core_prepare/README.md | 1 + roles/core_upgrade/README.md | 1 + roles/core_verify/README.md | 1 + roles/ece_configure/README.md | 1 + roles/ece_install/README.md | 1 + roles/ece_prepare/README.md | 1 + roles/ece_upgrade/README.md | 1 + roles/fal_configure/README.md | 1 + roles/fal_install/README.md | 1 + roles/fal_prepare/README.md | 1 + roles/fal_upgrade/README.md 
| 1 + roles/fal_verify/README.md | 1 + roles/gui_configure/README.md | 1 + roles/gui_install/README.md | 1 + roles/gui_prepare/README.md | 1 + roles/gui_upgrade/README.md | 1 + roles/gui_verify/README.md | 1 + roles/hdfs_configure/README.md | 1 + roles/hdfs_install/README.md | 1 + roles/hdfs_prepare/README.md | 1 + roles/hdfs_upgrade/README.md | 1 + roles/hdfs_verify/README.md | 1 + roles/nfs_configure/README.md | 1 + roles/nfs_install/README.md | 1 + roles/nfs_prepare/README.md | 1 + roles/nfs_upgrade/README.md | 1 + roles/nfs_verify/README.md | 1 + roles/obj_configure/README.md | 1 + roles/obj_install/README.md | 1 + roles/obj_prepare/README.md | 1 + roles/obj_upgrade/README.md | 1 + roles/obj_verify/README.md | 1 + roles/perfmon_configure/README.md | 1 + roles/perfmon_install/README.md | 1 + roles/perfmon_prepare/README.md | 1 + roles/perfmon_upgrade/README.md | 1 + roles/perfmon_verify/README.md | 1 + roles/remotemount_configure/README.md | 1 + roles/smb_configure/README.md | 1 + roles/smb_install/README.md | 1 + roles/smb_prepare/README.md | 1 + roles/smb_upgrade/README.md | 1 + roles/smb_verify/README.md | 1 + 56 files changed, 56 insertions(+) create mode 120000 roles/afm_cos_install/README.md create mode 120000 roles/afm_cos_prepare/README.md create mode 120000 roles/afm_cos_upgrade/README.md create mode 120000 roles/afm_cos_verify/README.md create mode 120000 roles/auth_upgrade/README.md create mode 120000 roles/callhome_configure/README.md create mode 120000 roles/callhome_install/README.md create mode 120000 roles/callhome_prepare/README.md create mode 120000 roles/callhome_verify/README.md create mode 120000 roles/ces_common/README.md create mode 120000 roles/core_common/README.md create mode 120000 roles/core_configure/README.md create mode 120000 roles/core_install/README.md create mode 120000 roles/core_prepare/README.md create mode 120000 roles/core_upgrade/README.md create mode 120000 roles/core_verify/README.md create mode 120000 roles/ece_configure/README.md create mode 120000 roles/ece_install/README.md create mode 120000 roles/ece_prepare/README.md create mode 120000 roles/ece_upgrade/README.md create mode 120000 roles/fal_configure/README.md create mode 120000 roles/fal_install/README.md create mode 120000 roles/fal_prepare/README.md create mode 120000 roles/fal_upgrade/README.md create mode 120000 roles/fal_verify/README.md create mode 120000 roles/gui_configure/README.md create mode 120000 roles/gui_install/README.md create mode 120000 roles/gui_prepare/README.md create mode 120000 roles/gui_upgrade/README.md create mode 120000 roles/gui_verify/README.md create mode 120000 roles/hdfs_configure/README.md create mode 120000 roles/hdfs_install/README.md create mode 120000 roles/hdfs_prepare/README.md create mode 120000 roles/hdfs_upgrade/README.md create mode 120000 roles/hdfs_verify/README.md create mode 120000 roles/nfs_configure/README.md create mode 120000 roles/nfs_install/README.md create mode 120000 roles/nfs_prepare/README.md create mode 120000 roles/nfs_upgrade/README.md create mode 120000 roles/nfs_verify/README.md create mode 120000 roles/obj_configure/README.md create mode 120000 roles/obj_install/README.md create mode 120000 roles/obj_prepare/README.md create mode 120000 roles/obj_upgrade/README.md create mode 120000 roles/obj_verify/README.md create mode 120000 roles/perfmon_configure/README.md create mode 120000 roles/perfmon_install/README.md create mode 120000 roles/perfmon_prepare/README.md create mode 120000 roles/perfmon_upgrade/README.md create 
mode 120000 roles/perfmon_verify/README.md create mode 120000 roles/remotemount_configure/README.md create mode 120000 roles/smb_configure/README.md create mode 120000 roles/smb_install/README.md create mode 120000 roles/smb_prepare/README.md create mode 120000 roles/smb_upgrade/README.md create mode 120000 roles/smb_verify/README.md diff --git a/roles/afm_cos_install/README.md b/roles/afm_cos_install/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/afm_cos_install/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/afm_cos_prepare/README.md b/roles/afm_cos_prepare/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/afm_cos_prepare/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/afm_cos_upgrade/README.md b/roles/afm_cos_upgrade/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/afm_cos_upgrade/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/afm_cos_verify/README.md b/roles/afm_cos_verify/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/afm_cos_verify/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/auth_upgrade/README.md b/roles/auth_upgrade/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/auth_upgrade/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/callhome_configure/README.md b/roles/callhome_configure/README.md new file mode 120000 index 00000000..72405a1e --- /dev/null +++ b/roles/callhome_configure/README.md @@ -0,0 +1 @@ +../../docs/README.CALLHOME.md \ No newline at end of file diff --git a/roles/callhome_install/README.md b/roles/callhome_install/README.md new file mode 120000 index 00000000..72405a1e --- /dev/null +++ b/roles/callhome_install/README.md @@ -0,0 +1 @@ +../../docs/README.CALLHOME.md \ No newline at end of file diff --git a/roles/callhome_prepare/README.md b/roles/callhome_prepare/README.md new file mode 120000 index 00000000..72405a1e --- /dev/null +++ b/roles/callhome_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.CALLHOME.md \ No newline at end of file diff --git a/roles/callhome_verify/README.md b/roles/callhome_verify/README.md new file mode 120000 index 00000000..72405a1e --- /dev/null +++ b/roles/callhome_verify/README.md @@ -0,0 +1 @@ +../../docs/README.CALLHOME.md \ No newline at end of file diff --git a/roles/ces_common/README.md b/roles/ces_common/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/ces_common/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/core_common/README.md b/roles/core_common/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/core_common/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/core_configure/README.md b/roles/core_configure/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/core_configure/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/core_install/README.md b/roles/core_install/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/core_install/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/core_prepare/README.md b/roles/core_prepare/README.md new file mode 120000 index 00000000..fe840054 --- 
/dev/null +++ b/roles/core_prepare/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/core_upgrade/README.md b/roles/core_upgrade/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/core_upgrade/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/core_verify/README.md b/roles/core_verify/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/core_verify/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/ece_configure/README.md b/roles/ece_configure/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/ece_configure/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/ece_install/README.md b/roles/ece_install/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/ece_install/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/ece_prepare/README.md b/roles/ece_prepare/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/ece_prepare/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/ece_upgrade/README.md b/roles/ece_upgrade/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/ece_upgrade/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/fal_configure/README.md b/roles/fal_configure/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/fal_configure/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/fal_install/README.md b/roles/fal_install/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/fal_install/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/fal_prepare/README.md b/roles/fal_prepare/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/fal_prepare/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/fal_upgrade/README.md b/roles/fal_upgrade/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/fal_upgrade/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/fal_verify/README.md b/roles/fal_verify/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/fal_verify/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/gui_configure/README.md b/roles/gui_configure/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/gui_configure/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/gui_install/README.md b/roles/gui_install/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/gui_install/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/gui_prepare/README.md b/roles/gui_prepare/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/gui_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/gui_upgrade/README.md b/roles/gui_upgrade/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/gui_upgrade/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git 
a/roles/gui_verify/README.md b/roles/gui_verify/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/gui_verify/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/hdfs_configure/README.md b/roles/hdfs_configure/README.md new file mode 120000 index 00000000..0c5f1b61 --- /dev/null +++ b/roles/hdfs_configure/README.md @@ -0,0 +1 @@ +../../docs/README.HDFS.md \ No newline at end of file diff --git a/roles/hdfs_install/README.md b/roles/hdfs_install/README.md new file mode 120000 index 00000000..0c5f1b61 --- /dev/null +++ b/roles/hdfs_install/README.md @@ -0,0 +1 @@ +../../docs/README.HDFS.md \ No newline at end of file diff --git a/roles/hdfs_prepare/README.md b/roles/hdfs_prepare/README.md new file mode 120000 index 00000000..0c5f1b61 --- /dev/null +++ b/roles/hdfs_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.HDFS.md \ No newline at end of file diff --git a/roles/hdfs_upgrade/README.md b/roles/hdfs_upgrade/README.md new file mode 120000 index 00000000..0c5f1b61 --- /dev/null +++ b/roles/hdfs_upgrade/README.md @@ -0,0 +1 @@ +../../docs/README.HDFS.md \ No newline at end of file diff --git a/roles/hdfs_verify/README.md b/roles/hdfs_verify/README.md new file mode 120000 index 00000000..0c5f1b61 --- /dev/null +++ b/roles/hdfs_verify/README.md @@ -0,0 +1 @@ +../../docs/README.HDFS.md \ No newline at end of file diff --git a/roles/nfs_configure/README.md b/roles/nfs_configure/README.md new file mode 120000 index 00000000..6a3df305 --- /dev/null +++ b/roles/nfs_configure/README.md @@ -0,0 +1 @@ +../../docs/README.NFS.md \ No newline at end of file diff --git a/roles/nfs_install/README.md b/roles/nfs_install/README.md new file mode 120000 index 00000000..6a3df305 --- /dev/null +++ b/roles/nfs_install/README.md @@ -0,0 +1 @@ +../../docs/README.NFS.md \ No newline at end of file diff --git a/roles/nfs_prepare/README.md b/roles/nfs_prepare/README.md new file mode 120000 index 00000000..6a3df305 --- /dev/null +++ b/roles/nfs_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.NFS.md \ No newline at end of file diff --git a/roles/nfs_upgrade/README.md b/roles/nfs_upgrade/README.md new file mode 120000 index 00000000..6a3df305 --- /dev/null +++ b/roles/nfs_upgrade/README.md @@ -0,0 +1 @@ +../../docs/README.NFS.md \ No newline at end of file diff --git a/roles/nfs_verify/README.md b/roles/nfs_verify/README.md new file mode 120000 index 00000000..6a3df305 --- /dev/null +++ b/roles/nfs_verify/README.md @@ -0,0 +1 @@ +../../docs/README.NFS.md \ No newline at end of file diff --git a/roles/obj_configure/README.md b/roles/obj_configure/README.md new file mode 120000 index 00000000..4f37f9be --- /dev/null +++ b/roles/obj_configure/README.md @@ -0,0 +1 @@ +../../docs/README.OBJ.md \ No newline at end of file diff --git a/roles/obj_install/README.md b/roles/obj_install/README.md new file mode 120000 index 00000000..4f37f9be --- /dev/null +++ b/roles/obj_install/README.md @@ -0,0 +1 @@ +../../docs/README.OBJ.md \ No newline at end of file diff --git a/roles/obj_prepare/README.md b/roles/obj_prepare/README.md new file mode 120000 index 00000000..4f37f9be --- /dev/null +++ b/roles/obj_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.OBJ.md \ No newline at end of file diff --git a/roles/obj_upgrade/README.md b/roles/obj_upgrade/README.md new file mode 120000 index 00000000..4f37f9be --- /dev/null +++ b/roles/obj_upgrade/README.md @@ -0,0 +1 @@ +../../docs/README.OBJ.md \ No newline at end of file diff --git a/roles/obj_verify/README.md 
b/roles/obj_verify/README.md new file mode 120000 index 00000000..4f37f9be --- /dev/null +++ b/roles/obj_verify/README.md @@ -0,0 +1 @@ +../../docs/README.OBJ.md \ No newline at end of file diff --git a/roles/perfmon_configure/README.md b/roles/perfmon_configure/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/perfmon_configure/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/perfmon_install/README.md b/roles/perfmon_install/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/perfmon_install/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/perfmon_prepare/README.md b/roles/perfmon_prepare/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/perfmon_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/perfmon_upgrade/README.md b/roles/perfmon_upgrade/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/perfmon_upgrade/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/perfmon_verify/README.md b/roles/perfmon_verify/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/perfmon_verify/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/remotemount_configure/README.md b/roles/remotemount_configure/README.md new file mode 120000 index 00000000..d978b78e --- /dev/null +++ b/roles/remotemount_configure/README.md @@ -0,0 +1 @@ +../../docs/README.REMOTEMOUNT.md \ No newline at end of file diff --git a/roles/smb_configure/README.md b/roles/smb_configure/README.md new file mode 120000 index 00000000..51c46d54 --- /dev/null +++ b/roles/smb_configure/README.md @@ -0,0 +1 @@ +../../docs/README.SMB.md \ No newline at end of file diff --git a/roles/smb_install/README.md b/roles/smb_install/README.md new file mode 120000 index 00000000..51c46d54 --- /dev/null +++ b/roles/smb_install/README.md @@ -0,0 +1 @@ +../../docs/README.SMB.md \ No newline at end of file diff --git a/roles/smb_prepare/README.md b/roles/smb_prepare/README.md new file mode 120000 index 00000000..51c46d54 --- /dev/null +++ b/roles/smb_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.SMB.md \ No newline at end of file diff --git a/roles/smb_upgrade/README.md b/roles/smb_upgrade/README.md new file mode 120000 index 00000000..51c46d54 --- /dev/null +++ b/roles/smb_upgrade/README.md @@ -0,0 +1 @@ +../../docs/README.SMB.md \ No newline at end of file diff --git a/roles/smb_verify/README.md b/roles/smb_verify/README.md new file mode 120000 index 00000000..51c46d54 --- /dev/null +++ b/roles/smb_verify/README.md @@ -0,0 +1 @@ +../../docs/README.SMB.md \ No newline at end of file From 6169c37794187b99934febc2a6f6edbf25f47808 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Tue, 11 Jan 2022 09:15:38 +0100 Subject: [PATCH 042/113] Add runtime metadata Signed-off-by: Achim Christ --- meta/runtime.yml | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 meta/runtime.yml diff --git a/meta/runtime.yml b/meta/runtime.yml new file mode 100644 index 00000000..43bbe450 --- /dev/null +++ b/meta/runtime.yml @@ -0,0 +1,2 @@ +--- +requires_ansible: '>=2.9' From 087c6e84dd4dafa5e2f588c91d9314112cd7f90e Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Tue, 11 Jan 2022 09:30:11 +0100 Subject: [PATCH 043/113] Link 'Docs Site' directly to README Signed-off-by: Achim Christ 
--- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 50128fed..66d4d537 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -58,7 +58,7 @@ dependencies: {} repository: https://github.com/IBM/ibm-spectrum-scale-install-infra # The URL to any online docs -documentation: https://github.com/IBM/ibm-spectrum-scale-install-infra +documentation: https://github.com/IBM/ibm-spectrum-scale-install-infra#readme # The URL to the homepage of the collection/project homepage: From ea16d2bdaae0194f848b2b1d8d23b55b456a0036 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Tue, 11 Jan 2022 10:45:01 +0100 Subject: [PATCH 044/113] Define minimum Ansible version according to Collections checklist Signed-off-by: Achim Christ --- meta/runtime.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meta/runtime.yml b/meta/runtime.yml index 43bbe450..2ee3c9fa 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -1,2 +1,2 @@ --- -requires_ansible: '>=2.9' +requires_ansible: '>=2.9.10' From 98c93b8b5bc5f658b36d4518a0b8a269be5b2bf3 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Fri, 11 Mar 2022 11:11:07 +0100 Subject: [PATCH 045/113] Initial draft version --- MIGRATING.md | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 MIGRATING.md diff --git a/MIGRATING.md b/MIGRATING.md new file mode 100644 index 00000000..296a8fb7 --- /dev/null +++ b/MIGRATING.md @@ -0,0 +1,72 @@ +# Migrating from master to main + +This Git repository has two branches: `master`, which is now stable, and `main`, which is where new functionality will be implemented. Your playbooks need to be adjusted when switching from one branch to the other, and these adjustments are outlined in this document. + +## What's changing? + +A long term goal of this project is to make the code available through [Ansible Galaxy](https://galaxy.ansible.com/). It became clear that changes to the project's directory structure would be inevitable to follow the conventions imposed by Galaxy (i.e. [Collections format](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html)) — and this was taken as an opportunity to also rename all existing roles and some variables for consistency. See [#570](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/570), [#572](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/572), and [#590](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/590) for details. + +All playbooks using the Ansible roles provided by this project need to adapt this new naming scheme, in order to use the latest updates implemented in the `main` branch. + +**Important**: The `master` branch (previous default) will stay with the current naming scheme. It is considered stable, which means that only critical bug fixes will be added. New functionality will solely be implemented in the `main` (new default) branch. + +## What do I need to do? + +The following steps need to be taken in order to consume the `main` branch in your own projects: + +- Repository contents need to be placed in a `collections/ansible_collections/ibm/spectrum_scale` directory, adjacent to your playbooks. 
The easiest way to do this is to clone the correct branch into the appropriate path: + + ```shell + $ git clone -b main https://github.com/IBM/ibm-spectrum-scale-install-infra.git collections/ansible_collections/ibm/spectrum_scale + ``` + + The resulting directory structure should look similar to this: + + ```shell + my_project/ + ├── collections/ + │ └── ansible_collections/ + │ └── ibm/ + │ └── spectrum_scale/ + │ └── ... + ├── hosts + └── playbook.yml + ``` + +- Once the repository contents are available in the appropriate path, roles can be referenced by using their Fully Qualified Collection Name (FQCN). A minimal playbook should look similar to this: + + ```yaml + # playbook.yml: + --- + - hosts: cluster01 + roles: + - ibm.spectrum_scale.core_prepare + - ibm.spectrum_scale.core_install + - ibm.spectrum_scale.core_configure + - ibm.spectrum_scale.core_verify + ``` + + Refer to the [Ansible User Guide](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html#using-collections-in-a-playbook) for details on using collections, including alternate syntax with the `collections` keyword. + + Note that all role names have changed: + + - Old naming: `[component]/[precheck|node|cluster|postcheck]` + - New naming: `[component]_[prepare|install|configure|verify]` + + Refer to the examples in the [samples/](samples/) directory for a list of new role names. + +- Some variables have been renamed for consistency as well, but it's expected that these changes only affect very few users. See [#590](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/590) for details, and refer to [VARIABLESNEW.md](VARIABLESNEW.md) for a complete listing of all available variables. + +## Migration script + +If you have existing playbooks which reference roles provided by this project, and you wish to migrate to the new format, then there is a [migration script](migrate.sh) available to replace all occurrences of role names in a given file. You can use the migration script like so: + +```shell +$ ./migrate.sh playbook.yml +``` + +Note that the script will create a backup of the file prior to making any changes. Further note that the script does not perform any kind of syntax checking — so you will need to manually verify that the resulting code is syntactically correct. + +## What if I need help? + +Create a [new issue](https://github.com/IBM/ibm-spectrum-scale-install-infra/issues/new) and provide (the relevant parts of) your playbook, along with the exact error message. 
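The MIGRATING.md draft above mentions an alternate syntax using the play-level `collections` keyword but does not show it. A minimal sketch of that form, assuming the collection contents have been cloned to `collections/ansible_collections/ibm/spectrum_scale` as described and that the hosts group is named `cluster01`, could look like this:

```yaml
# playbook.yml (alternate form using the collections keyword):
---
- hosts: cluster01
  # Defines a search path for resolving unqualified role and module names
  collections:
    - ibm.spectrum_scale
  roles:
    # Short role names are resolved against the collections search path above
    - core_prepare
    - core_install
    - core_configure
    - core_verify
```

With the `collections` keyword the short role names should resolve to the same roles as the fully qualified `ibm.spectrum_scale.*` form shown earlier; the FQCN form remains the more explicit of the two options.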
From 8f34f6101b140ad6ba9ff8ef12cf1e67f6eff12b Mon Sep 17 00:00:00 2001 From: Dherendra Singh Date: Fri, 4 Feb 2022 18:51:56 +0530 Subject: [PATCH 046/113] hdfs-3220 feature --- roles/hdfs_configure/tasks/configure.yml | 4 ++-- roles/hdfs_install/defaults/main.yml | 3 +++ roles/hdfs_install/tasks/install.yml | 5 +++++ roles/hdfs_prepare/tasks/check.yml | 3 +++ roles/hdfs_prepare/tasks/prepare_env.yml | 27 +++++++++++++++++++----- roles/hdfs_upgrade/defaults/main.yml | 3 +++ roles/hdfs_upgrade/tasks/prepare_env.yml | 25 ++++++++++++++++++---- roles/hdfs_upgrade/tasks/upgrade.yml | 7 +++++- 8 files changed, 65 insertions(+), 12 deletions(-) diff --git a/roles/hdfs_configure/tasks/configure.yml b/roles/hdfs_configure/tasks/configure.yml index e7428461..911099bc 100644 --- a/roles/hdfs_configure/tasks/configure.yml +++ b/roles/hdfs_configure/tasks/configure.yml @@ -328,8 +328,8 @@ command: /usr/lpp/mmfs/bin/mmces service enable HDFS - name: Start Namenodes - shell: /usr/lpp/mmfs/hadoop/sbin/mmhdfs hdfs-nn restart - register: start_nn_status + shell: /usr/lpp/mmfs/bin/mmces service start HDFS -N "{{ item }}" + with_items: "{{ scale_hdfs_namenodes_list }}" - name: Check Namenodes running status shell: /usr/lpp/mmfs/hadoop/sbin/mmhdfs hdfs-nn status | grep 'namenode pid is' | wc -l diff --git a/roles/hdfs_install/defaults/main.yml b/roles/hdfs_install/defaults/main.yml index c2c41d9a..a7c01e37 100644 --- a/roles/hdfs_install/defaults/main.yml +++ b/roles/hdfs_install/defaults/main.yml @@ -20,6 +20,9 @@ scale_hdfs_rhel_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' # Directory to install 3.3.x hdfs package scale_hdfs_rhel_version_path_33: 'hdfs_rpms/rhel/hdfs_3.3.x/' +# Directory to install 3.2.2.x hdfs package +scale_hdfs_rhel_version_path_322: 'hdfs_rpms/rhel/hdfs_3.2.2.x/' + # Directory to install 3.1.1.x hdfs package scale_hdfs_sles_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' diff --git a/roles/hdfs_install/tasks/install.yml b/roles/hdfs_install/tasks/install.yml index 7659b601..88fa0cf6 100644 --- a/roles/hdfs_install/tasks/install.yml +++ b/roles/hdfs_install/tasks/install.yml @@ -95,6 +95,11 @@ hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path_33 }}" when: transparency_33_enabled|bool + - name: install | Fetch hdfs version + set_fact: + hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path_322 }}" + when: transparency_322_enabled|bool + - name: install | Fetch hdfs rpm dir path for rhel set_fact: hdfs_rpm_path_rhel: "{{ hdfs_version_path_selection_rhel }}" diff --git a/roles/hdfs_prepare/tasks/check.yml b/roles/hdfs_prepare/tasks/check.yml index 28d3679e..46d43177 100644 --- a/roles/hdfs_prepare/tasks/check.yml +++ b/roles/hdfs_prepare/tasks/check.yml @@ -4,6 +4,9 @@ - debug: msg: "transparency_33_enabled: {{ transparency_33_enabled|bool }}" +- debug: + msg: "transparency_322_enabled: {{ transparency_322_enabled|bool }}" + - name: global_var | Initialize set_fact: scale_hdfs_cluster: [] diff --git a/roles/hdfs_prepare/tasks/prepare_env.yml b/roles/hdfs_prepare/tasks/prepare_env.yml index 4b52c076..ff52a973 100644 --- a/roles/hdfs_prepare/tasks/prepare_env.yml +++ b/roles/hdfs_prepare/tasks/prepare_env.yml @@ -2,19 +2,36 @@ - name: set_fact: transparency_33_enabled: "False" - transparency_version: "False" + transparency_322_enabled: "False" + transparency_version_33: "False" + transparency_version_322: "False" - name: shell: "echo $SCALE_HDFS_TRANSPARENCY_VERSION_33_ENABLE" - register: transparency_version + register: transparency_version_33 + delegate_to: 
localhost + run_once: true + +- name: + shell: "echo $SCALE_HDFS_TRANSPARENCY_VERSION_322_ENABLE" + register: transparency_version_322 + delegate_to: localhost + run_once: true + +- name: + set_fact: + transparency_33_enabled: "{{ transparency_version_33.stdout|bool }}" + when: + - transparency_version_33.stdout is defined + - transparency_version_33.stdout|bool delegate_to: localhost run_once: true - name: set_fact: - transparency_33_enabled: "{{ transparency_version.stdout|bool }}" + transparency_322_enabled: "{{ transparency_version_322.stdout|bool }}" when: - - transparency_version.stdout is defined - - transparency_version.stdout|bool + - transparency_version_322.stdout is defined + - transparency_version_322.stdout|bool delegate_to: localhost run_once: true diff --git a/roles/hdfs_upgrade/defaults/main.yml b/roles/hdfs_upgrade/defaults/main.yml index 74f48ac1..74bea331 100644 --- a/roles/hdfs_upgrade/defaults/main.yml +++ b/roles/hdfs_upgrade/defaults/main.yml @@ -21,6 +21,9 @@ scale_hdfs_rhel_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' # Directory to install 3.3.x hdfs package scale_hdfs_rhel_version_path_33: 'hdfs_rpms/rhel/hdfs_3.3.x/' +# Directory to install 3.2.2.x hdfs package +scale_hdfs_rhel_version_path_322: 'hdfs_rpms/rhel/hdfs_3.2.2.x/' + # Directory to install 3.1.1.x hdfs package scale_hdfs_sles_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' diff --git a/roles/hdfs_upgrade/tasks/prepare_env.yml b/roles/hdfs_upgrade/tasks/prepare_env.yml index 4b52c076..e58acfef 100644 --- a/roles/hdfs_upgrade/tasks/prepare_env.yml +++ b/roles/hdfs_upgrade/tasks/prepare_env.yml @@ -2,11 +2,19 @@ - name: set_fact: transparency_33_enabled: "False" - transparency_version: "False" + transparency_322_enabled: "False" + transparency_version_33: "False" + transparency_version_322: "False" - name: shell: "echo $SCALE_HDFS_TRANSPARENCY_VERSION_33_ENABLE" - register: transparency_version + register: transparency_version_33 + delegate_to: localhost + run_once: true + +- name: + shell: "echo $SCALE_HDFS_TRANSPARENCY_VERSION_322_ENABLE" + register: transparency_version_322 delegate_to: localhost run_once: true @@ -14,7 +22,16 @@ set_fact: transparency_33_enabled: "{{ transparency_version.stdout|bool }}" when: - - transparency_version.stdout is defined - - transparency_version.stdout|bool + - transparency_version_33.stdout is defined + - transparency_version_33.stdout|bool + delegate_to: localhost + run_once: true + +- name: + set_fact: + transparency_322_enabled: "{{ transparency_version.stdout|bool }}" + when: + - transparency_version_322.stdout is defined + - transparency_version_322.stdout|bool delegate_to: localhost run_once: true diff --git a/roles/hdfs_upgrade/tasks/upgrade.yml b/roles/hdfs_upgrade/tasks/upgrade.yml index 79d0570a..82008c5c 100644 --- a/roles/hdfs_upgrade/tasks/upgrade.yml +++ b/roles/hdfs_upgrade/tasks/upgrade.yml @@ -66,9 +66,14 @@ hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path_33 }}" when: transparency_33_enabled|bool + - name: + set_fact: + hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path_322 }}" + when: transparency_322_enabled|bool + - name: upgrade | Fetch hdfs rpm dir path for rhel set_fact: - hdfs_rpm_path_rhel: "{{ hdfs_version_path_selection_rhel }}" + hdfs_rpm_path_rhel: "{{ hdfs_version_path_selection_rhel }}" run_once: true delegate_to: localhost From b8bd178e981ce749daf3ba47bc0038d6236f98ad Mon Sep 17 00:00:00 2001 From: Dherendra Singh Date: Wed, 9 Mar 2022 15:53:05 +0530 Subject: [PATCH 047/113] Adding `loop_var` to avoid 
warning in nanenode start --- roles/hdfs_configure/tasks/configure.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/roles/hdfs_configure/tasks/configure.yml b/roles/hdfs_configure/tasks/configure.yml index 911099bc..e489bc59 100644 --- a/roles/hdfs_configure/tasks/configure.yml +++ b/roles/hdfs_configure/tasks/configure.yml @@ -328,8 +328,10 @@ command: /usr/lpp/mmfs/bin/mmces service enable HDFS - name: Start Namenodes - shell: /usr/lpp/mmfs/bin/mmces service start HDFS -N "{{ item }}" + shell: /usr/lpp/mmfs/bin/mmces service start HDFS -N "{{ nn_items }}" with_items: "{{ scale_hdfs_namenodes_list }}" + loop_control: + loop_var: nn_items - name: Check Namenodes running status shell: /usr/lpp/mmfs/hadoop/sbin/mmhdfs hdfs-nn status | grep 'namenode pid is' | wc -l From b12700498b69ba3da6fdfd1535632bcfde5c41bb Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Fri, 11 Mar 2022 12:09:24 +0100 Subject: [PATCH 048/113] Fix link to samples and add details for creating multiple clusters Signed-off-by: Achim Christ --- README.md | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 1ec65bc2..62cc52f9 100644 --- a/README.md +++ b/README.md @@ -314,14 +314,33 @@ Note that [Core GPFS](roles/core) is the only mandatory role, all other roles ar - Configure Protocol Services (OBJECT) (see [samples/playbook_ces_object.yml](samples/playbook_ces_object.yml)) - Configure Call Home (see [samples/playbook_callhome.yml](samples/playbook_callhome.yml)) - Configure File Audit Logging (see [samples/playbook_fileauditlogging.yml](samples/playbook_fileauditlogging.yml)) -- Configure cluster with daemon and admin network (see samples/daemon_admin_network) +- Configure cluster with daemon and admin network (see [samples/daemon_admin_network](samples/daemon_admin_network)) +- Configure remotely mounted filesystems (see [samples/playbook_remote_mount.yml](samples/playbook_remote_mount.yml)) + Cluster Membership ------------------ All hosts in the play are configured as nodes in the same Spectrum Scale cluster. If you want to add hosts to an existing cluster then add at least one node from that existing cluster to the play. -You can create multiple clusters by running multiple plays. +You can create multiple clusters by running multiple plays. Note that you will need to [reload the inventory](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/meta_module.html) to clear dynamic groups added by the Spectrum Scale roles: + +```yaml +- name: Create one cluster + hosts: cluster01 + roles: + ... +- name: Refresh inventory to clear dynamic groups + hosts: localhost + connection: local + gather_facts: false + tasks: + - meta: refresh_inventory +- name: Create another cluster + hosts: cluster02 + roles: + ... +``` Limitations From 3cb4b6b5198b876e8c254e78055de500118f8f1d Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Tue, 15 Mar 2022 06:12:32 +0100 Subject: [PATCH 049/113] Use scale_daemon_nodename for perfmon configuration Signed-off-by: Achim Christ --- roles/perfmon_configure/tasks/configure.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/perfmon_configure/tasks/configure.yml b/roles/perfmon_configure/tasks/configure.yml index f17566f6..04a620c8 100644 --- a/roles/perfmon_configure/tasks/configure.yml +++ b/roles/perfmon_configure/tasks/configure.yml @@ -88,7 +88,7 @@ - name: configure | Check before enable nodes for performance collection #TODO: Only checks first node for perfmon. 
vars: sensor_nodes: "{{ ansible_play_hosts | list }}" - shell: "/usr/lpp/mmfs/bin/mmlscluster -Y | grep -v HEADER | grep clusterNode |grep {{ sensor_nodes | first }} | cut -d ':' -f 14" + shell: "/usr/lpp/mmfs/bin/mmlscluster -Y | grep -v HEADER | grep clusterNode | grep {{ sensor_nodes | map('extract', hostvars, 'scale_daemon_nodename') | first }} | cut -d ':' -f 14" register: scale_zimon_conf_perfmon_check run_once: true failed_when: false @@ -109,7 +109,7 @@ - name: configure | Enable nodes for performance collection #TODO discuss: should it be dependent on scale_zimon_collector? vars: sensor_nodes: "{{ ansible_play_hosts | list }}" - command: /usr/lpp/mmfs/bin/mmchnode --perfmon -N {{ sensor_nodes | join(',') }} + command: /usr/lpp/mmfs/bin/mmchnode --perfmon -N {{ sensor_nodes | map('extract', hostvars, 'scale_daemon_nodename') | join(',') }} async: 45 poll: 5 register: scale_zimon_conf_enable_node_perfmon From 9a913a354f46c88d5ffc5fa1ac1a1e0e27d5a3b9 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Tue, 5 Apr 2022 06:11:06 +0200 Subject: [PATCH 050/113] Minor wording --- MIGRATING.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/MIGRATING.md b/MIGRATING.md index 296a8fb7..53fb9289 100644 --- a/MIGRATING.md +++ b/MIGRATING.md @@ -1,10 +1,10 @@ # Migrating from master to main -This Git repository has two branches: `master`, which is now stable, and `main`, which is where new functionality will be implemented. Your playbooks need to be adjusted when switching from one branch to the other, and these adjustments are outlined in this document. +This Git repository has two branches: `master`, which is now stable, and `main`, which is where new functionality will be implemented. Your playbooks need to be adjusted when switching from one branch to the other — and these adjustments are outlined in this document. ## What's changing? -A long term goal of this project is to make the code available through [Ansible Galaxy](https://galaxy.ansible.com/). It became clear that changes to the project's directory structure would be inevitable to follow the conventions imposed by Galaxy (i.e. [Collections format](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html)) — and this was taken as an opportunity to also rename all existing roles and some variables for consistency. See [#570](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/570), [#572](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/572), and [#590](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/590) for details. +A long-term goal of this project is to publish the code through [Ansible Galaxy](https://galaxy.ansible.com/). It became clear that changes to the project's directory structure would be inevitable to follow the conventions imposed by Galaxy (i.e. [Collections format](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html)) — and this was taken as an opportunity to also rename all existing roles and some variables for consistency. See [#570](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/570), [#572](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/572), and [#590](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/590) for details. All playbooks using the Ansible roles provided by this project need to adapt this new naming scheme, in order to use the latest updates implemented in the `main` branch. 
@@ -65,7 +65,7 @@ If you have existing playbooks which reference roles provided by this project, a $ ./migrate.sh playbook.yml ``` -Note that the script will create a backup of the file prior to making any changes. Further note that the script does not perform any kind of syntax checking — so you will need to manually verify that the resulting code is syntactically correct. +Note that the script will create a backup of the file prior to making any changes. Further note that the script does not perform any kind of syntax checking, so you will need to manually verify that the resulting code is syntactically correct. ## What if I need help? From 03057e3c2a596b0c754f1b54f2046f7cf3d17017 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Tue, 5 Apr 2022 06:16:48 +0200 Subject: [PATCH 051/113] Add reference to MIGRATING.md --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 1ec65bc2..53bf6933 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,7 @@ +__Important__: You are viewing the `main` branch of this repository. If you've previously used the `master` branch in your own playbooks then you will need to make some changes in order to switch to the `main` branch. See [MIGRATING.md](MIGRATING.md) for details. + +* * * + IBM Spectrum Scale (GPFS) Deployment using Ansible Roles ======================================================== From e84e93f045b78dbcd260439841300ec30ac0eb3e Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Fri, 18 Mar 2022 15:28:11 +0530 Subject: [PATCH 052/113] NSD active logic check to compare inventory defined nsd with system exist nsd Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/core_configure/tasks/storage.yml | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/roles/core_configure/tasks/storage.yml b/roles/core_configure/tasks/storage.yml index ade649e0..4cd055ac 100644 --- a/roles/core_configure/tasks/storage.yml +++ b/roles/core_configure/tasks/storage.yml @@ -100,9 +100,24 @@ - item.size > 1 with_items: "{{ scale_storage_stanzafile_new.results }}" - - name: storage | Wait for NSD configuration to be synced across cluster - wait_for: - timeout: 30 + - block: + - debug: + msg: Wait for 60 second for NSD configuration to be synced across cluster. Please be patient... 
+ + - name: storage | Wait for NSD configuration to be synced across cluster + wait_for: + timeout: 60 + + - name: storage | wait-nsd-active + shell: /usr/lpp/mmfs/bin/mmlsnsd -a -Y | grep -v HEADER | cut -d ':' -f 8 + register: scale_existig_nsd_list + until: + - ((scale_existig_nsd_list.stdout_lines) | length) >= (scale_storage_nsddefs | unique | length) + retries: 12 + delay: 20 + changed_when: false + when: scale_storage_nsddefs | length > 0 + run_once: true # # Create new filesystems From b4f917b935104466874d9f448153b4574317360f Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Tue, 12 Apr 2022 16:42:06 +0200 Subject: [PATCH 053/113] Explicitly reference `main` branch Signed-off-by: Achim Christ --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 53bf6933..c847c8fc 100644 --- a/README.md +++ b/README.md @@ -161,7 +161,7 @@ Installation Instructions ```shell $ mkdir my_project $ cd my_project - $ git clone https://github.com/IBM/ibm-spectrum-scale-install-infra.git collections/ansible_collections/ibm/spectrum_scale + $ git clone -b main https://github.com/IBM/ibm-spectrum-scale-install-infra.git collections/ansible_collections/ibm/spectrum_scale ``` Be sure to clone the project under the correct subdirectory: From e107ba60e8045d3126ebc2bd907e796d56bae69b Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Thu, 7 Apr 2022 19:20:42 +0530 Subject: [PATCH 054/113] UBUNTU 22.04 zimon collector directory change --- .../tasks/install_repository.yml | 26 +++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/roles/perfmon_install/tasks/install_repository.yml b/roles/perfmon_install/tasks/install_repository.yml index c3ab021e..c711ce84 100644 --- a/roles/perfmon_install/tasks/install_repository.yml +++ b/roles/perfmon_install/tasks/install_repository.yml @@ -7,6 +7,7 @@ - name: Initialize set_fact: scale_zimon_url: "" + scale_zimon_collector_url: "" - name: install | zimon path set_fact: @@ -31,8 +32,26 @@ - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' + +- name: install | zimon path + set_fact: + scale_zimon_collector_url: 'zimon_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- name: install | zimon path + set_fact: + scale_zimon_collector_url: 'zimon_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: install | zimon path + set_fact: + scale_zimon_collector_url: 'zimon_debs/ubuntu/ubuntu22/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' @@ -60,8 +79,8 @@ - name: install | Configure zimon APT repository apt_repository: - filename: spectrum-scale-zimon-debs - repo: "deb [trusted=yes] {{ scale_install_repository_url }}{{ scale_zimon_url }} ./" + filename: "{{ item.key }}" + repo: "deb [trusted=yes] {{ scale_install_repository_url }}{{ item.value }} ./" validate_certs: no state: present update_cache: yes @@ -71,6 +90,9 @@ - ansible_pkg_mgr == 'apt' - scale_install_repository_url is defined - scale_install_repository_url != 'existing' + with_dict: + spectrum-scale-zimon-debs: "{{ scale_zimon_url }}" + spectrum-scale-zimon-collector-debs: "{{ 
scale_zimon_collector_url }}" - name: install | Configure ZIMon repository zypper_repository: From ec1645435cbc5a0c72f93bb7b60c4dcfc03884b4 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Thu, 7 Apr 2022 20:56:39 +0530 Subject: [PATCH 055/113] UBUNTU22.04 upgrade support code for zimon --- .../tasks/install_repository.yml | 26 +++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/roles/perfmon_upgrade/tasks/install_repository.yml b/roles/perfmon_upgrade/tasks/install_repository.yml index abd705c8..7db1567b 100644 --- a/roles/perfmon_upgrade/tasks/install_repository.yml +++ b/roles/perfmon_upgrade/tasks/install_repository.yml @@ -7,6 +7,7 @@ - name: Initialize set_fact: scale_zimon_url: "" + scale_zimon_collector_url: "" is_scale_collector_pkg_installed: false is_scale_pmswift_pkg_installed: false @@ -33,8 +34,26 @@ - name: upgrade | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' + +- name: install | zimon path + set_fact: + scale_zimon_collector_url: 'zimon_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- name: install | zimon path + set_fact: + scale_zimon_collector_url: 'zimon_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: install | zimon path + set_fact: + scale_zimon_collector_url: 'zimon_debs/ubuntu/ubuntu22/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' + - name: upgrade | zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' @@ -62,8 +81,8 @@ - name: upgrade | Configure zimon APT repository apt_repository: - filename: spectrum-scale-zimon-debs - repo: "deb [trusted=yes] {{ scale_install_repository_url }}{{ scale_zimon_url }} ./" + filename: "{{ item.key }}" + repo: "deb [trusted=yes] {{ scale_install_repository_url }}{{ item.value }} ./" validate_certs: no state: present update_cache: yes @@ -73,6 +92,9 @@ - ansible_pkg_mgr == 'apt' - scale_install_repository_url is defined - scale_install_repository_url != 'existing' + with_dict: + spectrum-scale-zimon-debs: "{{ scale_zimon_url }}" + spectrum-scale-zimon-collector-debs: "{{ scale_zimon_collector_url }}" - name: upgrade | Configure ZIMon repository zypper_repository: From cbc50ddc05e3e39caa06edf85d66612cc52073b7 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Thu, 21 Apr 2022 00:22:01 +0530 Subject: [PATCH 056/113] Optional prereqs package install logic in the core/precheck role Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/core_configure/tasks/storage.yml | 4 +- roles/core_prepare/defaults/main.yml | 6 ++ roles/core_prepare/tasks/prepare.yml | 36 +++++++++++ .../remotemount_configure/tasks/precheck.yml | 59 +++++++++++++++++++ 4 files changed, 103 insertions(+), 2 deletions(-) diff --git a/roles/core_configure/tasks/storage.yml b/roles/core_configure/tasks/storage.yml index 4cd055ac..db1f6238 100644 --- a/roles/core_configure/tasks/storage.yml +++ b/roles/core_configure/tasks/storage.yml @@ -102,11 +102,11 @@ - block: - debug: - msg: Wait for 60 second for NSD configuration to be synced across cluster. Please be patient... + msg: Wait for 240 second for NSD configuration to be synced across cluster. Please be patient... 
- name: storage | Wait for NSD configuration to be synced across cluster wait_for: - timeout: 60 + timeout: 240 - name: storage | wait-nsd-active shell: /usr/lpp/mmfs/bin/mmlsnsd -a -Y | grep -v HEADER | cut -d ':' -f 8 diff --git a/roles/core_prepare/defaults/main.yml b/roles/core_prepare/defaults/main.yml index 336d2e10..05a3dbbe 100644 --- a/roles/core_prepare/defaults/main.yml +++ b/roles/core_prepare/defaults/main.yml @@ -96,6 +96,12 @@ scale_build_gplsrc_prereqs: - kernel-devel - make +## List of optional prereq package to install +scale_prereqs_package: + - numactl + +## List of optional prereq package to install flag +scale_install_prereqs_packages: false ## Default cluster name scale_cluster_clustername: gpfs1.local diff --git a/roles/core_prepare/tasks/prepare.yml b/roles/core_prepare/tasks/prepare.yml index 9fd9a321..dbefa4e5 100644 --- a/roles/core_prepare/tasks/prepare.yml +++ b/roles/core_prepare/tasks/prepare.yml @@ -119,3 +119,39 @@ name: yum-utils state: present when: ansible_pkg_mgr == 'yum' + +- block: ## when: scale_install_prereqs_packages is defined + - name: prepare | Install prerequisite packages + yum: + name: "{{ scale_prereqs_package }}" + state: present + disable_excludes: all + when: ansible_pkg_mgr == 'yum' + register: scale_gpl_yum_result + retries: 10 + until: scale_gpl_yum_result is success + delay: 20 + + - name: prepare | Install prerequisite packages + dnf: + name: "{{ scale_prereqs_package }}" + state: present + disable_excludes: all + when: ansible_pkg_mgr == 'dnf' + register: scale_gpl_dnf_result + retries: 10 + until: scale_gpl_dnf_result is success + delay: 20 + + - name: prepare | Install prerequisite packages + apt: + name: "{{ scale_prereqs_package }}" + state: present + when: ansible_pkg_mgr == 'apt' + + - name: prepare | Install prerequisite packages + zypper: + name: "{{ scale_prereqs_package }}" + state: present + when: ansible_pkg_mgr == 'zypper' + when: scale_install_prereqs_packages | bool diff --git a/roles/remotemount_configure/tasks/precheck.yml b/roles/remotemount_configure/tasks/precheck.yml index 94924018..88227f13 100644 --- a/roles/remotemount_configure/tasks/precheck.yml +++ b/roles/remotemount_configure/tasks/precheck.yml @@ -64,3 +64,62 @@ msg: "item.scale_remotemount_storage_filesystem_name is not defined" when: item.scale_remotemount_storage_filesystem_name is undefined loop: "{{ scale_remotemount_filesystem_name }}" + + +# This block is for systems with Scale GUI/RESTAPI on both Accessing cluster and Remote Cluster. 
+# +- block: # RESTAPI - when: scale_remotemount_client_no_gui == false + - name: Main | Storage Cluster (owner) | Check Connectivity to Storage Cluster GUI + uri: + validate_certs: "{{ validate_certs_uri }}" + force_basic_auth: yes + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + method: GET + user: "{{ scale_remotemount_storage_gui_username }}" + password: "{{ scale_remotemount_storage_gui_password }}" + body_format: json + status_code: + - 200 + register: storage_cluster_status + until: + - storage_cluster_status.status == 200 + retries: 15 + delay: 30 + changed_when: false + + + - name: Main | Storage Cluster (owner) | Conenction Refused Storage Cluster + run_once: True + fail: + msg: "There is issues connection to GUI/RestAPI, http return code: {{ storage_cluster_status.status }}" + when: + - storage_cluster_status.status != 200 + + - name: Main | Client Cluster (access) | Check Connectivity to Client Cluster GUI + uri: + validate_certs: "{{ validate_certs_uri }}" + force_basic_auth: yes + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + method: GET + user: "{{ scale_remotemount_client_gui_username }}" + password: "{{ scale_remotemount_client_gui_password }}" + body_format: json + status_code: + - 200 + register: access_cluster_status + until: + - access_cluster_status.status == 200 + retries: 15 + delay: 30 + changed_when: false + + - name: Main | Client Cluster (access) | Conenction Refused Client Cluster + run_once: True + fail: + msg: "There is issues connection to GUI/RestAPI, http return code: {{ access_cluster_status.status }}" + when: + - access_cluster_status.status != 200 + + when: + - scale_remotemount_client_no_gui == false + From 310bdfdf5799f0e38c3c303d614a63fc45b03d51 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Mon, 25 Apr 2022 22:42:27 +0530 Subject: [PATCH 057/113] UBUNTU 22 support for SMB Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/nfs_install/tasks/install_repository.yml | 14 ++++++++++++++ roles/smb_install/tasks/install_repository.yml | 16 +++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/roles/nfs_install/tasks/install_repository.yml b/roles/nfs_install/tasks/install_repository.yml index adedb4e1..3e5b4727 100644 --- a/roles/nfs_install/tasks/install_repository.yml +++ b/roles/nfs_install/tasks/install_repository.yml @@ -44,6 +44,20 @@ scale_smb_url: 'smb_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version > '20' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_rpms/rhel7/' diff --git a/roles/smb_install/tasks/install_repository.yml b/roles/smb_install/tasks/install_repository.yml index 312cc102..a37c264f 100644 --- a/roles/smb_install/tasks/install_repository.yml +++ b/roles/smb_install/tasks/install_repository.yml @@ -32,7 +32,21 @@ - name: install | smb path set_fact: scale_smb_url: 'smb_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' + when: 
ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version > '20' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" - name: install | Configure smb YUM repository yum_repository: From 1d30f7eea834b50ab721d7d1e2ffae01e62f07ff Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Mon, 25 Apr 2022 22:50:54 +0530 Subject: [PATCH 058/113] Fixed type issue fix for ubuntu22 --- roles/nfs_install/tasks/install_repository.yml | 2 +- roles/smb_install/tasks/install_repository.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/nfs_install/tasks/install_repository.yml b/roles/nfs_install/tasks/install_repository.yml index 3e5b4727..3020389b 100644 --- a/roles/nfs_install/tasks/install_repository.yml +++ b/roles/nfs_install/tasks/install_repository.yml @@ -53,7 +53,7 @@ - name: install | smb path set_fact: scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' - when: ansible_distribution_major_version > '20' + when: ansible_distribution_major_version == '22' when: - ansible_distribution in scale_ubuntu_distribution - scale_version >= "5.1.4.0" diff --git a/roles/smb_install/tasks/install_repository.yml b/roles/smb_install/tasks/install_repository.yml index a37c264f..8cc96c3f 100644 --- a/roles/smb_install/tasks/install_repository.yml +++ b/roles/smb_install/tasks/install_repository.yml @@ -43,7 +43,7 @@ - name: install | smb path set_fact: scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' - when: ansible_distribution_major_version > '20' + when: ansible_distribution_major_version == '22' when: - ansible_distribution in scale_ubuntu_distribution - scale_version >= "5.1.4.0" From 53d774e36926eb79fa8b8ea0b19640317ed35536 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Tue, 26 Apr 2022 17:20:20 +0530 Subject: [PATCH 059/113] UBUNTU 22 fix for zimon Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- .../tasks/install_repository.yml | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/roles/perfmon_install/tasks/install_repository.yml b/roles/perfmon_install/tasks/install_repository.yml index c711ce84..66f32969 100644 --- a/roles/perfmon_install/tasks/install_repository.yml +++ b/roles/perfmon_install/tasks/install_repository.yml @@ -32,23 +32,18 @@ - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' - -- name: install | zimon path - set_fact: - scale_zimon_collector_url: 'zimon_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' - name: install | zimon path set_fact: - scale_zimon_collector_url: 'zimon_debs/ubuntu/ubuntu20/' + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu20/' when: - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' - scale_version >= "5.1.4.0" - name: install | zimon path set_fact: - scale_zimon_collector_url: 'zimon_debs/ubuntu/ubuntu22/' + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu22/' when: - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' @@ -62,6 +57,16 @@ scale_zimon_url: 'zimon_rpms/sles15/' when: 
ansible_distribution in scale_sles_distribution and ansible_distribution_major_version == '15' +- name: install | remove existing zimon APT repository + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/apt/sources.list.d/spectrum-scale-pm-ganesha-debs.list + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + - name: install | Configure ZIMon YUM repository yum_repository: name: spectrum-scale-zimon @@ -92,7 +97,6 @@ - scale_install_repository_url != 'existing' with_dict: spectrum-scale-zimon-debs: "{{ scale_zimon_url }}" - spectrum-scale-zimon-collector-debs: "{{ scale_zimon_collector_url }}" - name: install | Configure ZIMon repository zypper_repository: From 275209b32b8e22d85e415e6ba2182d1f88089a2b Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Tue, 26 Apr 2022 17:48:51 +0530 Subject: [PATCH 060/113] UBUNTU22 zimon upgrade issue Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/perfmon_upgrade/tasks/install_repository.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/roles/perfmon_upgrade/tasks/install_repository.yml b/roles/perfmon_upgrade/tasks/install_repository.yml index 7db1567b..854a89b6 100644 --- a/roles/perfmon_upgrade/tasks/install_repository.yml +++ b/roles/perfmon_upgrade/tasks/install_repository.yml @@ -38,19 +38,19 @@ - name: install | zimon path set_fact: - scale_zimon_collector_url: 'zimon_debs/ubuntu/' + scale_zimon_url: 'zimon_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' - name: install | zimon path set_fact: - scale_zimon_collector_url: 'zimon_debs/ubuntu/ubuntu20/' + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu20/' when: - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' - scale_version >= "5.1.4.0" - name: install | zimon path set_fact: - scale_zimon_collector_url: 'zimon_debs/ubuntu/ubuntu22/' + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu22/' when: - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' @@ -94,7 +94,6 @@ - scale_install_repository_url != 'existing' with_dict: spectrum-scale-zimon-debs: "{{ scale_zimon_url }}" - spectrum-scale-zimon-collector-debs: "{{ scale_zimon_collector_url }}" - name: upgrade | Configure ZIMon repository zypper_repository: From 6bfb38dd6fb1571ccb78b9d303c6ec4a5badad95 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Wed, 27 Apr 2022 16:52:07 +0530 Subject: [PATCH 061/113] UBUNTU 22 support for NFS package Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/nfs_install/tasks/install_repository.yml | 14 ++++++++++++++ roles/nfs_upgrade/tasks/install_repository.yml | 16 +++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/roles/nfs_install/tasks/install_repository.yml b/roles/nfs_install/tasks/install_repository.yml index 3020389b..4e43299f 100644 --- a/roles/nfs_install/tasks/install_repository.yml +++ b/roles/nfs_install/tasks/install_repository.yml @@ -58,6 +58,20 @@ - ansible_distribution in scale_ubuntu_distribution - scale_version >= "5.1.4.0" +- block: + - name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + - name: install 
| zimon path set_fact: scale_zimon_url: 'zimon_rpms/rhel7/' diff --git a/roles/nfs_upgrade/tasks/install_repository.yml b/roles/nfs_upgrade/tasks/install_repository.yml index d77016d2..e3045572 100644 --- a/roles/nfs_upgrade/tasks/install_repository.yml +++ b/roles/nfs_upgrade/tasks/install_repository.yml @@ -54,7 +54,21 @@ scale_zimon_url: 'zimon_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' -- name: install|configure nfs YUM repository +- block: + - name: upgrade | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: upgrade | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + +- name: upgrade | configure nfs YUM repository yum_repository: name: spectrum-scale-nfs-rpms description: IBM Spectrum Scale (NFS RPMS) From e4040d731668c52f9b502249cf7ee40f6d71b4f8 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Thu, 28 Apr 2022 21:19:10 +0530 Subject: [PATCH 062/113] UBUNTU 22 fixed for pm-ganesha Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/nfs_install/tasks/install_repository.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/roles/nfs_install/tasks/install_repository.yml b/roles/nfs_install/tasks/install_repository.yml index 4e43299f..37fd5a58 100644 --- a/roles/nfs_install/tasks/install_repository.yml +++ b/roles/nfs_install/tasks/install_repository.yml @@ -54,11 +54,7 @@ set_fact: scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' when: ansible_distribution_major_version == '22' - when: - - ansible_distribution in scale_ubuntu_distribution - - scale_version >= "5.1.4.0" -- block: - name: install | nfs path set_fact: scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu20/' @@ -100,7 +96,7 @@ - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' - name: install|configure nfs YUM repository yum_repository: From fc3f026b9c3b3d23cf30a3b1d2ba37ea944c2022 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Mon, 2 May 2022 15:48:03 +0530 Subject: [PATCH 063/113] UBUNTu22 upgrade support Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/nfs_upgrade/tasks/install_repository.yml | 2 +- .../perfmon_upgrade/tasks/install_repository.yml | 10 ++++++++++ roles/smb_upgrade/tasks/install_repository.yml | 16 +++++++++++++++- 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/roles/nfs_upgrade/tasks/install_repository.yml b/roles/nfs_upgrade/tasks/install_repository.yml index e3045572..32bafeed 100644 --- a/roles/nfs_upgrade/tasks/install_repository.yml +++ b/roles/nfs_upgrade/tasks/install_repository.yml @@ -52,7 +52,7 @@ - name: upgrade | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' - block: - name: upgrade | nfs path diff --git a/roles/perfmon_upgrade/tasks/install_repository.yml b/roles/perfmon_upgrade/tasks/install_repository.yml index 854a89b6..1ff8a58b 100644 --- 
a/roles/perfmon_upgrade/tasks/install_repository.yml +++ b/roles/perfmon_upgrade/tasks/install_repository.yml @@ -64,6 +64,16 @@ scale_zimon_url: 'zimon_rpms/sles15/' when: ansible_distribution in scale_sles_distribution and ansible_distribution_major_version == '15' +- name: install | remove existing zimon APT repository + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/apt/sources.list.d/spectrum-scale-pm-ganesha-debs.list + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + - name: upgrade | Configure ZIMon YUM repository yum_repository: name: spectrum-scale-zimon diff --git a/roles/smb_upgrade/tasks/install_repository.yml b/roles/smb_upgrade/tasks/install_repository.yml index 0a9c2412..58b55509 100644 --- a/roles/smb_upgrade/tasks/install_repository.yml +++ b/roles/smb_upgrade/tasks/install_repository.yml @@ -32,7 +32,21 @@ - name: upgrade | smb path set_fact: scale_smb_url: 'smb_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" - name: upgrade | Configure smb YUM repository yum_repository: From c1c26faabcc85f38e1d8cdce1ff5a6a4f12b6634 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Wed, 4 May 2022 23:32:10 +0530 Subject: [PATCH 064/113] UBUNTU 22 support for local and remote method Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/nfs_install/tasks/install_local_pkg.yml | 26 ++++++++++++++++++- .../nfs_install/tasks/install_remote_pkg.yml | 26 ++++++++++++++++++- .../tasks/install_local_pkg.yml | 13 ++++++++++ .../tasks/install_remote_pkg.yml | 13 ++++++++++ roles/smb_install/tasks/install_local_pkg.yml | 14 ++++++++++ .../smb_install/tasks/install_remote_pkg.yml | 14 ++++++++++ 6 files changed, 104 insertions(+), 2 deletions(-) diff --git a/roles/nfs_install/tasks/install_local_pkg.yml b/roles/nfs_install/tasks/install_local_pkg.yml index d43568af..8a696f59 100644 --- a/roles/nfs_install/tasks/install_local_pkg.yml +++ b/roles/nfs_install/tasks/install_local_pkg.yml @@ -155,7 +155,7 @@ - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' - name: install | smb path set_fact: @@ -172,6 +172,30 @@ scale_smb_url: 'smb_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + + - name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu22/' + 
when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + # Find nfs rpms - block: ## when: host is defined as a protocol node diff --git a/roles/nfs_install/tasks/install_remote_pkg.yml b/roles/nfs_install/tasks/install_remote_pkg.yml index 56472bee..a861079f 100644 --- a/roles/nfs_install/tasks/install_remote_pkg.yml +++ b/roles/nfs_install/tasks/install_remote_pkg.yml @@ -129,7 +129,7 @@ - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' - name: install | smb path set_fact: @@ -146,6 +146,30 @@ scale_smb_url: 'smb_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + + - name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + - block: ## when: host is defined as a protocol node - name: install | Find gpfs.smb (gpfs.smb) package diff --git a/roles/perfmon_install/tasks/install_local_pkg.yml b/roles/perfmon_install/tasks/install_local_pkg.yml index e4ae8fa8..c38bb80a 100644 --- a/roles/perfmon_install/tasks/install_local_pkg.yml +++ b/roles/perfmon_install/tasks/install_local_pkg.yml @@ -136,6 +136,19 @@ scale_zimon_url: 'zimon_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu22/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' diff --git a/roles/perfmon_install/tasks/install_remote_pkg.yml b/roles/perfmon_install/tasks/install_remote_pkg.yml index 2113459e..62af260f 100644 --- a/roles/perfmon_install/tasks/install_remote_pkg.yml +++ b/roles/perfmon_install/tasks/install_remote_pkg.yml @@ -110,6 +110,19 @@ scale_zimon_url: 'zimon_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu22/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' + - name: install | zimon path 
set_fact: scale_zimon_url: 'zimon_rpms/sles12/' diff --git a/roles/smb_install/tasks/install_local_pkg.yml b/roles/smb_install/tasks/install_local_pkg.yml index 4e7666a0..358354da 100644 --- a/roles/smb_install/tasks/install_local_pkg.yml +++ b/roles/smb_install/tasks/install_local_pkg.yml @@ -133,6 +133,20 @@ scale_smb_url: 'smb_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + # Find smb rpms - block: ## when: ansible_distribution in scale_rhel_distribution or ansible_distribution in scale_sles_distribution diff --git a/roles/smb_install/tasks/install_remote_pkg.yml b/roles/smb_install/tasks/install_remote_pkg.yml index 3b9ada75..6f9f18d1 100644 --- a/roles/smb_install/tasks/install_remote_pkg.yml +++ b/roles/smb_install/tasks/install_remote_pkg.yml @@ -106,6 +106,20 @@ scale_smb_url: 'smb_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + # Find smb rpms - block: ## when: ansible_distribution in scale_rhel_distribution or ansible_distribution in scale_sles_distribution From ff30086a2281b82b5a1f47076947404ea26a43a4 Mon Sep 17 00:00:00 2001 From: Dherendra Singh Date: Mon, 9 May 2022 11:22:08 +0530 Subject: [PATCH 065/113] [TS009168275] HDFS Transparency is not installed properly --- roles/hdfs_install/tasks/yum/install.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/hdfs_install/tasks/yum/install.yml b/roles/hdfs_install/tasks/yum/install.yml index a2921e6d..32137175 100644 --- a/roles/hdfs_install/tasks/yum/install.yml +++ b/roles/hdfs_install/tasks/yum/install.yml @@ -4,4 +4,4 @@ name: "{{ scale_install_all_packages }}" state: present disable_gpg_check: "{{ scale_disable_gpgcheck }}" - when: ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_protocol_nodes_list + when: ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list or ansible_fqdn in scale_protocol_nodes_list or inventory_hostname in scale_protocol_nodes_list From 43a74a4aa13d869f607fc78f15a51bad2b129203 Mon Sep 17 00:00:00 2001 From: Dherendra Singh Date: Tue, 10 May 2022 19:35:43 +0530 Subject: [PATCH 066/113] [TS009168275] HDFS Transparency is not installed properly --- roles/hdfs_configure/tasks/env_setup.yml | 6 +++--- roles/hdfs_prepare/tasks/java_home.yml | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/roles/hdfs_configure/tasks/env_setup.yml b/roles/hdfs_configure/tasks/env_setup.yml index b090ebc7..cc099e5d 100644 --- a/roles/hdfs_configure/tasks/env_setup.yml +++ b/roles/hdfs_configure/tasks/env_setup.yml @@ -38,7 +38,7 @@ state: present line: "export JAVA_HOME={{ javahome_path }}" when: - - ansible_fqdn in scale_hdfs_nodes_list + - 
ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - name: "env_setup | HDFS and GPFS bin to PATH" lineinfile: @@ -46,7 +46,7 @@ state: present line: 'export PATH=$PATH:$JAVA_HOME/bin:/usr/lpp/mmfs/bin:/usr/lpp/mmfs/hadoop/sbin:/usr/lpp/mmfs/hadoop/bin' when: - - ansible_fqdn in scale_hdfs_nodes_list + - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - name: "env_setup | ulimit tunning" lineinfile: @@ -61,4 +61,4 @@ loop_control: loop_var: limit_items when: - - ansible_fqdn in scale_hdfs_nodes_list \ No newline at end of file + - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list \ No newline at end of file diff --git a/roles/hdfs_prepare/tasks/java_home.yml b/roles/hdfs_prepare/tasks/java_home.yml index d9e44391..08a11d62 100644 --- a/roles/hdfs_prepare/tasks/java_home.yml +++ b/roles/hdfs_prepare/tasks/java_home.yml @@ -34,36 +34,36 @@ - name: check | Fetch JAVA_HOME path shell: echo $JAVA_HOME register: java_path - when: ansible_fqdn in scale_hdfs_nodes_list + when: ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - name: check | Check JAVA_HOME path exist stat: path: "{{ java_path.stdout }}" register: java_path_details - when: ansible_fqdn in scale_hdfs_nodes_list + when: ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - name: check | Assert JAVA_HOME path exist assert: that: - java_path_details.stat.exists fail_msg: The JAVA_HOME path does not exists ! - when: ansible_fqdn in scale_hdfs_nodes_list + when: ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - name: check | Set path of JAVA_HOME set_fact: javahome_path: "{{ java_path.stdout }}" when: - - ansible_fqdn in scale_hdfs_nodes_list + - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - name: check | verify JAVA command: "ls {{ javahome_path }}/bin/java" register: jvm_list when: - - ansible_fqdn in scale_hdfs_nodes_list + - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - javahome_path|length > 0 - fail: msg: "JAVA_HOME not set properly" when: - - ansible_fqdn in scale_hdfs_nodes_list + - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - jvm_list.rc != 0 \ No newline at end of file From bd4de1c861240bb28399c2998e4f4dafdfedb513 Mon Sep 17 00:00:00 2001 From: Sujeet Jha Date: Thu, 12 May 2022 00:37:11 +0530 Subject: [PATCH 067/113] Update install_repository.yml ubuntu20 and ubuntu22 support code for librdkafka package --- roles/fal_install/tasks/install_repository.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/roles/fal_install/tasks/install_repository.yml b/roles/fal_install/tasks/install_repository.yml index bc26b082..2bf2129a 100644 --- a/roles/fal_install/tasks/install_repository.yml +++ b/roles/fal_install/tasks/install_repository.yml @@ -20,8 +20,13 @@ - name: install | file audit logging path set_fact: - scale_fal_url: 'gpfs_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu22/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' - name: install | file 
audit logging path set_fact: From b4a13fd9477528e00644f5fd5fe060ea304d7569 Mon Sep 17 00:00:00 2001 From: sujeet kumar jha Date: Fri, 13 May 2022 20:58:10 +0530 Subject: [PATCH 068/113] Update install_repository.yml FAL code for ub22 new directory structure --- roles/fal_upgrade/tasks/install_repository.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/roles/fal_upgrade/tasks/install_repository.yml b/roles/fal_upgrade/tasks/install_repository.yml index 075ac24c..d741cea4 100644 --- a/roles/fal_upgrade/tasks/install_repository.yml +++ b/roles/fal_upgrade/tasks/install_repository.yml @@ -27,8 +27,13 @@ - name: upgrade | file audit logging path set_fact: - scale_fal_url: 'gpfs_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + +- name: upgrade | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu22/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' - name: upgrade | file audit logging path set_fact: From f112a4d72b77156c5ddedb2af95739b13a9a92b9 Mon Sep 17 00:00:00 2001 From: sujeet kumar jha Date: Fri, 13 May 2022 21:02:56 +0530 Subject: [PATCH 069/113] Update install_remote_pkg.yml --- roles/fal_upgrade/tasks/install_remote_pkg.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/roles/fal_upgrade/tasks/install_remote_pkg.yml b/roles/fal_upgrade/tasks/install_remote_pkg.yml index 276146da..3cce27fe 100644 --- a/roles/fal_upgrade/tasks/install_remote_pkg.yml +++ b/roles/fal_upgrade/tasks/install_remote_pkg.yml @@ -92,8 +92,13 @@ - name: upgrade | file audit logging path set_fact: - scale_fal_url: 'gpfs_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + +- name: upgrade | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu22/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' - name: upgrade | file audit logging path set_fact: From 132372142f7b6d290f1c15fb312efb465f848d0a Mon Sep 17 00:00:00 2001 From: sujeet kumar jha Date: Fri, 13 May 2022 21:04:09 +0530 Subject: [PATCH 070/113] Update install_local_pkg.yml --- roles/fal_upgrade/tasks/install_local_pkg.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/roles/fal_upgrade/tasks/install_local_pkg.yml b/roles/fal_upgrade/tasks/install_local_pkg.yml index dc6c35ea..b03d149f 100644 --- a/roles/fal_upgrade/tasks/install_local_pkg.yml +++ b/roles/fal_upgrade/tasks/install_local_pkg.yml @@ -104,8 +104,13 @@ - name: upgrade | file audit logging path set_fact: - scale_fal_url: 'gpfs_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + +- name: upgrade | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu22/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' #todo wrong - name: upgrade | file audit logging path set_fact: From b09228b497943cd539bed3ea15b57ead58e2ff63 Mon Sep 17 00:00:00 2001 From: sujeet kumar jha Date: 
Mon, 16 May 2022 21:35:03 +0530 Subject: [PATCH 071/113] Update install_repository.yml u20 support code for scale_version < 5140 --- roles/fal_install/tasks/install_repository.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/roles/fal_install/tasks/install_repository.yml b/roles/fal_install/tasks/install_repository.yml index 2bf2129a..71b96a99 100644 --- a/roles/fal_install/tasks/install_repository.yml +++ b/roles/fal_install/tasks/install_repository.yml @@ -17,11 +17,18 @@ set_fact: scale_fal_url: 'gpfs_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/' + when: ansible_distribution in scale_ubuntu_distribution - name: install | file audit logging path set_fact: scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" - name: install | file audit logging path set_fact: From 76b2bf5d986ccab00f12d311eb3aa0a776997c94 Mon Sep 17 00:00:00 2001 From: sujeet kumar jha Date: Mon, 16 May 2022 21:41:39 +0530 Subject: [PATCH 072/113] Update install_local_pkg.yml --- roles/fal_upgrade/tasks/install_local_pkg.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/roles/fal_upgrade/tasks/install_local_pkg.yml b/roles/fal_upgrade/tasks/install_local_pkg.yml index b03d149f..99d3e904 100644 --- a/roles/fal_upgrade/tasks/install_local_pkg.yml +++ b/roles/fal_upgrade/tasks/install_local_pkg.yml @@ -101,11 +101,18 @@ set_fact: scale_fal_url: 'gpfs_rpms/rhel/' when: ansible_distribution in scale_rhel_distribution + +- name: upgrade | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/' + when: ansible_distribution in scale_ubuntu_distribution - name: upgrade | file audit logging path set_fact: scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" - name: upgrade | file audit logging path set_fact: From 09ca00499e96dc4b8e87057aab61afba83f39a22 Mon Sep 17 00:00:00 2001 From: sujeet kumar jha Date: Mon, 16 May 2022 21:44:03 +0530 Subject: [PATCH 073/113] Update install_remote_pkg.yml --- roles/fal_upgrade/tasks/install_remote_pkg.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/roles/fal_upgrade/tasks/install_remote_pkg.yml b/roles/fal_upgrade/tasks/install_remote_pkg.yml index 3cce27fe..31b0c0c7 100644 --- a/roles/fal_upgrade/tasks/install_remote_pkg.yml +++ b/roles/fal_upgrade/tasks/install_remote_pkg.yml @@ -89,11 +89,18 @@ set_fact: scale_fal_url: 'gpfs_rpms/rhel/' when: ansible_distribution in scale_rhel_distribution + +- name: upgrade | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/' + when: ansible_distribution in scale_ubuntu_distribution - name: upgrade | file audit logging path set_fact: scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= 
"5.1.4.0" - name: upgrade | file audit logging path set_fact: From 80bcb9f87bc82ea5f74a3689cd8ca9ed4498c36f Mon Sep 17 00:00:00 2001 From: sujeet kumar jha Date: Mon, 16 May 2022 21:45:18 +0530 Subject: [PATCH 074/113] Update install_repository.yml --- roles/fal_upgrade/tasks/install_repository.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/roles/fal_upgrade/tasks/install_repository.yml b/roles/fal_upgrade/tasks/install_repository.yml index d741cea4..2d62bdb1 100644 --- a/roles/fal_upgrade/tasks/install_repository.yml +++ b/roles/fal_upgrade/tasks/install_repository.yml @@ -25,10 +25,17 @@ scale_fal_url: 'gpfs_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: upgrade | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/' + when: ansible_distribution in scale_ubuntu_distribution + - name: upgrade | file audit logging path set_fact: scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" - name: upgrade | file audit logging path set_fact: From 8abcd4288292997f74ca53fbcc7b022182be1014 Mon Sep 17 00:00:00 2001 From: sujeet kumar jha Date: Mon, 16 May 2022 21:47:42 +0530 Subject: [PATCH 075/113] Update install_remote_pkg.yml --- roles/fal_install/tasks/install_remote_pkg.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/roles/fal_install/tasks/install_remote_pkg.yml b/roles/fal_install/tasks/install_remote_pkg.yml index 173ae206..7d29e7f0 100644 --- a/roles/fal_install/tasks/install_remote_pkg.yml +++ b/roles/fal_install/tasks/install_remote_pkg.yml @@ -94,6 +94,18 @@ set_fact: scale_fal_url: 'gpfs_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu22/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' - name: install | file audit logging path set_fact: From e5335983d227a72744cc4aa070311dddaabec10f Mon Sep 17 00:00:00 2001 From: sujeet kumar jha Date: Mon, 16 May 2022 21:48:30 +0530 Subject: [PATCH 076/113] Update install_local_pkg.yml --- roles/fal_install/tasks/install_local_pkg.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/roles/fal_install/tasks/install_local_pkg.yml b/roles/fal_install/tasks/install_local_pkg.yml index ef0fd609..be482acc 100644 --- a/roles/fal_install/tasks/install_local_pkg.yml +++ b/roles/fal_install/tasks/install_local_pkg.yml @@ -106,6 +106,18 @@ set_fact: scale_fal_url: 'gpfs_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu22/' + when: ansible_distribution in scale_ubuntu_distribution and 
ansible_distribution_major_version == '22' #todo wrong - name: install | file audit logging path set_fact: From 2ad05dbd851e12a83283407135644d5075555432 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Tue, 23 Nov 2021 23:14:02 +0530 Subject: [PATCH 077/113] Object Signing requirement fix and adding protocol node into existing object enabled cluster issue fix Signed-off-by: Rajan Mishra rajanmis@in.ibm.com Conflicts: roles/scale_object/node/meta/main.yml roles/scale_object/upgrade/meta/main.yml --- roles/ces_common/tasks/configure.yml | 28 +++++++++++++++++++ roles/obj_install/tasks/install_pmswift.yml | 9 ++++++ .../obj_install/tasks/install_repository.yml | 8 ++++++ roles/obj_install/vars/main.yml | 4 +++ .../obj_upgrade/tasks/install_repository.yml | 8 ++++++ roles/obj_upgrade/vars/main.yml | 4 +++ 6 files changed, 61 insertions(+) diff --git a/roles/ces_common/tasks/configure.yml b/roles/ces_common/tasks/configure.yml index 7374a265..37a3d4cf 100644 --- a/roles/ces_common/tasks/configure.yml +++ b/roles/ces_common/tasks/configure.yml @@ -100,6 +100,30 @@ ( scale_service_status.rc == 0 ) run_once: true +- name: configure | Check if OBJ is running + shell: + cmd: "{{ scale_command_path }}mmces service list|grep OBJ" + register: scale_service_status + when: (ansible_fqdn in scale_protocol_node_list) or + (inventory_hostname in scale_protocol_node_list) + ignore_errors: true + failed_when: false + run_once: true + +- name: configure | Add OBJ service to the list + set_fact: + scale_service_list: "{{ scale_service_list + [scale_service_status.stderr|regex_search('OBJ')] }}" + when: (ansible_fqdn in scale_protocol_node_list or inventory_hostname in scale_protocol_node_list) and + ( scale_service_status.rc > 0 ) + run_once: true + +- name: configure | Add OBJ service to the list + set_fact: + scale_service_list: "{{ scale_service_list + ['OBJ'] }}" + when: (ansible_fqdn in scale_protocol_node_list or inventory_hostname in scale_protocol_node_list) and + ( scale_service_status.rc == 0 ) + run_once: true + - import_role: name: ibm.spectrum_scale.nfs_install when: scale_ces_disabled_nodes|length > 0 and 'NFS' in scale_service_list @@ -108,6 +132,10 @@ name: ibm.spectrum_scale.smb_install when: scale_ces_disabled_nodes|length > 0 and 'SMB' in scale_service_list +- import_role: + name: scale_object/node + when: scale_ces_disabled_nodes|length > 0 and 'OBJ' in scale_service_list + - name: configure | Prepare ces nodes string set_fact: scale_ces_nodes: "{{ scale_ces_nodes + ',' + item|string }}" diff --git a/roles/obj_install/tasks/install_pmswift.yml b/roles/obj_install/tasks/install_pmswift.yml index fe776db5..7f8f2fbc 100644 --- a/roles/obj_install/tasks/install_pmswift.yml +++ b/roles/obj_install/tasks/install_pmswift.yml @@ -12,14 +12,23 @@ scale_zimon_url: 'zimon_rpms/rhel8' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' + - name: install | pmswift path + set_fact: + scale_gpg_key_path: + - "{{ scale_gpgKey_repository_obj_src }}" + - "{{ scale_gpgKey_repository_src }}" + when: scale_version >= "5.1.2.2" + - name: install | Configure ZIMon YUM repository yum_repository: name: spectrum-scale-zimon description: IBM Spectrum Scale (ZIMon) baseurl: "{{ scale_install_repository_url }}{{ scale_zimon_url }}" gpgcheck: "{{ scale_install_gpgcheck }}" + gpgkey: "{{ scale_gpg_key_path }}" repo_gpgcheck: no state: present + sslverify: false notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' diff --git 
a/roles/obj_install/tasks/install_repository.yml b/roles/obj_install/tasks/install_repository.yml index f8281978..2e286789 100644 --- a/roles/obj_install/tasks/install_repository.yml +++ b/roles/obj_install/tasks/install_repository.yml @@ -4,12 +4,20 @@ scale_obj_url: 'object_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | pmswift path + set_fact: + scale_gpg_key_path: + - "{{ scale_gpgKey_repository_obj_src }}" + - "{{ scale_gpgKey_repository_src }}" + when: scale_version >= "5.1.2.2" + - name: install | configure object YUM repository yum_repository: name: spectrum-scale-object-rpms description: IBM Spectrum Scale (object RPMS) baseurl: "{{ scale_install_repository_url }}{{ scale_obj_url }}" gpgcheck: "{{ scale_install_gpgcheck }}" + gpgkey: "{{ scale_gpg_key_path }}" repo_gpgcheck: no sslverify: no state: present diff --git a/roles/obj_install/vars/main.yml b/roles/obj_install/vars/main.yml index 5a6e9c01..e3f1da5d 100644 --- a/roles/obj_install/vars/main.yml +++ b/roles/obj_install/vars/main.yml @@ -8,3 +8,7 @@ scale_rpmversion: "{{ scale_version | regex_replace('^([0-9.]+)\\.([0-9])$', '\\ ## Default scale extraction path scale_extracted_default_path: "/usr/lpp/mmfs" scale_extracted_path: "{{ scale_extracted_default_path }}/{{ scale_version }}" + +scale_gpg_key_path: "{{ scale_gpgKey_repository_src }}" + +scale_gpgKey_repository_obj_src: "{{ scale_install_repository_url }}Public_Keys/RPM-GPG-KEY-redhat-release" diff --git a/roles/obj_upgrade/tasks/install_repository.yml b/roles/obj_upgrade/tasks/install_repository.yml index 7b3d210e..c553d4e8 100644 --- a/roles/obj_upgrade/tasks/install_repository.yml +++ b/roles/obj_upgrade/tasks/install_repository.yml @@ -4,12 +4,20 @@ scale_obj_url: 'object_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | pmswift path + set_fact: + scale_gpg_key_path: + - "{{ scale_gpgKey_repository_obj_src }}" + - "{{ scale_gpgKey_repository_src }}" + when: scale_version >= "5.1.2.2" + - name: upgrade | configure object YUM repository yum_repository: name: spectrum-scale-object-rpms description: IBM Spectrum Scale (object RPMS) baseurl: "{{ scale_install_repository_url }}{{ scale_obj_url }}" gpgcheck: "{{ scale_install_gpgcheck }}" + gpgkey: "{{ scale_gpg_key_path }}" repo_gpgcheck: no sslverify: no state: present diff --git a/roles/obj_upgrade/vars/main.yml b/roles/obj_upgrade/vars/main.yml index 5a6e9c01..e3f1da5d 100644 --- a/roles/obj_upgrade/vars/main.yml +++ b/roles/obj_upgrade/vars/main.yml @@ -8,3 +8,7 @@ scale_rpmversion: "{{ scale_version | regex_replace('^([0-9.]+)\\.([0-9])$', '\\ ## Default scale extraction path scale_extracted_default_path: "/usr/lpp/mmfs" scale_extracted_path: "{{ scale_extracted_default_path }}/{{ scale_version }}" + +scale_gpg_key_path: "{{ scale_gpgKey_repository_src }}" + +scale_gpgKey_repository_obj_src: "{{ scale_install_repository_url }}Public_Keys/RPM-GPG-KEY-redhat-release" From 05c54b82af005e2701b18cc3ab7a6a14c5c93b1b Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Wed, 1 Jun 2022 14:01:20 +0530 Subject: [PATCH 078/113] Object directory import fix with namespace Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/ces_common/tasks/configure.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ces_common/tasks/configure.yml b/roles/ces_common/tasks/configure.yml index 37a3d4cf..2f6b5e3f 100644 --- 
a/roles/ces_common/tasks/configure.yml +++ b/roles/ces_common/tasks/configure.yml @@ -133,7 +133,7 @@ when: scale_ces_disabled_nodes|length > 0 and 'SMB' in scale_service_list - import_role: - name: scale_object/node + name: ibm.spectrum_scale.obj_install when: scale_ces_disabled_nodes|length > 0 and 'OBJ' in scale_service_list - name: configure | Prepare ces nodes string From c29b9af578072e20c634e46473186d8893de0b12 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Tue, 14 Jun 2022 14:33:02 +0530 Subject: [PATCH 079/113] ECE fix for setsize Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/ece_configure/tasks/create_vdisk.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ece_configure/tasks/create_vdisk.yml b/roles/ece_configure/tasks/create_vdisk.yml index 6508ca50..98a0f2c3 100644 --- a/roles/ece_configure/tasks/create_vdisk.yml +++ b/roles/ece_configure/tasks/create_vdisk.yml @@ -13,7 +13,7 @@ current_code: "{{ item.ec }}" current_bs: "{{ item.blocksize }}" current_size: "{{ item.Size }}" - command: "{{ scale_command_path }}mmvdisk vs define --vs {{ current_vs }} --rg {{ current_rg }} --code {{ current_code }} --bs {{ current_bs }} --ss {{ current_size }}%" + command: "{{ scale_command_path }}mmvdisk vs define --vs {{ current_vs }} --rg {{ current_rg }} --code {{ current_code }} --bs {{ current_bs }} --ss {{ current_size }}" register: scale_vs_define failed_when: scale_vs_define.rc != 0 when: From c69d1131486c22d78b8ca522413ed8dcc854f8a8 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Thu, 16 Jun 2022 18:35:24 +0530 Subject: [PATCH 080/113] FAL issue for RHEL9 Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/fal_install/tasks/install_repository.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/fal_install/tasks/install_repository.yml b/roles/fal_install/tasks/install_repository.yml index 71b96a99..781d12d5 100644 --- a/roles/fal_install/tasks/install_repository.yml +++ b/roles/fal_install/tasks/install_repository.yml @@ -16,7 +16,7 @@ - name: install | file audit logging path set_fact: scale_fal_url: 'gpfs_rpms/rhel8/' - when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version >= '8' - name: install | file audit logging path set_fact: From d7f919fc79474afb2f4bd5172bbcc93e6da1d3c5 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Fri, 17 Jun 2022 12:55:34 +0530 Subject: [PATCH 081/113] RHEL9 repo code for protocol Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/nfs_install/tasks/install_repository.yml | 10 ++++++++++ roles/perfmon_install/tasks/install_repository.yml | 5 +++++ roles/smb_install/tasks/install_repository.yml | 5 +++++ 3 files changed, 20 insertions(+) diff --git a/roles/nfs_install/tasks/install_repository.yml b/roles/nfs_install/tasks/install_repository.yml index 37fd5a58..095f5e26 100644 --- a/roles/nfs_install/tasks/install_repository.yml +++ b/roles/nfs_install/tasks/install_repository.yml @@ -9,6 +9,11 @@ scale_nfs_url: 'ganesha_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_rpms/rhel8/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | nfs path set_fact: scale_nfs_url: 'ganesha_debs/ubuntu16/' @@ -78,6 +83,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' 
when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel8/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' diff --git a/roles/perfmon_install/tasks/install_repository.yml b/roles/perfmon_install/tasks/install_repository.yml index 66f32969..bb34c3c6 100644 --- a/roles/perfmon_install/tasks/install_repository.yml +++ b/roles/perfmon_install/tasks/install_repository.yml @@ -19,6 +19,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel8/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/ubuntu16/' diff --git a/roles/smb_install/tasks/install_repository.yml b/roles/smb_install/tasks/install_repository.yml index 8cc96c3f..cc240b7e 100644 --- a/roles/smb_install/tasks/install_repository.yml +++ b/roles/smb_install/tasks/install_repository.yml @@ -9,6 +9,11 @@ scale_smb_url: 'smb_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | smb path + set_fact: + scale_smb_url: 'smb_rpms/rhel8/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | smb path set_fact: scale_smb_url: 'smb_rpms/sles12/' From ce1e13b32fcee1096f161eaef67e8954bff54455 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Tue, 21 Jun 2022 11:56:10 +0530 Subject: [PATCH 082/113] Synced master object upgrade code with nextgen Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/obj_upgrade/meta/main.yml | 1 - roles/obj_upgrade/tasks/install_pmswift.yml | 4 +--- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/roles/obj_upgrade/meta/main.yml b/roles/obj_upgrade/meta/main.yml index 3bbbe418..4e20e076 100644 --- a/roles/obj_upgrade/meta/main.yml +++ b/roles/obj_upgrade/meta/main.yml @@ -17,4 +17,3 @@ galaxy_info: dependencies: - ibm.spectrum_scale.core_common - - ibm.spectrum_scale.ces_common diff --git a/roles/obj_upgrade/tasks/install_pmswift.yml b/roles/obj_upgrade/tasks/install_pmswift.yml index 16ba727a..1a145f3c 100644 --- a/roles/obj_upgrade/tasks/install_pmswift.yml +++ b/roles/obj_upgrade/tasks/install_pmswift.yml @@ -5,7 +5,7 @@ # Add pmswift rpm -- block: ## when: inventory_hostname in scale_obj_nodes_list +- block: ## when: inventory_hostname - name: upgrade | pmswift path set_fact: scale_obj_url: 'zimon_rpms/rhel8' @@ -30,5 +30,3 @@ set_fact: scale_install_all_packages: "{{ scale_install_all_packages + pmswift_package }}" when: scale_install_repository_url is undefined - - when: inventory_hostname in scale_obj_nodes_list From f54c7217c3c7442bf45bf852c464fddea07e26c0 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Tue, 21 Jun 2022 13:23:18 +0530 Subject: [PATCH 083/113] Upgradedirectory change for RHEL9 Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/fal_upgrade/tasks/install_repository.yml | 2 +- roles/nfs_upgrade/tasks/install_repository.yml | 7 ++++++- roles/perfmon_upgrade/tasks/install_repository.yml | 5 +++++ roles/smb_upgrade/tasks/install_repository.yml | 5 +++++ 4 files changed, 17 insertions(+), 2 
deletions(-) diff --git a/roles/fal_upgrade/tasks/install_repository.yml b/roles/fal_upgrade/tasks/install_repository.yml index 2d62bdb1..708c208c 100644 --- a/roles/fal_upgrade/tasks/install_repository.yml +++ b/roles/fal_upgrade/tasks/install_repository.yml @@ -23,7 +23,7 @@ - name: install | file audit logging path set_fact: scale_fal_url: 'gpfs_rpms/rhel8/' - when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version >= '8' - name: upgrade | file audit logging path set_fact: diff --git a/roles/nfs_upgrade/tasks/install_repository.yml b/roles/nfs_upgrade/tasks/install_repository.yml index 32bafeed..b34eb3be 100644 --- a/roles/nfs_upgrade/tasks/install_repository.yml +++ b/roles/nfs_upgrade/tasks/install_repository.yml @@ -9,6 +9,11 @@ scale_nfs_url: 'ganesha_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: upgrade | nfs path + set_fact: + scale_nfs_url: 'ganesha_rpms/rhel8/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: upgrade | nfs path set_fact: scale_nfs_url: 'ganesha_debs/ubuntu/' @@ -32,7 +37,7 @@ - name: upgrade | zimon path set_fact: scale_zimon_url: 'zimon_rpms/rhel8/' - when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version >= '8' - name: upgrade | zimon path set_fact: diff --git a/roles/perfmon_upgrade/tasks/install_repository.yml b/roles/perfmon_upgrade/tasks/install_repository.yml index 1ff8a58b..9ff97560 100644 --- a/roles/perfmon_upgrade/tasks/install_repository.yml +++ b/roles/perfmon_upgrade/tasks/install_repository.yml @@ -21,6 +21,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: upgrade | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel8/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: upgrade | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/ubuntu16/' diff --git a/roles/smb_upgrade/tasks/install_repository.yml b/roles/smb_upgrade/tasks/install_repository.yml index 58b55509..6b5e32b3 100644 --- a/roles/smb_upgrade/tasks/install_repository.yml +++ b/roles/smb_upgrade/tasks/install_repository.yml @@ -9,6 +9,11 @@ scale_smb_url: 'smb_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: upgrade | smb path + set_fact: + scale_smb_url: 'smb_rpms/rhel8/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: upgrade | smb path set_fact: scale_smb_url: 'smb_rpms/sles12/' From 7bff5b3640356515ea455503ef020f136bd7e6a9 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Wed, 22 Jun 2022 13:44:14 +0530 Subject: [PATCH 084/113] PMswift upgrade backport from master to nextgen Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/perfmon_upgrade/tasks/install_repository.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/roles/perfmon_upgrade/tasks/install_repository.yml b/roles/perfmon_upgrade/tasks/install_repository.yml index 9ff97560..08b552ec 100644 --- a/roles/perfmon_upgrade/tasks/install_repository.yml +++ 
b/roles/perfmon_upgrade/tasks/install_repository.yml @@ -220,7 +220,7 @@ - pmswiftd when: - scale_pmswift_status.rc is defined and scale_pmswift_status.rc == 0 - when: + when: - (is_scale_pmswift_pkg_installed | bool) - name: upgrade | pmswift packages to list @@ -228,6 +228,8 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ item ] }}" with_items: - "{{ scale_obj_sensors_packages }}" - when: (is_scale_pmswift_pkg_installed | bool) + when: + - is_scale_pmswift_pkg_installed | bool + - scale_zimon_offline_upgrade is undefined when: - ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' From 922677622b6c41335ddf950658255da3c981db2c Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Fri, 24 Jun 2022 23:56:44 +0530 Subject: [PATCH 085/113] RHEL9 support code Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/fal_install/tasks/install_repository.yml | 7 ++++++- roles/fal_upgrade/tasks/install_repository.yml | 7 ++++++- roles/nfs_install/tasks/install_repository.yml | 4 ++-- roles/nfs_upgrade/tasks/install_repository.yml | 9 +++++++-- roles/perfmon_install/tasks/install_repository.yml | 2 +- roles/perfmon_upgrade/tasks/install_repository.yml | 2 +- roles/smb_install/tasks/install_repository.yml | 2 +- roles/smb_upgrade/tasks/install_repository.yml | 2 +- 8 files changed, 25 insertions(+), 10 deletions(-) diff --git a/roles/fal_install/tasks/install_repository.yml b/roles/fal_install/tasks/install_repository.yml index 781d12d5..45f9f350 100644 --- a/roles/fal_install/tasks/install_repository.yml +++ b/roles/fal_install/tasks/install_repository.yml @@ -16,7 +16,12 @@ - name: install | file audit logging path set_fact: scale_fal_url: 'gpfs_rpms/rhel8/' - when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version >= '8' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' - name: install | file audit logging path set_fact: diff --git a/roles/fal_upgrade/tasks/install_repository.yml b/roles/fal_upgrade/tasks/install_repository.yml index 708c208c..8d07a61b 100644 --- a/roles/fal_upgrade/tasks/install_repository.yml +++ b/roles/fal_upgrade/tasks/install_repository.yml @@ -23,7 +23,12 @@ - name: install | file audit logging path set_fact: scale_fal_url: 'gpfs_rpms/rhel8/' - when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version >= '8' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' - name: upgrade | file audit logging path set_fact: diff --git a/roles/nfs_install/tasks/install_repository.yml b/roles/nfs_install/tasks/install_repository.yml index 095f5e26..987fef61 100644 --- a/roles/nfs_install/tasks/install_repository.yml +++ b/roles/nfs_install/tasks/install_repository.yml @@ -11,7 +11,7 @@ - name: install | nfs path set_fact: - scale_nfs_url: 'ganesha_rpms/rhel8/' + scale_nfs_url: 'ganesha_rpms/rhel9/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' - name: install | nfs path @@ -85,7 +85,7 @@ - name: install | zimon path set_fact: - 
scale_zimon_url: 'zimon_rpms/rhel8/' + scale_zimon_url: 'zimon_rpms/rhel9/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' - name: install | zimon path diff --git a/roles/nfs_upgrade/tasks/install_repository.yml b/roles/nfs_upgrade/tasks/install_repository.yml index b34eb3be..7fbc70c4 100644 --- a/roles/nfs_upgrade/tasks/install_repository.yml +++ b/roles/nfs_upgrade/tasks/install_repository.yml @@ -11,7 +11,7 @@ - name: upgrade | nfs path set_fact: - scale_nfs_url: 'ganesha_rpms/rhel8/' + scale_nfs_url: 'ganesha_rpms/rhel9/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' - name: upgrade | nfs path @@ -37,7 +37,12 @@ - name: upgrade | zimon path set_fact: scale_zimon_url: 'zimon_rpms/rhel8/' - when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version >= '8' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' + +- name: upgrade | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' - name: upgrade | zimon path set_fact: diff --git a/roles/perfmon_install/tasks/install_repository.yml b/roles/perfmon_install/tasks/install_repository.yml index bb34c3c6..6ced865b 100644 --- a/roles/perfmon_install/tasks/install_repository.yml +++ b/roles/perfmon_install/tasks/install_repository.yml @@ -21,7 +21,7 @@ - name: install | zimon path set_fact: - scale_zimon_url: 'zimon_rpms/rhel8/' + scale_zimon_url: 'zimon_rpms/rhel9/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' - name: install | zimon path diff --git a/roles/perfmon_upgrade/tasks/install_repository.yml b/roles/perfmon_upgrade/tasks/install_repository.yml index 08b552ec..667ff21a 100644 --- a/roles/perfmon_upgrade/tasks/install_repository.yml +++ b/roles/perfmon_upgrade/tasks/install_repository.yml @@ -23,7 +23,7 @@ - name: upgrade | zimon path set_fact: - scale_zimon_url: 'zimon_rpms/rhel8/' + scale_zimon_url: 'zimon_rpms/rhel9/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' - name: upgrade | zimon path diff --git a/roles/smb_install/tasks/install_repository.yml b/roles/smb_install/tasks/install_repository.yml index cc240b7e..f5d5a92b 100644 --- a/roles/smb_install/tasks/install_repository.yml +++ b/roles/smb_install/tasks/install_repository.yml @@ -11,7 +11,7 @@ - name: install | smb path set_fact: - scale_smb_url: 'smb_rpms/rhel8/' + scale_smb_url: 'smb_rpms/rhel9/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' - name: install | smb path diff --git a/roles/smb_upgrade/tasks/install_repository.yml b/roles/smb_upgrade/tasks/install_repository.yml index 6b5e32b3..6d1c3aa0 100644 --- a/roles/smb_upgrade/tasks/install_repository.yml +++ b/roles/smb_upgrade/tasks/install_repository.yml @@ -11,7 +11,7 @@ - name: upgrade | smb path set_fact: - scale_smb_url: 'smb_rpms/rhel8/' + scale_smb_url: 'smb_rpms/rhel9/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' - name: upgrade | smb path From 69b51d150a90498f5b043d67dfded61333eef668 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Mon, 27 Jun 2022 13:23:16 +0530 Subject: [PATCH 086/113] CES fix for nextgen Signed-off-by: Rajan Mishra rajanmis@in.ibm.com --- roles/ces_common/defaults/main.yml | 6 
++++++ roles/ces_common/tasks/configure.yml | 10 ++++++++++ roles/obj_install/meta/main.yml | 1 - 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/roles/ces_common/defaults/main.yml b/roles/ces_common/defaults/main.yml index 23fd6038..d3225d76 100644 --- a/roles/ces_common/defaults/main.yml +++ b/roles/ces_common/defaults/main.yml @@ -1,4 +1,10 @@ --- +## Spectrum Scale daemon nodename (defaults to node's hostname) +scale_daemon_nodename: "{{ ansible_hostname }}" + +## Spectrum Scale admin nodename (defaults to node's hostname) +scale_admin_nodename: "{{ scale_daemon_nodename }}" + # Default variables for the IBM Spectrum Scale (NFS) role - # either edit this file or define your own variables to override the defaults # If ces groups is defined, scale_protocols in scale_clusterdefinition.json will look like below diff --git a/roles/ces_common/tasks/configure.yml b/roles/ces_common/tasks/configure.yml index 2f6b5e3f..8590adb8 100644 --- a/roles/ces_common/tasks/configure.yml +++ b/roles/ces_common/tasks/configure.yml @@ -7,6 +7,16 @@ scale_service_list: [] scale_ces_nodes: "" +- name: check | Set default daemon nodename + set_fact: + scale_daemon_nodename: "{{ scale_daemon_nodename }}" + when: hostvars[inventory_hostname].scale_daemon_nodename is undefined + +- name: check | Set default admin nodename + set_fact: + scale_admin_nodename: "{{ scale_admin_nodename }}" + when: hostvars[inventory_hostname].scale_admin_nodename is undefined + - name: configure | Collect status of cesSharedRoot command: "{{ scale_command_path }}mmlsconfig cesSharedRoot" register: scale_ces_status diff --git a/roles/obj_install/meta/main.yml b/roles/obj_install/meta/main.yml index f7e9bc91..ba337e84 100644 --- a/roles/obj_install/meta/main.yml +++ b/roles/obj_install/meta/main.yml @@ -17,4 +17,3 @@ galaxy_info: dependencies: - ibm.spectrum_scale.obj_prepare - - ibm.spectrum_scale.ces_common From 4c0a26028b0b7839d8c85b28727380d225293460 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Tue, 28 Jun 2022 19:38:07 +0530 Subject: [PATCH 087/113] Node add fix Signed-off-by: Rajan Mishra --- roles/core_install/tasks/upgrade.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/roles/core_install/tasks/upgrade.yml b/roles/core_install/tasks/upgrade.yml index f19dc469..40e502c0 100644 --- a/roles/core_install/tasks/upgrade.yml +++ b/roles/core_install/tasks/upgrade.yml @@ -54,6 +54,11 @@ scale_repo_gpfsversion: "{{ package_gpfs_version }}" when: scale_install_repository_url is defined +- set_fact: + scale_vars_update: "{{ ansible_play_hosts| + map('extract', hostvars, 'scale_install_needsupdate')| + list }}" + run_once: true - block: ## run_once: true - name: update | Check if any running node needs to be updated @@ -67,6 +72,6 @@ ###################################################################### assert: that: - - true not in ansible_play_hosts | map('extract', hostvars, 'scale_install_needsupdate') | list + - "{{ 'True' not in scale_vars_update }}" msg: "{{ msg.split('\n') }}" run_once: true From c9090dc40ce7a9ac5e001f77cc264bbc1956c88b Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Fri, 1 Jul 2022 16:51:48 +0200 Subject: [PATCH 088/113] Add role name mapping table to migration guide Signed-off-by: Achim Christ --- MIGRATING.md | 145 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 103 insertions(+), 42 deletions(-) diff --git a/MIGRATING.md b/MIGRATING.md index 53fb9289..336ec565 100644 --- a/MIGRATING.md +++ b/MIGRATING.md @@ -14,48 +14,109 @@ All playbooks 
using the Ansible roles provided by this project need to adapt thi The following steps need to be taken in order to consume the `main` branch in your own projects: -- Repository contents need to be placed in a `collections/ansible_collections/ibm/spectrum_scale` directory, adjacent to your playbooks. The easiest way to do this is to clone the correct branch into the appropriate path: - - ```shell - $ git clone -b main https://github.com/IBM/ibm-spectrum-scale-install-infra.git collections/ansible_collections/ibm/spectrum_scale - ``` - - The resulting directory structure should look similar to this: - - ```shell - my_project/ - ├── collections/ - │ └── ansible_collections/ - │ └── ibm/ - │ └── spectrum_scale/ - │ └── ... - ├── hosts - └── playbook.yml - ``` - -- Once the repository contents are available in the appropriate path, roles can be referenced by using their Fully Qualified Collection Name (FQCN). A minimal playbook should look similar to this: - - ```yaml - # playbook.yml: - --- - - hosts: cluster01 - roles: - - ibm.spectrum_scale.core_prepare - - ibm.spectrum_scale.core_install - - ibm.spectrum_scale.core_configure - - ibm.spectrum_scale.core_verify - ``` - - Refer to the [Ansible User Guide](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html#using-collections-in-a-playbook) for details on using collections, including alternate syntax with the `collections` keyword. - - Note that all role names have changed: - - - Old naming: `[component]/[precheck|node|cluster|postcheck]` - - New naming: `[component]_[prepare|install|configure|verify]` - - Refer to the examples in the [samples/](samples/) directory for a list of new role names. - -- Some variables have been renamed for consistency as well, but it's expected that these changes only affect very few users. See [#590](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/590) for details, and refer to [VARIABLESNEW.md](VARIABLESNEW.md) for a complete listing of all available variables. +- Repository contents need to be placed in a `collections/ansible_collections/ibm/spectrum_scale` directory, adjacent to your playbooks. The easiest way to do this is to clone the correct branch into the appropriate path: + + ```shell + $ git clone -b main https://github.com/IBM/ibm-spectrum-scale-install-infra.git collections/ansible_collections/ibm/spectrum_scale + ``` + + The resulting directory structure should look similar to this: + + ```shell + my_project/ + ├── collections/ + │ └── ansible_collections/ + │ └── ibm/ + │ └── spectrum_scale/ + │ └── ... + ├── hosts + └── playbook.yml + ``` + +- Once the repository contents are available in the appropriate path, roles can be referenced by using their Fully Qualified Collection Name (FQCN). A minimal playbook should look similar to this: + + ```yaml + # playbook.yml: + --- + - hosts: cluster01 + roles: + - ibm.spectrum_scale.core_prepare + - ibm.spectrum_scale.core_install + - ibm.spectrum_scale.core_configure + - ibm.spectrum_scale.core_verify + ``` + + Refer to the [Ansible User Guide](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html#using-collections-in-a-playbook) for details on using collections, including alternate syntax with the `collections` keyword. + + Note that all role names have changed: + + - Old naming: `[component]/[precheck|node|cluster|postcheck]` + - New naming: `[component]_[prepare|install|configure|verify]` + + Refer to the [name mapping table](#role-name-mapping-table) for a list of new role names. 
+ +- Some variables have been renamed for consistency as well, but it's expected that these changes only affect very few users. See [#590](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/590) for details, and refer to [VARIABLESNEW.md](VARIABLESNEW.md) for a complete listing of all available variables. + +## Role Name Mapping Table + +| `master` branch | `main` branch | +| -------------------------------- | ---------------------------------------- | +| callhome/cluster | ibm.spectrum_scale.callhome_configure | +| callhome/node | ibm.spectrum_scale.callhome_install | +| callhome/postcheck | ibm.spectrum_scale.callhome_verify | +| callhome/precheck | ibm.spectrum_scale.callhome_prepare | +| core/cluster | ibm.spectrum_scale.core_configure | +| core/common | ibm.spectrum_scale.core_common | +| core/node | ibm.spectrum_scale.core_install | +| core/postcheck | ibm.spectrum_scale.core_verify | +| core/precheck | ibm.spectrum_scale.core_prepare | +| core/upgrade | ibm.spectrum_scale.core_upgrade | +| gui/cluster | ibm.spectrum_scale.gui_configure | +| gui/node | ibm.spectrum_scale.gui_install | +| gui/postcheck | ibm.spectrum_scale.gui_verify | +| gui/precheck | ibm.spectrum_scale.gui_prepare | +| gui/upgrade | ibm.spectrum_scale.gui_upgrade | +| nfs/cluster | ibm.spectrum_scale.nfs_configure | +| nfs/common | ibm.spectrum_scale.ces_common | +| nfs/node | ibm.spectrum_scale.nfs_install | +| nfs/postcheck | ibm.spectrum_scale.nfs_verify | +| nfs/precheck | ibm.spectrum_scale.nfs_prepare | +| nfs/upgrade | ibm.spectrum_scale.nfs_upgrade | +| remote_mount/ | ibm.spectrum_scale.remotemount_configure | +| scale_auth/upgrade | ibm.spectrum_scale.auth_upgrade | +| scale_ece/cluster | ibm.spectrum_scale.ece_configure | +| scale_ece/node | ibm.spectrum_scale.ece_install | +| scale_ece/precheck | ibm.spectrum_scale.ece_prepare | +| scale_ece/upgrade | ibm.spectrum_scale.ece_upgrade | +| scale_fileauditlogging/cluster | ibm.spectrum_scale.fal_configure | +| scale_fileauditlogging/node | ibm.spectrum_scale.fal_install | +| scale_fileauditlogging/postcheck | ibm.spectrum_scale.fal_verify | +| scale_fileauditlogging/precheck | ibm.spectrum_scale.fal_prepare | +| scale_fileauditlogging/upgrade | ibm.spectrum_scale.fal_upgrade | +| scale_hdfs/cluster | ibm.spectrum_scale.hdfs_configure | +| scale_hdfs/node | ibm.spectrum_scale.hdfs_install | +| scale_hdfs/postcheck | ibm.spectrum_scale.hdfs_verify | +| scale_hdfs/precheck | ibm.spectrum_scale.hdfs_prepare | +| scale_hdfs/upgrade | ibm.spectrum_scale.hdfs_upgrade | +| scale_hpt/node | ibm.spectrum_scale.afm_cos_install | +| scale_hpt/postcheck | ibm.spectrum_scale.afm_cos_verify | +| scale_hpt/precheck | ibm.spectrum_scale.afm_cos_prepare | +| scale_hpt/upgrade | ibm.spectrum_scale.afm_cos_upgrade | +| scale_object/cluster | ibm.spectrum_scale.obj_configure | +| scale_object/node | ibm.spectrum_scale.obj_install | +| scale_object/postcheck | ibm.spectrum_scale.obj_verify | +| scale_object/precheck | ibm.spectrum_scale.obj_prepare | +| scale_object/upgrade | ibm.spectrum_scale.obj_upgrade | +| smb/cluster | ibm.spectrum_scale.smb_configure | +| smb/node | ibm.spectrum_scale.smb_install | +| smb/postcheck | ibm.spectrum_scale.smb_verify | +| smb/precheck | ibm.spectrum_scale.smb_prepare | +| smb/upgrade | ibm.spectrum_scale.smb_upgrade | +| zimon/cluster | ibm.spectrum_scale.perfmon_configure | +| zimon/node | ibm.spectrum_scale.perfmon_install | +| zimon/postcheck | ibm.spectrum_scale.perfmon_verify | +| zimon/precheck | 
ibm.spectrum_scale.perfmon_prepare | +| zimon/upgrade | ibm.spectrum_scale.perfmon_upgrade | ## Migration script From 4538ae7195c19547e36076c0a90af705c05f418a Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Wed, 6 Jul 2022 14:56:19 +0530 Subject: [PATCH 089/113] RHEL9 directory structure for local and remote installation Signed-off-by: Rajan Mishra --- roles/fal_install/tasks/install_local_pkg.yml | 10 ++++++++++ roles/fal_install/tasks/install_remote_pkg.yml | 10 ++++++++++ roles/nfs_install/tasks/install_local_pkg.yml | 10 ++++++++++ roles/nfs_install/tasks/install_remote_pkg.yml | 10 ++++++++++ roles/perfmon_install/tasks/install_local_pkg.yml | 5 +++++ roles/perfmon_install/tasks/install_remote_pkg.yml | 5 +++++ roles/smb_install/tasks/install_local_pkg.yml | 5 +++++ roles/smb_install/tasks/install_remote_pkg.yml | 5 +++++ 8 files changed, 60 insertions(+) diff --git a/roles/fal_install/tasks/install_local_pkg.yml b/roles/fal_install/tasks/install_local_pkg.yml index be482acc..d83c841f 100644 --- a/roles/fal_install/tasks/install_local_pkg.yml +++ b/roles/fal_install/tasks/install_local_pkg.yml @@ -102,6 +102,16 @@ scale_fal_url: 'gpfs_rpms/rhel/' when: ansible_distribution in scale_rhel_distribution +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_rpms/rhel8/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | file audit logging path set_fact: scale_fal_url: 'gpfs_debs/ubuntu/' diff --git a/roles/fal_install/tasks/install_remote_pkg.yml b/roles/fal_install/tasks/install_remote_pkg.yml index 7d29e7f0..1996c3ec 100644 --- a/roles/fal_install/tasks/install_remote_pkg.yml +++ b/roles/fal_install/tasks/install_remote_pkg.yml @@ -90,6 +90,16 @@ scale_fal_url: 'gpfs_rpms/rhel/' when: ansible_distribution in scale_rhel_distribution +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_rpms/rhel8/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | file audit logging path set_fact: scale_fal_url: 'gpfs_debs/ubuntu/' diff --git a/roles/nfs_install/tasks/install_local_pkg.yml b/roles/nfs_install/tasks/install_local_pkg.yml index 8a696f59..583f4ed0 100644 --- a/roles/nfs_install/tasks/install_local_pkg.yml +++ b/roles/nfs_install/tasks/install_local_pkg.yml @@ -107,6 +107,11 @@ scale_nfs_url: 'ganesha_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | nfs path set_fact: scale_nfs_url: 'ganesha_debs/ubuntu16/' @@ -137,6 +142,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | 
zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' diff --git a/roles/nfs_install/tasks/install_remote_pkg.yml b/roles/nfs_install/tasks/install_remote_pkg.yml index a861079f..cb929943 100644 --- a/roles/nfs_install/tasks/install_remote_pkg.yml +++ b/roles/nfs_install/tasks/install_remote_pkg.yml @@ -81,6 +81,11 @@ scale_nfs_url: 'ganesha_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | nfs path set_fact: scale_nfs_url: 'ganesha_debs/ubuntu16/' @@ -111,6 +116,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' diff --git a/roles/perfmon_install/tasks/install_local_pkg.yml b/roles/perfmon_install/tasks/install_local_pkg.yml index c38bb80a..ba944101 100644 --- a/roles/perfmon_install/tasks/install_local_pkg.yml +++ b/roles/perfmon_install/tasks/install_local_pkg.yml @@ -121,6 +121,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/ubuntu16/' diff --git a/roles/perfmon_install/tasks/install_remote_pkg.yml b/roles/perfmon_install/tasks/install_remote_pkg.yml index 62af260f..b1881d17 100644 --- a/roles/perfmon_install/tasks/install_remote_pkg.yml +++ b/roles/perfmon_install/tasks/install_remote_pkg.yml @@ -95,6 +95,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/ubuntu16/' diff --git a/roles/smb_install/tasks/install_local_pkg.yml b/roles/smb_install/tasks/install_local_pkg.yml index 358354da..06912cdd 100644 --- a/roles/smb_install/tasks/install_local_pkg.yml +++ b/roles/smb_install/tasks/install_local_pkg.yml @@ -108,6 +108,11 @@ scale_smb_url: 'smb_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | smb path + set_fact: + scale_smb_url: 'smb_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | smb path set_fact: scale_smb_url: 'smb_rpms/sles12/' diff --git a/roles/smb_install/tasks/install_remote_pkg.yml b/roles/smb_install/tasks/install_remote_pkg.yml index 6f9f18d1..67cc2e4a 100644 --- a/roles/smb_install/tasks/install_remote_pkg.yml +++ b/roles/smb_install/tasks/install_remote_pkg.yml @@ -81,6 +81,11 @@ scale_smb_url: 'smb_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and 
ansible_distribution_major_version == '8' +- name: install | smb path + set_fact: + scale_smb_url: 'smb_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | smb path set_fact: scale_smb_url: 'smb_rpms/sles12/' From f7c4d129ccd652aedfe66aadc82e539175d531d1 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Fri, 15 Jul 2022 14:26:40 +0530 Subject: [PATCH 090/113] Missing code from master to nextgen for tiebreaker node installation Signed-off-by: Rajan Mishra --- roles/core_install/tasks/install_dir_pkg.yml | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/roles/core_install/tasks/install_dir_pkg.yml b/roles/core_install/tasks/install_dir_pkg.yml index 2269ff51..71c6a3d3 100644 --- a/roles/core_install/tasks/install_dir_pkg.yml +++ b/roles/core_install/tasks/install_dir_pkg.yml @@ -35,10 +35,10 @@ - block: - name: install | Copy installation package to node - copy: + synchronize: src: "{{ scale_install_directory_pkg_path }}" dest: "{{ scale_extracted_path }}" - mode: a+x + use_ssh_args: yes - name: install | Set installation package path set_fact: @@ -56,8 +56,20 @@ rpm_key: state: present key: "{{ dir_path }}/SpectrumScale_public_key.pgp" - when: ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) + when: + - ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) and scale_enable_gpg_check and scale_version >= "5.0.5.0") + - scale_gpgkey_path is undefined + +- name: Import a gpg key from a file + rpm_key: + state: present + key: "{{ scale_gpgkey_path }}/SpectrumScale_public_key.pgp" + when: + - ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) + and scale_enable_gpg_check and scale_version >= "5.0.5.0") + - scale_gpgkey_path is defined + # # Find GPFS BASE From 6da6ba8aadfc4a033b888467353fd05f2c82f015 Mon Sep 17 00:00:00 2001 From: Dherendra Singh Date: Fri, 14 Oct 2022 10:33:34 +0530 Subject: [PATCH 091/113] Update install_repository.yml --- roles/hdfs_install/tasks/install_repository.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/roles/hdfs_install/tasks/install_repository.yml b/roles/hdfs_install/tasks/install_repository.yml index 6ebb0e20..a2a9a9ac 100644 --- a/roles/hdfs_install/tasks/install_repository.yml +++ b/roles/hdfs_install/tasks/install_repository.yml @@ -9,6 +9,11 @@ scale_hdfs_url: "{{ hdfs_rpm_path_rhel }}" when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | hdfs path on rhel9 + set_fact: + scale_hdfs_url: "{{ hdfs_rpm_path_rhel }}" + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | Configure hdfs YUM repository yum_repository: name: spectrum-scale-hdfs From 253a9847af2f5a41e1f43fcb3a59f11abca6652a Mon Sep 17 00:00:00 2001 From: Dherendra Singh Date: Tue, 18 Oct 2022 13:32:29 +0530 Subject: [PATCH 092/113] RHEL9 support for HDFS From 10fe8e4193db74d8fd2128d8646cabfdf7ee0424 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Wed, 2 Nov 2022 14:48:08 +0530 Subject: [PATCH 093/113] Remote mount variable name fix Signed-off-by: Rajan Mishra --- roles/remotemount_configure/tasks/mount_filesystems.yml | 3 ++- roles/remotemount_configure/tasks/precheck.yml | 8 ++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git 
a/roles/remotemount_configure/tasks/mount_filesystems.yml b/roles/remotemount_configure/tasks/mount_filesystems.yml index a36c2317..8873e968 100644 --- a/roles/remotemount_configure/tasks/mount_filesystems.yml +++ b/roles/remotemount_configure/tasks/mount_filesystems.yml @@ -62,6 +62,7 @@ status_code: - 200 - 400 + - 404 register: remote_filesystem_results ignore_errors: true run_once: True @@ -111,4 +112,4 @@ delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True - when: (remote_filesystem_results.status == 400) \ No newline at end of file + when: (remote_filesystem_results.status == 400) or (remote_filesystem_results.status == 404) diff --git a/roles/remotemount_configure/tasks/precheck.yml b/roles/remotemount_configure/tasks/precheck.yml index 88227f13..b789d7fb 100644 --- a/roles/remotemount_configure/tasks/precheck.yml +++ b/roles/remotemount_configure/tasks/precheck.yml @@ -71,9 +71,9 @@ - block: # RESTAPI - when: scale_remotemount_client_no_gui == false - name: Main | Storage Cluster (owner) | Check Connectivity to Storage Cluster GUI uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -97,9 +97,9 @@ - name: Main | Client Cluster (access) | Check Connectivity to Client Cluster GUI uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" From 14377b17d5d4eff81105f4af5eb3badea7f8249f Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Wed, 16 Nov 2022 13:13:33 +0530 Subject: [PATCH 094/113] Fix for GUI user creation Signed-off-by: Rajan Mishra --- roles/gui_configure/tasks/main.yml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/roles/gui_configure/tasks/main.yml b/roles/gui_configure/tasks/main.yml index c82851c7..8d0c0ca2 100644 --- a/roles/gui_configure/tasks/main.yml +++ b/roles/gui_configure/tasks/main.yml @@ -13,11 +13,22 @@ - scale_gui_password_policy_change | bool tags: chpasswdpolicy +- name: check | Check gui nodes if defined + add_host: + name: "{{ hostvars[item]['inventory_hostname'] }}" + groups: scale_gui_defined_listnodes + when: + - hostvars[item].scale_cluster_gui is defined + - (hostvars[item].scale_cluster_gui is defined and hostvars[item].scale_cluster_gui | bool) + with_items: "{{ ansible_play_hosts }}" + changed_when: false + - import_tasks: users.yml when: - - scale_cluster_gui | bool - scale_gui_admin_user is defined - scale_gui_admin_hc_vault_user is not defined + - groups['scale_gui_defined_listnodes'] is defined and groups['scale_gui_defined_listnodes'] | length > 0 + delegate_to: "{{ groups['scale_gui_defined_listnodes'].0 }}" tags: users - import_tasks: 
ldap.yml From f823cf4bb10fc38a5cc3d354f3dca54ed8e27d88 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Thu, 22 Dec 2022 16:16:59 +0530 Subject: [PATCH 095/113] New option for mmcrcluster for future release dev branch Signed-off-by: Rajan Mishra --- roles/core_configure/tasks/cluster.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/core_configure/tasks/cluster.yml b/roles/core_configure/tasks/cluster.yml index d1030b83..d5dfb2d7 100644 --- a/roles/core_configure/tasks/cluster.yml +++ b/roles/core_configure/tasks/cluster.yml @@ -149,6 +149,12 @@ when: - scale_cluster_config is defined and scale_cluster_config.remote_file_copy is defined + - name: cluster | Set gpfs cluster user defined port if it is defined + set_fact: + extra_option: "{{ extra_option }} --port {{ scale_cluster_config.scale_port_number }}" + when: + - scale_cluster_config is defined and scale_cluster_config.scale_port_number is defined + - name: cluster | Create new cluster command: /usr/lpp/mmfs/bin/mmcrcluster -N /var/mmfs/tmp/NodeFile -C {{ scale_cluster_clustername }} {{ profile_type }} {{ extra_option }} notify: accept-licenses From 3b6f5853308580d3899239c3082fa19ab3a4f888 Mon Sep 17 00:00:00 2001 From: Christoph Keil Date: Fri, 6 Jan 2023 10:49:24 +0100 Subject: [PATCH 096/113] Add step to wait until GUI is up and running Ansible failed, when two GUI's are installed and started. Signed-off-by: Christoph Keil --- roles/gui_configure/tasks/configure.yml | 9 +++++++++ roles/nfs_install/tasks/install_local_pkg.yml | 1 + 2 files changed, 10 insertions(+) diff --git a/roles/gui_configure/tasks/configure.yml b/roles/gui_configure/tasks/configure.yml index 0ff8e7d8..c96bc766 100644 --- a/roles/gui_configure/tasks/configure.yml +++ b/roles/gui_configure/tasks/configure.yml @@ -15,8 +15,17 @@ name: gpfsgui state: started enabled: true + no_block: true when: scale_cluster_gui | bool +# Verify GUI is up and running +- name: Wait until gpfsgui is up and running + shell: "systemctl is-active gpfsgui" + register: systemctl_out + until: systemctl_out.stdout == "active" + retries: 10 + delay: 20 + # # Initialize the GUI so that user dont need to wait and HTTPs certificate and be imported. 
# diff --git a/roles/nfs_install/tasks/install_local_pkg.yml b/roles/nfs_install/tasks/install_local_pkg.yml index 583f4ed0..3867582a 100644 --- a/roles/nfs_install/tasks/install_local_pkg.yml +++ b/roles/nfs_install/tasks/install_local_pkg.yml @@ -361,6 +361,7 @@ with_items: - "{{ scale_install_gpfs_nfs_python.files }}" - "{{ scale_install_gpfs_nfs_doc.files }}" + - "{{ scale_install_gpfs_nfs_gpfs.files }}" when: ansible_distribution in scale_ubuntu_distribution - name: install | Add GPFS package to list From 28a294d48a088cc53d2a08e6067d9d7e88b9dd74 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Tue, 17 Jan 2023 14:10:51 +0530 Subject: [PATCH 097/113] Fix GPL sles installation issue Signed-off-by: Rajan Mishra --- roles/core_install/tasks/install_gplbin.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/roles/core_install/tasks/install_gplbin.yml b/roles/core_install/tasks/install_gplbin.yml index 620e1f2b..80883c95 100644 --- a/roles/core_install/tasks/install_gplbin.yml +++ b/roles/core_install/tasks/install_gplbin.yml @@ -23,6 +23,7 @@ description: IBM Spectrum Scale (GPFS) GPL module baseurl: "{{ scale_install_gplbin_repository_url }}" gpgcheck: false + validate_certs: no state: present when: - ansible_pkg_mgr == 'apt' @@ -33,9 +34,10 @@ zypper_repository: name: spectrum-scale-gplbin description: IBM Spectrum Scale (GPFS) GPL module - baseurl: "{{ scale_install_gplbin_repository_url }}" - gpgcheck: false + repo: "{{ scale_install_gplbin_repository_url }}" + disable_gpg_check: yes state: present + overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' - scale_install_gplbin_repository_url is defined From 84ab9b5ff4de7e3ff671ef96bf0b6eee18f11f4d Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Tue, 17 Jan 2023 14:18:01 +0530 Subject: [PATCH 098/113] GPL fix for ubuntu Signed-off-by: Rajan Mishra --- roles/core_install/tasks/install_gplbin.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/roles/core_install/tasks/install_gplbin.yml b/roles/core_install/tasks/install_gplbin.yml index 80883c95..df2468b5 100644 --- a/roles/core_install/tasks/install_gplbin.yml +++ b/roles/core_install/tasks/install_gplbin.yml @@ -19,12 +19,13 @@ - name: install | Configure GPL module repository apt_repository: - name: spectrum-scale-gplbin - description: IBM Spectrum Scale (GPFS) GPL module - baseurl: "{{ scale_install_gplbin_repository_url }}" - gpgcheck: false + filename: spectrum-scale-gplbin + repo: "{{ scale_install_gplbin_repository_url }}" validate_certs: no state: present + update_cache: yes + codename: IBM Spectrum Scale (GPFS) GPL module + mode: 0777 when: - ansible_pkg_mgr == 'apt' - scale_install_gplbin_repository_url is defined From 11bca30bf62642f731864a5c5fde199270cc5e54 Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Tue, 24 Jan 2023 23:03:55 +0530 Subject: [PATCH 099/113] Fixed mmbuilgpl upgrade issue Signed-off-by: Rajan Mishra --- roles/core_upgrade/tasks/build.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/roles/core_upgrade/tasks/build.yml b/roles/core_upgrade/tasks/build.yml index 1583cec5..b51ab046 100644 --- a/roles/core_upgrade/tasks/build.yml +++ b/roles/core_upgrade/tasks/build.yml @@ -29,8 +29,7 @@ # - name: build | Compile GPL module shell: export LINUX_DISTRIBUTION={{ scale_build_distribution }} ; /usr/lpp/mmfs/bin/mmbuildgpl --quiet - args: - creates: /lib/modules/{{ ansible_kernel }}/extra/mmfs26.ko + register: scale_build_gpl - name: build | Stat GPL module stat: From 
3b79f4a416f097c1618b6ef6034f4a73e6b97fac Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Thu, 2 Feb 2023 22:45:13 +0530 Subject: [PATCH 100/113] Fixed FAL issue Signed-off-by: Rajan Mishra --- roles/fal_configure/tasks/configure.yml | 8 ++++++++ roles/fal_configure/tasks/configure_fal.yml | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/roles/fal_configure/tasks/configure.yml b/roles/fal_configure/tasks/configure.yml index ee8c5ac1..340dc8a8 100644 --- a/roles/fal_configure/tasks/configure.yml +++ b/roles/fal_configure/tasks/configure.yml @@ -23,6 +23,13 @@ changed_when: false failed_when: false + - name: configure | Find existing File audit logging filesystem(s) + shell: + cmd: "/usr/lpp/mmfs/bin/mmaudit all list -Y | grep -v HEADER | cut -d ':' -f 8 | uniq" + register: scale_existing_audit + changed_when: false + failed_when: false + - name: configure | configure file audit logging vars: scale_fal_localspace_force: "{{ '--skip-local-space-check' if scale_fal_skip_localspace is defined else '' }}" @@ -33,6 +40,7 @@ {{ scale_fal_localspace_force }} register: scale_audit_command when: + - item not in scale_existing_audit.stdout_lines - scale_storage_fsdefs_audit is defined and scale_storage_fsdefs_audit | length >= 1 - (scale_storage_fsparams_audit[item].scale_fal_enable is defined) and (scale_storage_fsparams_audit[item].scale_fal_enable | bool) with_items: "{{ scale_storage_fsdefs_audit }}" diff --git a/roles/fal_configure/tasks/configure_fal.yml b/roles/fal_configure/tasks/configure_fal.yml index 8c8b1fb1..d0228b65 100644 --- a/roles/fal_configure/tasks/configure_fal.yml +++ b/roles/fal_configure/tasks/configure_fal.yml @@ -23,6 +23,13 @@ changed_when: false failed_when: false + - name: configure | Find existing File audit logging filesystem(s) + shell: + cmd: "/usr/lpp/mmfs/bin/mmaudit all list -Y | grep -v HEADER | cut -d ':' -f 8 | uniq" + register: scale_existing_audit + changed_when: false + failed_when: false + - name: configure | configure file audit logging vars: scale_fal_localspace_force: "{{ '--skip-local-space-check' if scale_fal_skip_localspace is defined else '' }}" @@ -33,6 +40,7 @@ {{ scale_fal_localspace_force }} register: scale_audit_command when: + - item not in scale_existing_audit.stdout_lines - scale_storage_fsdefs_audit is defined and scale_storage_fsdefs_audit | length >= 1 - (scale_storage_fsparams_audit[item].scale_fal_enable is defined) and (scale_storage_fsparams_audit[item].scale_fal_enable | bool) with_items: "{{ scale_storage_fsdefs_audit }}" From 08737e8788667a8248159d37c860c0ccf0ea13fc Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Mon, 20 Mar 2023 11:59:44 +0530 Subject: [PATCH 101/113] Multi DA Support Dev branch Signed-off-by: Rajan Mishra --- roles/ece_configure/tasks/create_vdisk.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/roles/ece_configure/tasks/create_vdisk.yml b/roles/ece_configure/tasks/create_vdisk.yml index 98a0f2c3..e239dbc7 100644 --- a/roles/ece_configure/tasks/create_vdisk.yml +++ b/roles/ece_configure/tasks/create_vdisk.yml @@ -23,6 +23,27 @@ - item.ec is defined - item.blocksize is defined - item.Size is defined + - item.da is not defined + + - name: create | Define Vdiskset + vars: + current_vs: "{{ item.vdisk | default('vs_' + (item.rg | regex_replace('\\W', '_')) | basename) }}" + current_rg: "{{ item.rg }}" + current_code: "{{ item.ec }}" + current_bs: "{{ item.blocksize }}" + current_size: "{{ item.Size }}" + extra_option: "{{ item.da }}" + command: "{{ scale_command_path }}mmvdisk 
vs define --vs {{ current_vs }} --rg {{ current_rg }} --code {{ current_code }} --bs {{ current_bs }} --ss {{ current_size }} --da {{ extra_option }}" + register: scale_vs_define + failed_when: scale_vs_define.rc != 0 + when: + - current_vs not in scale_existing_vs.stdout_lines + - item.vdisk is defined + - item.rg is defined + - item.ec is defined + - item.blocksize is defined + - item.Size is defined + - item.da is defined - name: create | Create Vdiskset vars: From 31575554f79ddfdd0ab8ad37a4b99d6ee8eabd3c Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Mon, 20 Mar 2023 11:29:07 +0100 Subject: [PATCH 102/113] Rename "Spectrum Scale" to "IBM Storage Scale" Signed-off-by: Achim Christ --- README.md | 61 +++++++++++---------- VARIABLES.md | 140 ++++++++++++++++++++++++++++++++++++++++++++++++ VARIABLESNEW.md | 140 ------------------------------------------------ 3 files changed, 172 insertions(+), 169 deletions(-) create mode 100644 VARIABLES.md delete mode 100644 VARIABLESNEW.md diff --git a/README.md b/README.md index 1face777..6645f654 100644 --- a/README.md +++ b/README.md @@ -2,10 +2,10 @@ __Important__: You are viewing the `main` branch of this repository. If you've p * * * -IBM Spectrum Scale (GPFS) Deployment using Ansible Roles -======================================================== +IBM Storage Scale (GPFS) Deployment using Ansible Roles +======================================================= -Ansible project with multiple roles for installing and configuring IBM Spectrum Scale (GPFS). +Ansible project with multiple roles for installing and configuring IBM Storage Scale (GPFS) software defined storage. **Table of Contents** @@ -47,14 +47,16 @@ Features - [x] Generate SSH key - [x] User must set up base OS repositories -#### Core Spectrum Scale prerequisites +#### Core IBM Storage Scale prerequisites + - [x] Install yum-utils package - [x] Install gcc-c++, kernel-devel, make - [x] Install elfutils,elfutils-devel (RHEL8 specific) -#### Core Spectrum Scale Cluster features -- [x] Install core Spectrum Scale packages on Linux nodes -- [x] Install Spectrum Scale license packages on Linux nodes +#### Core IBM Storage Scale Cluster features + +- [x] Install core IBM Storage Scale packages on Linux nodes +- [x] Install IBM Storage Scale license package on Linux nodes - [x] Compile or install pre-compiled Linux kernel extension (mmbuildgpl) - [x] Configure client and server license - [x] Assign default quorum (maximum 7 quorum nodes) if user has not defined in the inventory @@ -70,21 +72,23 @@ Features - [x] Extend NSDs and file system - [x] Add disks to existing file systems -#### Spectrum Scale Management GUI features -- [x] Install Spectrum Scale management GUI packages on GUI designated nodes -- [x] maximum 3 management GUI nodes to be configured +#### IBM Storage Scale Management GUI features + +- [x] Install IBM Storage Scale management GUI packages on designated GUI nodes - [x] Install performance monitoring sensor packages on all Linux nodes - [x] Install performance monitoring packages on all GUI designated nodes - [x] Configure performance monitoring and collectors - [ ] Configure HA federated mode collectors -#### Spectrum Scale Callhome features -- [x] Install Spectrum Scale callhome packages on all cluster nodes -- [x] Configure callhome +#### IBM Storage Scale Call Home features + +- [x] Install IBM Storage Scale Call Home packages on all cluster nodes +- [x] Configure Call Home -#### Spectrum Scale CES (SMB and NFS) Protocol supported features (5.0.5.2) -- [x] Install 
Spectrum Scale SMB or NFS on selected cluster nodes -- [x] Install Spectrum Scale OBJECT on selected cluster nodes (5.1.1.0) +#### IBM Storage Scale CES (SMB and NFS) Protocol supported features + +- [x] Install IBM Storage Scale SMB or NFS on selected cluster nodes (5.0.5.2 and above) +- [x] Install IBM Storage Scale Object on selected cluster nodes (5.1.1.0 and above) - [x] CES IPV4 or IPV6 support - [x] CES interface mode support @@ -129,15 +133,15 @@ Users need to have a basic understanding of the [Ansible concepts](https://docs. Note that [Python 3](https://docs.ansible.com/ansible/latest/reference_appendices/python_3_support.html) is required for certain functionality of this project to work. Ansible should automatically detect and use Python 3 on managed machines, refer to the [Ansible documentation](https://docs.ansible.com/ansible/latest/reference_appendices/python_3_support.html#using-python-3-on-the-managed-machines-with-commands-and-playbooks) for details and workarounds. -- **Download Spectrum Scale packages** +- **Download IBM Storage Scale packages** - A Developer Edition Free Trial is available at this site: https://www.ibm.com/account/reg/us-en/signup?formid=urx-41728 - - Customers who have previously purchased Spectrum Scale can obtain entitled versions from IBM Fix Central. Visit https://www.ibm.com/support/fixcentral and search for 'IBM Spectrum Scale (Software defined storage)'. + - Customers who have previously purchased IBM Storage Scale can obtain entitled versions from IBM Fix Central. Visit https://www.ibm.com/support/fixcentral and search for 'IBM Storage Scale (Software defined storage)'. -- **Create password-less SSH keys between all Spectrum Scale nodes in the cluster** +- **Create password-less SSH keys between all nodes in the cluster** - A pre-requisite for installing Spectrum Scale is that password-less SSH must be configured among all nodes in the cluster. Password-less SSH must be configured and verified with [FQDN](https://en.wikipedia.org/wiki/Fully_qualified_domain_name), hostname, and IP of every node to every node. + A pre-requisite for installing IBM Storage Scale is that password-less SSH must be configured among all nodes in the cluster. Password-less SSH must be configured and verified with [FQDN](https://en.wikipedia.org/wiki/Fully_qualified_domain_name), hostname, and IP of every node to every node. Example: @@ -179,7 +183,7 @@ Installation Instructions - **Create Ansible inventory** - Define Spectrum Scale nodes in the [Ansible inventory](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) (e.g. `hosts`) in the following format: + Define IBM Storage Scale nodes in the [Ansible inventory](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) (e.g. `hosts`) in the following format: ```yaml # hosts: @@ -223,7 +227,7 @@ Installation Instructions Refer to [VARIABLES.md](VARIABLES.md) for a full list of all supported configuration options. -- **Run the playbook to install and configure the Spectrum Scale cluster** +- **Run the playbook to install and configure the IBM Storage Scale cluster** - Using the `ansible-playbook` command: @@ -292,9 +296,8 @@ Users can define [variables](https://docs.ansible.com/ansible/latest/user_guide/ Additional functionality can be enabled by defining further variables. 
Browse the examples in the [samples/](samples/) directory to learn how to: - Configure storage and file systems (see [samples/playbook_storage.yml](samples/playbook_storage.yml)) -- Configure node classes and Spectrum Scale configuration attributes (see [samples/playbook_nodeclass.yml](samples/playbook_nodeclass.yml)) -- Deploy Spectrum Scale using JSON inventory (see [samples/playbook_json_ces.yml](samples/playbook_json_ces.yml)) - +- Configure node classes and configuration attributes (see [samples/playbook_nodeclass.yml](samples/playbook_nodeclass.yml)) +- Deploy IBM Storage Scale using JSON inventory (see [samples/playbook_json_ces.yml](samples/playbook_json_ces.yml)) Available Roles --------------- @@ -325,9 +328,9 @@ Note that [Core GPFS](roles/core) is the only mandatory role, all other roles ar Cluster Membership ------------------ -All hosts in the play are configured as nodes in the same Spectrum Scale cluster. If you want to add hosts to an existing cluster then add at least one node from that existing cluster to the play. +All hosts in the play are configured as nodes in the same IBM Storage Scale cluster. If you want to add hosts to an existing cluster then add at least one node from that existing cluster to the play. -You can create multiple clusters by running multiple plays. Note that you will need to [reload the inventory](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/meta_module.html) to clear dynamic groups added by the Spectrum Scale roles: +You can create multiple clusters by running multiple plays. Note that you will need to [reload the inventory](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/meta_module.html) to clear dynamic groups added by the IBM Storage Scale roles: ```yaml - name: Create one cluster @@ -354,12 +357,12 @@ The roles in this project can (currently) be used to create new clusters or exte Furthermore, upgrades are not currently in scope of this role. Spectrum Scale supports rolling online upgrades (by taking down one node at a time), but this requires careful planning and monitoring and might require manual intervention in case of unforeseen problems. +Furthermore, upgrades are not currently in scope of this role. IBM Storage Scale supports rolling online upgrades (by taking down one node at a time), but this requires careful planning and monitoring and might require manual intervention in case of unforeseen problems. Troubleshooting --------------- -The roles in this project store configuration files in `/var/mmfs/tmp` on the first host in the play. These configuration files are kept to determine if definitions have changed since the previous run, and to decide if it's necessary to run certain Spectrum Scale commands (again). When experiencing problems one can simply delete these configuration files from `/var/mmfs/tmp` in order to clear the cache — doing so forces re-application of all definitions upon the next run. As a downside, the next run may take longer than expected as it might re-run unnecessary Spectrum Scale commands. This will automatically re-generate the cache. - +The roles in this project store configuration files in `/var/mmfs/tmp` on the first host in the play. These configuration files are kept to determine if definitions have changed since the previous run, and to decide if it's necessary to run certain IBM Storage Scale commands (again). 
When experiencing problems one can simply delete these configuration files from `/var/mmfs/tmp` in order to clear the cache — doing so forces re-application of all definitions upon the next run. As a downside, the next run may take longer than expected as it might re-run unnecessary IBM Storage Scale commands. This will automatically re-generate the cache. Reporting Issues and Feedback ----------------------------- diff --git a/VARIABLES.md b/VARIABLES.md new file mode 100644 index 00000000..5276cfc7 --- /dev/null +++ b/VARIABLES.md @@ -0,0 +1,140 @@ +Variables used by IBM Storage Scale (GPFS) Ansible project +========================================================== + +Variables list is dived into each of the Ansible roles. + +Role: Core - Core IBM Storage Scale installation and configuration +------------------------------------------------------------------ + +| Variables | Default | Options | User Mandatory | Descriptions | +| -------------------------------------- | ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| scale_architecture: | {{ansible_architecture}} | x86_64 or ppc64le | no | This ansible_architecture is gather from ansible get_facts module, IBM Storage Scale architecture that you want to install on your nodes. 
| +| scale_version: | none | 5.x.x.x | yes | Specify the IBM Storage Scale version that you want to install on your nodes, for example 5.0.5.x. | +| scale_daemon_nodename: | {{ansible_hostname}} | none | no | IBM Storage Scale daemon nodename, defaults to the node's hostname | +| scale_admin_nodename: | {{ansible_hostname}} | none | no | IBM Storage Scale admin nodename, defaults to the node's hostname | +| scale_state: | present | present,maintenance,absent | no | Desired state of the IBM Storage Scale node. present - node will be added to cluster, daemon will be started 
 maintenance -
node will be added to cluster, daemon will not be started absent - node will be removed from cluster | +| scale_prepare_disable_selinux | false | true or false | no | Whether or not to disable SELinux. | +| scale_reboot_automatic | false | true or false | no | Whether or not to automatically reboot nodes - if set to false then only a message is printed. If set to true then nodes are automatically rebooted (dangerous!). | +| scale_prepare_enable_ssh_login | false | true or false | no | Whether or not enable SSH root login (PermitRootLogin) and public key authentication (PubkeyAuthentication). | +| scale_prepare_restrict_ssh_address | false | true or false | no | Whether or not to restrict SSH access to the admin nodename (ListenAddress). Requires scale_prepare_enable_ssh_login to be enabled. | +| scale_prepare_disable_ssh_hostkeycheck | false | true or false | no | Whether or not to disable SSH hostkey checking (StrictHostKeyChecking). | +| scale_prepare_exchange_keys | false | true or false | no | Path to public SSH key - will be generated (if it does not exist) and exchanged between nodes. Requires scale_prepare_exchange_keys to be enabled, too. | +| scale_prepare_pubkey_path | /root/.ssh/id_rsa.pub | /root/.ssh/gpfskey.pub | no | example: /root/.ssh/gpfskey.pub | +| scale_prepare_disable_firewall | default: false | true or false | no | Whether or not to disable Linux firewalld - if you need to keep firewalld active then change this variable to false and apply your custom firewall rules prior to running this role (e.g. as pre_tasks). | +| scale_install_localpkg_path | none | /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install | yes | Specify the path to the self-extracting IBM Storage Scale installation archive on the local system (accessible on Ansible control machine) - it will be copied to your nodes. | +| scale_install_remotepkg_path | none | /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install | yes | Specify the path to IBM Storage Scale installation package on the remote system (accessible on Ansible managed node). | +| scale_install_repository_url | none | example: http://server/gpfs/ | yes | Specify the URL of the (existing) IBM Storage Scale YUM repository (copy the contents of /usr/lpp/mmfs/{{ scale_version }}/ to a web server in order to build your repository).
Note that if this is a URL then a new repository definition will be created. If this variable is set to existing then it is assumed that a repository definition already exists and thus will _not_ be created. | +| scale_install_directory_pkg_path | none | example: /tmp/gpfs/ | yes | Specify the path to the user-provided directory, containing all IBM Storage Scale packages. Note that for this installation method all packages need to be kept in a single directory. | +| scale_cluster_quorum | false | true or false | no | If you don't specify any quorum nodes then the first seven hosts in your inventory will automatically be assigned the quorum role, even if this variable is false | +| scale_cluster_manager | false | true or false | no | Node's default manager role - you'll likely want to define per-node roles in your inventory | +| scale_cluster_profile_name: | none | gpfsprotocoldefaults or gpfsprotocolrandomio | no | Specifies a predefined profile of attributes to be applied. System-defined profiles are located in /usr/lpp/mmfs/profiles/
The following system-defined profile names are accepted: gpfsprotocoldefaults and gpfsprotocolrandomio
eg. If you want to apply gpfsprotocoldefaults then specify scale_cluster_profile_name: gpfsprotocoldefaults | +| scale_cluster_profile_dir_path | /usr/lpp/mmfs/profiles/ | Path to cluster profile: example: /usr/lpp/mmfs/profiles/ | no | Fixed variable related to mmcrcluster profile. System-defined profiles are located in /usr/lpp/mmfs/profiles/ | +| scale_enable_gpg_check: | true | true or false | no | Enable/disable gpg key flag | +| scale_install_localpkg_tmpdir_path | /tmp | path to folder. | no | Temporary directory to copy installation package to (local package installation method) | +| scale_nodeclass: | none | Name of the nodeclass: example scale nodeclass: - class1 | no | Node classes can be defined on a per-node basis by defining the scale_nodeclass variable. | +| scale_config: | none | scale_config:
  - nodeclass: class1
    params:
        - pagepool: 4G
        - autoload: yes
        - ignorePrefetchLunCount: yes | no | Configuration attributes can be defined as variables for _any_ host in the play
The host for which you define the configuration attribute is irrelevant. Refer to the man mmchconfig man page for a list of available configuration attributes. | +| scale_storage: | none | scale_storage:
     filesystem: gpfs01
     blockSize: 4M
     maxMetadataReplicas: 2
     defaultMetadataReplicas: 2
     maxDataReplicas: 2
     defaultDataReplicas: 2
     numNodes: 16
     automaticMountOption: true
     defaultMountPoint: /mnt/gpfs01
     disks:
          - device: /dev/sdb
            nsd: nsd_1
            servers: scale01
            failureGroup: 10
            usage: metadataOnly
            pool: system
          - device: /dev/sdc
            nsd: nsd_2
            servers: scale01
            failureGroup: 10
            usage: dataOnly
            pool: data | no | Refer to man mmchfs and man mmchnsd man pages for a description of these storage parameters.
The filesystem parameter is mandatory; the servers and device parameters are mandatory for each of the file system's disks.
All other file system and disk parameters are optional. scale_storage _must_ be defined using group variables.
Do _not_ define disk parameters using host variables or inline variables in your playbook.
Doing so would apply them to all hosts in the group/play, thus defining the same disk multiple times... | +| scale_admin_node | false | true or false | no | Set admin flag on node for Ansible to use. | +| scale_nsd_server | scale_nsd_server | true or false | no | Set nsd flag for installation purpose | + +Role: GUI - GUI for Management of IBM Storage Scale Cluster +----------------------------------------------------------- + +| Variables | Default | Options | User Mandatory | Descriptions | +| --------------------------------- | ------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------ | +| scale_gui_hide_tip_callhome | false | true or false | no | Hide the "Callhome not enabled" tip in the GUI | +| scale_cluster_gui: | false | true or false | no | Install IBM Storage Scale GUI on nodes, set by host variables. | +| scale_service_gui_start: | true | true or false | no | Whether or not to start the Scale GUI after installation. | +| scale_gui_admin_user: | none | admin | no | Specify a name for the admin user to be created. | +| scale_gui_admin_password: | none | Admin@GUI! | no | Password to be set on the admin user | +| scale_gui_admin_role: | none | SecurityAdmin,SystemAdmin | no | Role access for the admin user, check IBM doc for valid roles. | +| scale_gui_user_username: | none | SEC | no | Extra IBM Storage Scale GUI user, for example Monitor or RestAPI. | +| scale_gui_user_password: | none | Storage@Scale1 | no | Password for the extra user | +| scale_gui_user_role: | none | SystemAdmin | no | Role access for the extra user. | +| scale_gui_admin_hc_vault: | none | N/A | no | HashiCorp - Create local admin user with password from Vault; cannot be combined with scale_gui_admin_user | +| scale_gui_admin_hc_vault_user: | none | admin | no | Create local admin user and write password to Vault | +| scale_gui_admin_hc_vault_role: | none | SecurityAdmin,SystemAdmin | no | Role access for the admin user, check IBM doc for valid roles. | +| scale_gui_cert_hc_vault: | false | true or false | no | Generate an https certificate from HashiCorp Vault and import it into the Scale GUI.
The Scale host needs to be included in HC Vault and the Ansible playbook needs to have the computed.name variables; normally the playbook is then run from Terraform. | +| scale_gui_password_policy_change: | false | true or false | no | Change the default GUI user password policy: change what you need in your inventory files and the rest will use defaults; used with **scale_gui_password_policy:** | | +| scale_gui_password_policy: | false | scale_gui_password_policy:
  minLength: 6
  maxAge: 900
  minAge: 0
  remember: 3
  minUpperChars: 0
  minLowerChars: 0
  minSpecialChars: 0
  minDigits: 0
  maxRepeat: 0
  minDiff: 1
  rejectOrAllowUserName: --rejectUserName | | Change the default GUI user password policy: change what you need in your inventory files and the rest will use defaults.
  
 scale_gui_password_policy:
  minLength: 6 ## Minimum password length
  maxAge: 900 ## Maximum password age
  minAge: 0 ## Minimum password age
  remember: 3 ## Remember old passwords
  minUpperChars: 0 ## Minimum upper case characters
  minLowerChars: 0 ## Minimum lower case characters
  minSpecialChars: 0 ## Minimum special case characters
  minDigits: 0 ## Minimum digits
  maxRepeat: 0 ## Maximum number of repeat characters
  minDiff: 1 ## Minimum different characters with respect to old password
  rejectOrAllowUserName: --rejectUserName ## either --rejectUserName or --allowUserName | +| scale_gui_ldap_integration: | false | true or false | no | Active Directory information for Managing GUI users in an external AD or LDAP server | +| scale_gui_ldap: | none | scale_gui_ldap:
  name: 'myad'
  host: 'myad.mydomain.local'
  bindDn: 'CN=servicebind,CN=Users,DC=mydomain,DC=local'
  bindPassword: 'password'
  baseDn: 'CN=Users,DC=mydomain,DC=local'
  port: '389' #Default 389
  type: 'ad' #Default Microsoft Active Directory
  #securekeystore: /tmp/ad.jks #Local on GUI Node
  #secureport: '636' #Default 636 | no | Parameters for managing GUI users in an external AD or LDAP server
 
 Parameter Description
  - **name:** Alias for your LDAP/AD server
  - **host:** The IP address or host name of the LDAP server.
  - **baseDn:** BasedDn string for the repository.
  - **bindDn:** BindDn string for the authentication user.
  - **bindPassword:** Password of the authentication user.
  - **port:** Port number of the LDAP. Default is 389
  - **type:** Repository type (ad, ids, domino, secureway, iplanet, netscape, edirectory or custom). Default is ad.
  - **securekeystore:** Location with file name of the keystore file (.jks, .p12 or .pfx).
  - **secureport:** Port number of the LDAP. 636 over SSL. | +| scale_gui_groups: | none | scale_gui_groups:
  administrator: 'scale-admin'
  securityadmin: 'scale-securityadmin'
  storageadmin: 'scale-storage-administrator'
  snapadmin: 'scale-snapshot-administrator'
  data_access: 'scale-data-access'
  monitor: 'scale-monitor'
  protocoladmin: 'scale-protocoladmin'
  useradmin: 'scale-useradmin' | no | The LDAP/AD groups need to be created in the LDAP/AD server (they do not have to exist before deployment).
You'll likely want to define this in your host inventory
Add the mappings that you want and replace the **scale-** names with your LDAP groups.

The following are the default user groups:
  - **Administrator** - Manages all functions on the system except those that deal with managing users, user groups, and authentication.
  - **SecurityAdmin** - Manages all functions on the system, including managing users, user groups, and user authentication.
  - **SystemAdmin** - Manages clusters, nodes, alert logs, and authentication.
  - **StorageAdmin** - Manages disks, file systems, pools, filesets, and ILM policies.
  - **SnapAdmin** - Manages snapshots for file systems and filesets.
  - **DataAccess** - Controls access to data. For example, managing access control lists.
  - **Monitor** - Monitors objects and system configuration but cannot configure, modify, or manage the system or its resources.
  - **ProtocolAdmin** - Manages object storage and data export definitions of SMB and NFS protocols.
  - **UserAdmin** - Manages access for GUI users. Users who are part of this group have edit permissions only in the Access pages of the GUI. Check IBM doc for updated list | +| scale_gui_email_notification: | false | false or true | no | Enable E-mail notifications in IBM Storage Scale GUI | +| scale_gui_email: | none | scale_gui_email:
  name: 'SMTP_1'
  ipaddress: 'emailserverhost'
  ipport: '25'
  replay_email_address: scale-server-test@acme.com
  contact_name: 'scale-contact-person'
  subject: &cluster&message
  sender_login_id:
  password:
  headertext:
  footertext: | no | - The email feature transmits operational and error-related data in the form of an event notification email.
  - Email notifications can be customized by setting a custom header and footer for the emails and customizing the subject by selecting and combining from the following variables:
    &message, &messageId, &severity, &dateAndTime, &cluster and &component.
  
  - **name** - Specifies a name for the e-mail server.
  - **address** - Specifies the address of the e-mail server. Enter the SMTP server IP address or host name. For example, 10.45.45.12 or smtp.example.com.
  - **portNumber** - Specifies the port number of the e-mail server. Optional.
  - **reply_email_address/sender_address** - Specifies the sender's email address.
  - **contact_name/sender_name** - Specifies the sender's name.
  - **subject** Notifications can be customized by setting a custom header and footer or with variable like &cluster&message ## Variables: &message &messageId &severity &dateAndTime &cluster&component
  - **sender_login_id** - Login needed to authenticate sender with email server in case the login is different from the sender address (--reply). Optional.
  - **password** - Password used to authenticate the sender address (--reply) or login id (--login) with the email server | +| scale_gui_email_recipients: | none | scale_gui_email_recipients:
  name: 'name_email_recipient_name':
  address: 'email_recipient_address@email.com':
  components_security_level: 'SCALEMGMT=WARNING,CESNETWORK=WARNING':
  reports: 'DISK,GPFS,AUTH':
  quotaNotification: '--quotaNotification' ##if defined it enabled quota Notification:
  quotathreshold: '70.0' | no | **Options:**
  - **NAME**: Name of the email Recipients
  - **Address:** userAddress Specifies the address of the e-mail user
  - **Components_security_level**
     - The value scale_gui_email_recipients_components_security_level: needs to contain the **Component** and the **Warning/Security Level**
       - Choose a component like **SCALEMGMT** with a security level of WARNING, which results in **SCALEMGMT=WARNING**
       - Security level: Choose the lowest severity of an event for which you want to receive an email. For example, selecting Tip includes events with severity Tip, Warning, and Error in the email.
       - The severity levels are as follows: **INFO**, **TIP**, **WARNING**, **ERROR**
     **List of all security levels:**
      AFM=WARNING,AUTH=WARNING,BLOCK=WARNING,CESNETWORK=WARNING,CLOUDGATEWAY=WARNING,CLUSTERSTATE=WARNING,DISK=WARNING,FILEAUDITLOG=WARNING,
      FILESYSTEM=WARNING,GPFS=WARNING,GUI=WARNING,HADOOPCONNECTOR=WARNING,KEYSTONE=WARNING,MSGQUEUE=WARNING,NETWORK=WARNING,NFS=WARNING,
      OBJECT=WARNING,PERFMON=WARNING,SCALEMGMT=WARNING,SMB=WARNING,CUSTOM=WARNING,AUTH_OBJ=WARNING,CES=WARNING,CESIP=WARNING,NODE=WARNING,
      THRESHOLD=WARNING,WATCHFOLDER=WARNING,NVME=WARNING,POWERHW=WARNING
  - **Reports** listOfComponents
       - Specifies the components to be reported. The tasks generating reports are scheduled by default to send a report once per day. Optional.
       AFM,AUTH,BLOCK,CESNETWORK,CLOUDGATEWAY,CLUSTERSTATE,DISK,FILEAUDITLOG,FILESYSTEM,GPFS,GUI,HADOOPCONNECTOR,
       KEYSTONE,MSGQUEUE,NETWORK,NFS,OBJECT,PERFMON,SCALEMGMT,SMB,CUSTOM,AUTH_OBJ,CES,CESIP,NODE,THRESHOLD,WATCHFOLDER,NVME,POWERHW
  - **quotaNotification**
     Enables quota notifications which are sent out if the specified threshold is violated. (See --quotathreshold)
  - **quotathreshold** valueInPercent
     - Sets the threshold (percent of the hard limit) for including quota violations in the quota digest report.
     - The default value is 100. The values -3, -2, -1, and zero have special meaning.
     - Specify the value -2 to include all results, even entries where the hard quota is not set.
     - Specify the value -1 to include all entries where hard quota is set and current usage is greater than or equal to the soft quota.
     - Specify the value -3 to include all entries where hard quota is not set and current usage is greater than or equal to the soft quota only.
     - Specify the value 0 to include all entries where the hard quota is set.
  Using unlisted options can lead to an error | +| scale_gui_snmp_notification: | false | true or false | no | Enable SNMP notifications in IBM Storage Scale GUI | +| scale_gui_snmp_server: | false | scale_gui_snmp_server:
  ip_adress: 'snmp_server_host'
  ip_port: '162'
  community: 'Public' | no | - To Configure SNMP Notification.
    - Change the Value:
      - scale_gui_snmp_notification: true
       - ip_adress to your SNMP server/host
       - ip_port to your SNMP port
community to your SNMP community | + +Role: NFS,SMB,OBJ - Protocol +---------------------------- + +| variables | Default | Options | User Mandatory | Descriptions | +| ------------------------ | ------- | ------------------------------ | -------------- | ------------------------------------------ | +| scale_install_debuginfo: | true | true or false | no | Flag to install ganesha/nfs debug package | +| scale_install_debuginfo: | true | true or false | no | Flag to install smb debug package | +| scale_protocol_node: | none | true or false | no | Enable to set a node to be used as a Protocol Node, by host variable. | +| scale_protocols: #IPV4 | none | scale_protocols: #IPV4
  smb: true
  nfs: true
  object: true
  export_ip_pool: [192.168.100.100,192.168.100.101]
  filesystem: cesSharedRoot
  mountpoint: /gpfs/cesSharedRoot | no | To install IBM Storage Scale protocols. Refer to the mmces man page for a description of these Cluster Export Services options. scale_ces_groups can also be used to group nodes. | +| scale_protocols: #Ipv6 | none | scale_protocols: #Ipv6
  smb: true
  nfs: true
  object: true
  interface: [eth0]
  export_ip_pool: [2002:90b:e006:84:250:56ff:feb9:7787]
  filesystem: cesSharedRoot
  mountpoint: /gpfs/cesSharedRoot | no | For enabling Cluster Export Services in an IPv6 environment one also needs to define an interface parameter. scale_ces_groups can also be used to group nodes. | +| scale_ces_obj: | none | scale_ces_obj:
  dynamic_url: False
  enable_s3: False
  local_keystone: True
  enable_file_access: False
  endpoint_hostname: scale-11
  object_fileset: Object_Fileset
  pwd_file: obj_passwd.j2
  admin_user: admin
  admin_pwd: admin001
  database_pwd: admin001 | no | Missing descriptions. | + +Role: HDFS - Hadoop +------------------- + +| variables | Default | Options | User Mandatory | Descriptions | +| -------------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | ------------------------------------------------- | +| ha_enabled: | false | true or false | no | HA for namenode in HDFS? | +| scale_hdfs_clusters: | none | - name: mycluster
  filesystem: gpfs1
  namenodes: ['host-vm1.test.net', 'host-vm2.test.net']
  datanodes: ['host-vm3.test.net', 'host-vm4.test.net', 'host-vm5.test.net']
  datadir: datadir | no | Install IBM Storage Scale (HDFS), "Document more" | + +Role: zimon - Performance Monitoring +------------------------------------ + +| variables | Default | Options | User Mandatory | Descriptions | +| ---------------------- | ------- | ------------- | -------------- | ----------------------------------------------------------------------------- | +| scale_zimon_collector: | false | true or false | no | Nodes default GUI collector role, its install the collector on all GUI nodes. | +| scale_cluster_gui | false | true or false | no | Install IBM Storage Scale GUI on nodes, set by host variables. | +| scale_cluster_zimon | false | true or false | no | Install up zimon enabled | + +Role: FAL - File Audit Logging +------------------------------ + +| variables | Default | Options | User Mandatory | Descriptions | +| ---------------- | ------- | ------------- | -------------- | ------------------------------- | +| scale_fal_enable | true | true or false | no | Flag to enable fileauditlogging | + +Role: callhome - Call Home + +| variables | Default | Options | User Mandatory | Descriptions | +| ---------------------- | ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| scale_callhome_params: | None | scale_callhome_params:
    is_enabled: true
    customer_name: abc
    customer_email: abc@abc.com
    customer_id: 12345
    customer_country: IN
    proxy_ip:
    proxy_port:
    proxy_user:
    proxy_password:
    proxy_location:
    callhome_server: scale01
    callhome_group1: [scale01,scale02,scale03,scale04]
    callhome_schedule: [daily,weekly]
    is_enabled: true
    customer_name: abc
    customer_email: abc@abc.com
    customer_id: 12345
    customer_country: IN
    proxy_ip:
    proxy_port:
    proxy_user:
    proxy_password:
    proxy_location:
    callhome_server: scale01 ## server that has callhome installed and can reach out to IBM
    callhome_group1: [scale01,scale02,scale03,scale04]
    callhome_schedule: [daily,weekly] | no | Refer to the mmcallhome man page for a description of these call home parameters
**scale_callhome_params**:
  is_enabled: true
  customer_name: abc
  customer_email: abc@abc.com
  customer_id: 12345
  customer_country: IN
  proxy_ip:
  proxy_port:
  proxy_user:
  proxy_password:
  proxy_location:
  callhome_server: scale01 ## server that has callhome installed and can reach out to IBM
  callhome_group1: [scale01,scale02,scale03,scale04]
  callhome_schedule: [daily,weekly] | + +Role: remotemount_configure - Enabled and Configure Remote Mounting of Filesystem +--------------------------------------------------------------------------------- + +| variables | Default | Options | User Mandatory | Descriptions | +| ----------------------------------------------- | ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| scale_remotemount_client_gui_username: | none | username, example admin | yes | Scale User with Administrator or ContainerOperator role/rights | +| scale_remotemount_client_gui_password: | none | password for user | yes | Password for Scale User with Administrator or ContainerOperator role/rights | +| scale_remotemount_client_gui_hostname: | none | 10.10.10.1 | yes | IP or Hostname to Client GUI Node | +| scale_remotemount_storage_gui_username: | none | | yes | Scale User with Administrator or ContainerOperator role/rights | +| scale_remotemount_storage_gui_password: | none | | yes | Password for Scale User with Administrator or ContainerOperator role/rights | +| scale_remotemount_storage_gui_hostname: | none | | yes | IP or Hostname to Storage GUI Node | +| scale_remotemount_storage_adminnodename: | false | true or false | no | IBM Storage Scale uses the Deamon node name and the IP Attach to connect and run cluster traffic on. In most cases the admin network and deamon network is the same.
   In case you have different AdminNode and DaemonNode addresses and for some reason want to use the admin network, you can set this variable to true | +| scale_remotemount_filesystem_name | none | scale_remotemount_filesystem_name
  scale_remotemount_client_filesystem_name:
  scale_remotemount_client_remotemount_path:
  scale_remotemount_storage_filesystem_name:
  scale_remotemount_access_mount_attributes:
  scale_remotemount_client_mount_fs:
  scale_remotemount_client_mount_priority: 0 | yes | These variables need to be provided as a list, as mounting multiple filesystems is now supported.

  - Local filesystem name of the remote-mounted filesystem, so the storage cluster and remote cluster can have different names.
  - Path to where the filesystem should be mounted: /gpfs01/fs1
  - Storage Cluster filesystem you want to mount: gpfs01
  - Filesystem can be mounted in different access mount: RW or RO
  - Indicates when the file system is to be mounted: options are yes, no, automount (When the file system is first accessed.)
  - File systems with higher Priority numbers are mounted after file systems with lower numbers. File systems that do not have mount priorities are mounted last.
   A value of zero indicates no priority. valid values: 0 - x | +| scale_remotemount_client_filesystem_name: | none | fs1 | yes | Local Filesystem Name of the remote mounted filesystem, So the storage cluster and remote cluster can have different names. | +| scale_remotemount_client_remotemount_path: | none | /gpfs01/fs1 | yes | Path to where the filesystem shoul be Mounted. | +| scale_remotemount_storage_filesystem_name: | none | gpfs01 | yes | Storage Cluster filesystem you want to mount | +| scale_remotemount_access_mount_attributes: | rw | RW, RO | no | Filesystem can be mounted in different access mount: RW or RO | +| scale_remotemount_client_mount_fs: | yes | yes, no, automount | no | Indicates when the file system is to be mounted:\*\* options are yes, no, automount (When the file system is first accessed.) | +| scale_remotemount_client_mount_priority: 0 | 0 | 0 - x | no | File systems with higher Priority numbers are mounted after file systems with lower numbers. File systems that do not have mount priorities are mounted last. A value of zero indicates no priority. | +| scale_remotemount_client_no_gui: | false | true or false | no | If Accessing/Client Cluster dont have GUI, it will use CLI/SSH against Client Cluster | +| scale_remotemount_storage_pub_key_location: | /tmp/storage_cluster_public_key.pub | path to pubkey | no | Client Cluster (Access) is downloading the pubkey from Owning cluster and importing it | +| scale_remotemount_cleanup_remote_mount: | false | true or false | no | Unmounts, remove the filesystem, and the connection between Accessing/Client cluster and Owner/Storage Cluster. This now works on clusters that not have GUI/RESTAPI interface on Client Cluster | +| scale_remotemount_debug: | false | true or false | no | Outputs debug information after tasks | +| scale_remotemount_forceRun: | false | true or false | no | If scale_remotemount_forceRun is passed in, then the playbook is attempting to run remote_mount role regardless of whether the filesystem is configured | +| scale_remotemount_storage_pub_key_location: | /tmp/storage_cluster_public_key.pub | path to | no | Client Cluster (Access) pubkey that is changed from json to right format and then used when creating connection | +| scale_remotemount_storage_pub_key_location_json | /tmp/storage_cluster_public_key_json.pub | path to | no | Client Cluster (Access) is downloading the pubkey as JSON from Owning cluster | +| scale_remotemount_storage_pub_key_delete | true | true or false | no | Delete both temporary pubkey after the connection have been established | +| scale_remotemount_remotecluster_chipers: | AUTHONLY | AES128-SHA
AES256-SHA
AUTHONLY | no | Sets the security mode for communications between the current cluster and the remote cluster
Encryption can affect performance and increase CPU usage
run **mmauth show ciphers** to check supported ciphers | +| scale_remotemount_storage_pub_key_delete | true | true or false | no | Delete both temporary pubkey after the connection have been established | +| scale_remotemount_validate_certs_uri: | no | no | no | If Ansible URI module should validate https certificate for IBM Storage Scale RestAPI interface. | diff --git a/VARIABLESNEW.md b/VARIABLESNEW.md deleted file mode 100644 index ba11f64b..00000000 --- a/VARIABLESNEW.md +++ /dev/null @@ -1,140 +0,0 @@ -Variables used by Spectrum Scale (GPFS) Ansible project -======================================================= - -Variables list is dived into each if the Ansible roles. - -**Role: Core - Core Spectrum Scale installation and configuration** - -| Variables | Default | Options | User Mandatory | Descriptions | -|----------------------------------------|----------------------------|------------------------------------------------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| scale_architecture: | {{ansible_architecture}} | x86_64 or ppc64le | no | This ansible_architecture is gather from ansible get_facts module, Spectrum Scale architecture that you want to install on your nodes. | -| scale_version: | none | 5.x.x.x | yes | Specify the Spectrum Scale version that you want to install on your nodes. With 5.0.5.x. | -| scale_daemon_nodename: | {{ansible_hostname}} | none | no | Spectrum Scale daemon nodename defaults to nodes hostname | -| scale_admin_nodename: | {{ansible_hostname}} | none | no | Spectrum Scale admin nodename defaults to nodes hostname | -| scale_state: | present | present,maintenance,absent | no | Desired state of the Spectrum Scale node. present - node will be added to cluster, daemon will be started maintenance
node will be added to cluster, daemon will not be started absent - node will be removed from cluster | -| scale_prepare_disable_selinux | false | true or false | no | Whether or not to disable SELinux. | -| scale_reboot_automatic | false | true or false | no | Whether or not to automatically reboot nodes - if set to false then only a message is printed. If set to true then nodes are automatically rebooted (dangerous!). | -| scale_prepare_enable_ssh_login | false | true or false | no | Whether or not enable SSH root login (PermitRootLogin) and public key authentication (PubkeyAuthentication). | -| scale_prepare_restrict_ssh_address | false | true or false | no | Whether or not to restrict SSH access to the admin nodename (ListenAddress). Requires scale_prepare_enable_ssh_login to be enabled. | -| scale_prepare_disable_ssh_hostkeycheck | false | true or false | no | Whether or not to disable SSH hostkey checking (StrictHostKeyChecking). | -| scale_prepare_exchange_keys | false | true or false | no | Path to public SSH key - will be generated (if it does not exist) and exchanged between nodes. Requires scale_prepare_exchange_keys to be enabled, too. | -| scale_prepare_pubkey_path | /root/.ssh/id_rsa.pub | /root/.ssh/gpfskey.pub | no | example: /root/.ssh/gpfskey.pub | -| scale_prepare_disable_firewall | default: false | true or false | no | Whether or not to disable Linux firewalld - if you need to keep firewalld active then change this variable to false and apply your custom firewall rules prior to running this role (e.g. as pre_tasks). | -| scale_install_localpkg_path | none | /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install | yes | Specify the path to the self-extracting Spectrum Scale installation archive on the local system (accessible on Ansible control machine) - it will be copied to your nodes. | -| scale_install_remotepkg_path | none | /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install | yes | Specify the path to Spectrum Scale installation package on the remote system (accessible on Ansible managed node). | -| scale_install_repository_url | none | example: http://server/gpfs/ | yes | Specify the URL of the (existing) Spectrum Scale YUM repository (copy the contents of /usr/lpp/mmfs/{{ scale_version }}/ to a web server in order to build your repository).
Note that if this is a URL then a new repository definition will be created. If this variable is set to existing then it is assumed that a repository definition already exists and thus will *not* be created. | -| scale_install_directory_pkg_path | none | example: /tmp/gpfs/ | yes | Specify the path to the user-provided directory, containing all Spectrum Scale packages. Note that for this installation method all packages need to be kept in a single directory. | -| scale_cluster_quorum | false | true or false | no | If you dont specify any quorum nodes then the first seven hosts in your inventory will automatically be assigned the quorum role. even if this variable is false | -| scale_cluster_manager | false | true or false | no | Nodes default manager role - you ll likely want to define per-node roles in your inventory | -| scale_cluster_profile_name: | none | gpfsprotocoldefaults or gpfsprotocolrandomio | no | Specifies a predefined profile of attributes to be applied. System-defined profiles are located in /usr/lpp/mmfs/profiles/
The following system-defined profile names are accepted. gpfsprotocoldefaults and gpfsprotocolrandomio
eg. If you want to apply gpfsprotocoldefaults then specify scale_cluster_profile_name: gpfsprotocoldefaults | -| scale_cluster_profile_dir_path | /usr/lpp/mmfs/profiles/ | Path to cluster profile: example: /usr/lpp/mmfs/profiles/ | no | Fixed variable related to mmcrcluster profile. System-defined profiles are located in /usr/lpp/mmfs/profiles/ | -| scale_enable_gpg_check: | true | true or false | no | Enable/disable gpg key flag | -| scale_install_localpkg_tmpdir_path | /tmp | path to folder. | no | Temporary directory to copy installation package to (local package installation method) | -| scale_nodeclass: | none | Name of the nodeclass: example scale nodeclass: - class1 | no | Node classes can be defined on a per-node basis by defining the scale_nodeclass variable. | -| scale_config: | none | scale_config:
  - nodeclass: class1
    params:
        - pagepool: 4G
        - autoload: yes
        - ignorePrefetchLunCount: yes | no | Configuration attributes can be defined as variables for *any* host in the play
The host for which you define the configuration attribute is irrelevant. Refer to the man mmchconfig man page for a list of available configuration attributes. | -| scale_storage: | none | scale_storage:
     filesystem: gpfs01
     blockSize: 4M
     maxMetadataReplicas: 2
     defaultMetadataReplicas: 2
     maxDataReplicas: 2
     defaultDataReplicas: 2
     numNodes: 16
     automaticMountOption: true
     defaultMountPoint: /mnt/gpfs01
     disks:
          - device: /dev/sdb
            nsd: nsd_1
            servers: scale01
            failureGroup: 10
            usage: metadataOnly
            pool: system
          - device: /dev/sdc
            nsd: nsd_2
            servers: scale01
            failureGroup: 10
            usage: dataOnly
            pool: data | no | Refer to man mmchfs and man mmchnsd man pages for a description of these storage parameters.
The filesystem parameter is mandatory, servers, and the device parameter is mandatory for each of the file systems disks.
All other file system and disk parameters are optional. scale_storage *must* be define using group variables.
Do *not* define disk parameters using host variables or inline variables in your playbook.
Doing so would apply them to all hosts in the group/play, thus defining the same disk multiple times... | -| scale_admin_node | false | true or false | no | Set admin flag on node for Ansible to use. | -| scale_nsd_server | scale_nsd_server | true or false | no | Set nsd flag for installation purpose | - -**Role: GUI - GUI for Management of Spectrum Scale Cluster** - -| Variables | Default | Options | User Mandatory | Descriptions | -|-----------------------------------|----------------------------|------------------------------------------------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| scale_gui_hide_tip_callhome | false | true or false | no | Hide the Callhome not enabled tip on gui | -| scale_cluster_gui: | false | true or false | no | Install Spectrum Scale GUI on nodes, set by host variables. | -| scale_service_gui_start: | true | true or false | no | Wheter or not to start the Scale GUI after installation. | -| scale_gui_admin_user: | none | admin | no | Spesify a name for the admin user to be created. | -| scale_gui_admin_password: | none | Admin@GUI! | no | Password to be set on the admin user | -| scale_gui_admin_role: | none | SecurityAdmin,SystemAdmin | no | Role access for the admin user, check IBM doc for valid roles. | -| scale_gui_user_username: | none | SEC | no | Ekstra Spectrum Scale GUI user. example: Monitor or RestAPI. | -| scale_gui_user_password: | none | Storage@Scale1 | no | Password for extra user | -| scale_gui_user_role: | none | SystemAdmin | no | Role access for the extra user. | -| scale_gui_admin_hc_vault: | none | N/A | no | HasiCorp - Create local Admin user with password from vault, cant be combined with the scale_gui_admin_user | -| scale_gui_admin_hc_vault_user: | noen | admin | no | Create local admin user and write password to Vault | -| scale_gui_admin_hc_vault_role: | none | SecurityAdmin,SystemAdmin | no | Role access for the admin user, check IBM doc for valid roles. | -| scale_gui_cert_hc_vault: | false | true or false | no | Generate https Certificate from HasiCorp Vault and import it to Scale GUI.
The Scale host need to be included in HC Vault and the Ansible playbook need to have the computed.name variables, normally the playbook is then run from Terraform. | -| scale_gui_password_policy_change: | false | true or false | no | Change default GUI User Password Policy change what you need in your inventory files and rest wil use default, used with **scale_gui_password_policy:** | | -| scale_gui_password_policy: | false | scale_gui_password_policy:
  minLength: 6
  maxAge: 900
  minAge: 0
  remember: 3
  minUpperChars: 0
  minLowerChars: 0
  minSpecialChars: 0
  minDigits: 0
  maxRepeat: 0
  minDiff: 1
  rejectOrAllowUserName: --rejectUserName | | Change Default GUI User Password Policy Change what you need in your inventory files and rest wil use default.
  
 scale_gui_password_policy:
  minLength: 6 ## Minimum password length
  maxAge: 900 ## Maximum password age
  minAge: 0 ## Minimum password age
  remember: 3 ## Remember old passwords
  minUpperChars: 0 ## Minimum upper case characters
  minLowerChars: 0 ## Minimum lower case characters
  minSpecialChars: 0 ## Minimum special case characters
  minDigits: 0 ## Minimum digits
  maxRepeat: 0 ## Maximum number of repeat characters
  minDiff: 1 ## Minimum different characters with respect to old password
  rejectOrAllowUserName: --rejectUserName ## either --rejectUserName or --allowUserName | -| scale_gui_ldap_integration: | false | true or false | no | Active Directory information for Managing GUI users in an external AD or LDAP server | -| scale_gui_ldap: | none | scale_gui_ldap:
  name: 'myad'
  host: 'myad.mydomain.local'
  bindDn: 'CN=servicebind,CN=Users,DC=mydomain,DC=local'
  bindPassword: 'password'
  baseDn: 'CN=Users,DC=mydomain,DC=local'
  port: '389' #Default 389
  type: 'ad' #Default Microsoft Active Directory
  #securekeystore: /tmp/ad.jks #Local on GUI Node
  #secureport: '636' #Default 636 | no | Managing GUI users in an external AD or LDAP Parameters
 
 Parameter Description
  - **name:** Alias for your LDAP/AD server
  - **host:** The IP address or host name of the LDAP server.
  - **baseDn:** BasedDn string for the repository.
  - **bindDn:** BindDn string for the authentication user.
  - **bindPassword:** Password of the authentication user.
  - **port:** Port number of the LDAP. Default is 389
  - **type:** Repository type (ad, ids, domino, secureway, iplanet, netscape, edirectory or custom). Default is ad.
  - **securekeystore:** Location with file name of the keystore file (.jks, .p12 or .pfx).
  - **secureport:** Port number of the LDAP. 636 over SSL. | -| scale_gui_groups: | none | scale_gui_groups:
  administrator: 'scale-admin'
  securityadmin: 'scale-securityadmin'
  storageadmin: 'scale-storage-administrator'
  snapadmin: 'scale-snapshot-administrator'
  data_access: 'scale-data-access'
  monitor: 'scale-monitor'
  protocoladmin: 'scale-protocoladmin'
  useradmin: 'scale-useradmin' | no | The LDAP/AD Groups needs to be create in the LDAP. (You don't need created before deployment.)
You'll likely want to define this in your host inventory
Add the mappings that you want and replace the **scale-** with your ldap groups.

The following are the default user groups:
  - **Administrator** - Manages all functions on the system except those deals with managing users, user groups, and authentication.
  - **SecurityAdmin** - Manages all functions on the system, including managing users, user groups, and user authentication.
  - **SystemAdmin** - Manages clusters, nodes, alert logs, and authentication.
  - **StorageAdmin** - Manages disks, file systems, pools, filesets, and ILM policies.
  - **SnapAdmin** - Manages snapshots for file systems and filesets.
  - **DataAccess** - Controls access to data. For example, managing access control lists.
  - **Monitor** - Monitors objects and system configuration but cannot configure, modify, or manage the system or its resources.
  - **ProtocolAdmin** - Manages object storage and data export definitions of SMB and NFS protocols.
  - **UserAdmin** - Manages access for GUI users. Users who are part of this group have edit permissions only in the Access pages of the GUI. Check IBM doc for updated list | -| scale_gui_email_notification: | false | false or true | no | Enable E-mail notifications in Spectrum Scale GUI | -| scale_gui_email: | none | scale_gui_email:
  name: 'SMTP_1'
  ipaddress: 'emailserverhost'
  ipport: '25'
  replay_email_address: scale-server-test@acme.com
  contact_name: 'scale-contact-person'
  subject: &cluster&message
  sender_login_id:
  password:
  headertext:
  footertext: | no | - The email feature transmits operational and error-related data in the form of an event notification email.
  - Email notifications can be customized by setting a custom header and footer for the emails and customizing the subject by selecting and combining from the following variables:
    &message, &messageId, &severity, &dateAndTime, &cluster and &component.
  
  - **name** - Specifies a name for the e-mail server.
  - **address** - Specifies the address of the e-mail server. Enter the SMTP server IP address or host name. For example, 10.45.45.12 or smtp.example.com.
  - **portNumber** - Specifies the port number of the e-mail server. Optional.
  - **reply_email_address/sender_address** - Specifies the sender's email address.
  - **contact_name/sender_name** - Specifies the sender's name.
  - **subject** Notifications can be customized by setting a custom header and footer or with variable like &cluster&message ## Variables: &message &messageId &severity &dateAndTime &cluster&component
  - **sender_login_id** - Login needed to authenticate sender with email server in case the login is different from the sender address (--reply). Optional.
  - **password** - Password used to authenticate sender address (--reply) or login id (--login) with the email sever | -| scale_gui_email_recipients: | none | scale_gui_email_recipients:
  name: 'name_email_recipient_name':
  address: 'email_recipient_address@email.com':
  components_security_level: 'SCALEMGMT=WARNING,CESNETWORK=WARNING':
  reports: 'DISK,GPFS,AUTH':
  quotaNotification: '--quotaNotification' ##if defined it enabled quota Notification:
  quotathreshold: '70.0' | no | **Options:**
  - **NAME**: Name of the email Recipients
  - **Address:** userAddress Specifies the address of the e-mail user
  - **Components_security_level**
     - The value scale_gui_email_recipients_components_security_level: Need to contain the **Component** and the **Warning/Security Level**
       - Chose component like **SCALEMGMT** and the security_level of WARNING wil be **SCALEMGMT=ERROR**
       - Security level: Chose the lowest severity of an event for which you want to receive and email. Example, selectin Tip includes events with severity Tip, Warning, and Error in the email.
       - The Severity level is as follows: : **INFO**, **TIP**, **WARNING**, **ERROR**
     **List of all security levels:**
      AFM=WARNING,AUTH=WARNING,BLOCK=WARNING,CESNETWORK=WARNING,CLOUDGATEWAY=WARNING,CLUSTERSTATE=WARNING,DISK=WARNING,FILEAUDITLOG=WARNING,
      FILESYSTEM=WARNING,GPFS=WARNING,GUI=WARNING,HADOOPCONNECTOR=WARNING,KEYSTONE=WARNING,MSGQUEUE=WARNING,NETWORK=WARNING,NFS=WARNING,
      OBJECT=WARNING,PERFMON=WARNING,SCALEMGMT=WARNING,SMB=WARNING,CUSTOM=WARNING,AUTH_OBJ=WARNING,CES=WARNING,CESIP=WARNING,NODE=WARNING,
      THRESHOLD=WARNING,WATCHFOLDER=WARNING,NVME=WARNING,POWERHW=WARNING
  - **Reports** listOfComponents
       - Specifies the components to be reported. The tasks generating reports are scheduled by default to send a report once per day. Optional.
       AFM,AUTH,BLOCK,CESNETWORK,CLOUDGATEWAY,CLUSTERSTATE,DISK,FILEAUDITLOG,FILESYSTEM,GPFS,GUI,HADOOPCONNECTOR,
       KEYSTONE,MSGQUEUE,NETWORK,NFS,OBJECT,PERFMON,SCALEMGMT,SMB,CUSTOM,AUTH_OBJ,CES,CESIP,NODE,THRESHOLD,WATCHFOLDER,NVME,POWERHW
  - **quotaNotification**
     Enables quota notifications which are sent out if the specified threshold is violated. (See --quotathreshold)
  - **quotathreshold** valueInPercent
     - Sets the threshold(percent of the hard limit)for including quota violations in the quota digest report.
     - The default value is 100. The values -3, -2, -1, and zero have special meaning.
     - Specify the value -2 to include all results, even entries where the hard quota not set.
     - Specify the value -1 to include all entries where hard quota is set and current usage is greater than or equal to the soft quota.
     - Specify the value -3 to include all entries where hard quota is not set and current usage is greater than or equal to the soft quota only.
     - Specify the value 0 to include all entries where the hard quota is set.
  Using unlisted options can lead to an error | -| scale_gui_snmp_notification: | false | true or false | no | Enable SNMP notifications in Spectrum Scale GUI | -| scale_gui_snmp_server: | false | scale_gui_snmp_server:
  ip_adress: 'snmp_server_host'
  ip_port: '162'
  community: 'Public' | no | - To Configure SNMP Notification.
    - Change the Value:
      - scale_gui_snmp_notification: true
       - ip_adress to your SNMP server/host
       - ip_port to your SNMP port
       - community to your SNMP community | - -**Role: NFS,SMB,OBJ - Protocol** - -| variables | Default | Options | User Mandatory | Descriptions | -|--------------------------|---------|---------------------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| -| scale_install_debuginfo: | true | true or false | no | Flag to install ganesha/nfs debug package | -| scale_install_debuginfo: | true | true or false | no | Flag to install smb debug package | -| scale_protocol_node: | none | true or false | no | Enable to set node to uses as Protcol Node, by host variable. | -| scale_protocols: #IPV4 | none | scale_protocols: #IPV4
  smb: true
  nfs: true
  object: true
  export_ip_pool: [192.168.100.100,192.168.100.101]
  filesystem: cesSharedRoot
  mountpoint: /gpfs/cesSharedRoot | no | To install Spectrum Scale Protocol. Refer to man mmces man pages for a description of these Cluster Export. scale_ces_groups can also be user to group nodes. | -| scale_protocols: #Ipv6 | none | scale_protocols: #Ipv6
  smb: true
  nfs: true
  object: true
  interface: [eth0]
  export_ip_pool: [2002:90b:e006:84:250:56ff:feb9:7787]
  filesystem: cesSharedRoot
  mountpoint: /gpfs/cesSharedRoot | no | For enabling Cluster Export Services in an IPv6 environment one also needs to define an interface parameter: scale_ces_groups can also be user to group nodes. | -| scale_ces_obj: | none | scale_ces_obj:
  dynamic_url: False
  enable_s3: False
  local_keystone: True
  enable_file_access: False
  endpoint_hostname: scale-11
  object_fileset: Object_Fileset
  pwd_file: obj_passwd.j2
  admin_user: admin
  admin_pwd: admin001
  database_pwd: admin001 | no | Missing descriptions. - - - -**Role: HDFS - Hadoop** - - -| variables | Default | Options | User Mandatory | Descriptions | -|----------------------|---------|-----------------------------|----------------|---------------------------------------------------------| -| ha_enabled: | false | true or false | no | HA for namenode in HDFS? | -| scale_hdfs_clusters: | none | - name: mycluster
  filesystem: gpfs1
  namenodes: ['host-vm1.test.net', 'host-vm2.test.net']
  datanodes: ['host-vm3.test.net', 'host-vm4.test.net', 'host-vm5.test.net']
  datadir: datadir | no | Install IBM Spectrum Scale (HDFS), "Document more" - - -**Role: zimon - Performance Monitoring** - -| variables | Default | Options | User Mandatory | Descriptions | -|------------------------|---------|---------------|----------------|-------------------------------------------------------------------------------| -| scale_zimon_collector: | false | true or false | no | Nodes default GUI collector role, its install the collector on all GUI nodes. | -| scale_cluster_gui | false | true or false | no | Install Spectrum Scale GUI on nodes, set by host variables. | -| scale_cluster_zimon | false | true or false | no | Install up zimon enabled | - - -**Role: Fal - FileAudit Logging** - -| variables | Default | Options | User Mandatory | Descriptions | -|------------------------|---------|----------------|----------------|------------------------------------------------------------------------------| -| scale_fal_enable | true | true or false | no | Flag to enable fileauditlogging | - -**Role: CallHome** - -| variables | Default | Options | User Mandatory | Descriptions | -|------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------|----------------|--------------| -|scale_callhome_params: | None | scale_callhome_params:
    is_enabled: true
    customer_name: abc
    customer_email: abc@abc.com
    customer_id: 12345
    customer_country: IN
    proxy_ip:
    proxy_port:
    proxy_user:
    proxy_password:
    proxy_location:
    callhome_server: scale01
    callhome_group1: [scale01,scale02,scale03,scale04]
    callhome_schedule: [daily,weekly]
    is_enabled: true
    customer_name: abc
    customer_email: abc@abc.com
    customer_id: 12345
    customer_country: IN
    proxy_ip:
    proxy_port:
    proxy_user:
    proxy_password:
    proxy_location:
    callhome_server: scale01. ## server that have callhome installed on and can reach out to IBM
    callhome_group1: [scale01,scale02,scale03,scale04]
    callhome_schedule: [daily,weekly] | no | Refer to man mmcallhome man pages for a description of these Call Homes
**scale_callhome_params**:
  is_enabled: true
  customer_name: abc
  customer_email: abc@abc.com
  customer_id: 12345
  customer_country: IN
  proxy_ip:
  proxy_port:
  proxy_user:
  proxy_password:
  proxy_location:
  callhome_server: scale01. ## server that have callhome installed on and can reach out to IBM
  callhome_group1: [scale01,scale02,scale03,scale04]
  callhome_schedule: [daily,weekly] | - - -**Role: remotemount_configure - Enabled and Configure Remote Mounting of Filesystem** - -| variables | Default | Options | User Mandatory | Descriptions | -|-------------------------------------------------|-------------------------------------------------|-------------------------|----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| scale_remotemount_client_gui_username: | none | username, example admin | yes | Scale User with Administrator or ContainerOperator role/rights | -| scale_remotemount_client_gui_password: | none | password for user | yes | Password for Scale User with Administrator or ContainerOperator role/rights | -| scale_remotemount_client_gui_hostname: | none | 10.10.10.1 | yes | IP or Hostname to Client GUI Node | -| scale_remotemount_storage_gui_username: | none | | yes | Scale User with Administrator or ContainerOperator role/rights | -| scale_remotemount_storage_gui_password: | none | | yes | Password for Scale User with Administrator or ContainerOperator role/rights | -| scale_remotemount_storage_gui_hostname: | none | | yes | IP or Hostname to Storage GUI Node | -| scale_remotemount_storage_adminnodename: | false | true or false | no | Spectrum Scale uses the Deamon node name and the IP Attach to connect and run cluster traffic on. In most cases the admin network and deamon network is the same.
   In case you have different AdminNode address and DeamonNode address and for some reason you want to use admin network, then you can set the variable to true | -| scale_remotemount_filesystem_name | none | scale_remotemount_filesystem_name
  scale_remotemount_client_filesystem_name:
  scale_remotemount_client_remotemount_path:
  scale_remotemount_storage_filesystem_name:
  scale_remotemount_access_mount_attributes:
  scale_remotemount_client_mount_fs:
  scale_remotemount_client_mount_priority: 0 | yes | The variables in the list needs to be in a list, as we now support mounting up more filesystem.

  - Local Filesystem Name of the remote mounted filesystem, So the storage cluster and remote cluster can have different names.
  - Path to where the filesystem shoul be Mounted: /gpfs01/fs1
  - Storage Cluster filesystem you want to mount: gpfs01
  - Filesystem can be mounted in different access mount: RW or RO
  - Indicates when the file system is to be mounted: options are yes, no, automount (When the file system is first accessed.)
  - File systems with higher Priority numbers are mounted after file systems with lower numbers. File systems that do not have mount priorities are mounted last.
   A value of zero indicates no priority. valid values: 0 - x | -| -   scale_remotemount_client_filesystem_name: | none | fs1 | yes | Local Filesystem Name of the remote mounted filesystem, So the storage cluster and remote cluster can have different names. | -| -   scale_remotemount_client_remotemount_path: | none | /gpfs01/fs1 | yes | Path to where the filesystem shoul be Mounted. | -| -   scale_remotemount_storage_filesystem_name: | none | gpfs01 | yes | Storage Cluster filesystem you want to mount | -| -   scale_remotemount_access_mount_attributes: | rw | RW, RO | no | Filesystem can be mounted in different access mount: RW or RO | -| -   scale_remotemount_client_mount_fs: | yes | yes, no, automount | no | Indicates when the file system is to be mounted:** options are yes, no, automount (When the file system is first accessed.) | -| -   scale_remotemount_client_mount_priority: 0 | 0 | 0 - x | no | File systems with higher Priority numbers are mounted after file systems with lower numbers. File systems that do not have mount priorities are mounted last. A value of zero indicates no priority. | -| - | | | | | -| scale_remotemount_client_no_gui: | false | true or false | no | If Accessing/Client Cluster dont have GUI, it will use CLI/SSH against Client Cluster | -| scale_remotemount_storage_pub_key_location: | /tmp/storage_cluster_public_key.pub | path to pubkey | no | Client Cluster (Access) is downloading the pubkey from Owning cluster and importing it | -| scale_remotemount_cleanup_remote_mount: | false | true or false | no | Unmounts, remove the filesystem, and the connection between Accessing/Client cluster and Owner/Storage Cluster. This now works on clusters that not have GUI/RESTAPI interface on Client Cluster | -| scale_remotemount_debug: | false | true or false | no | Outputs debug information after tasks | -| scale_remotemount_forceRun: | false | true or false | no | If scale_remotemount_forceRun is passed in, then the playbook is attempting to run remote_mount role regardless of whether the filesystem is configured | -| scale_remotemount_storage_pub_key_location: | /tmp/storage_cluster_public_key.pub | path to | no | Client Cluster (Access) pubkey that is changed from json to right format and then used when creating connection | -| scale_remotemount_storage_pub_key_location_json | /tmp/storage_cluster_public_key_json.pub | path to | no | Client Cluster (Access) is downloading the pubkey as JSON from Owning cluster | -| scale_remotemount_storage_pub_key_delete | true | true or false | no | Delete both temporary pubkey after the connection have been established | -| scale_remotemount_remotecluster_chipers: | AUTHONLY | AES128-SHA
AES256-SHA
AUTHONLY | no | Sets the security mode for communications between the current cluster and the remote cluster
Encryption can have performance effect and increased CPU usage
run **mmauth show ciphers** to check supported ciphers -| scale_remotemount_storage_pub_key_delete | true | true or false | no | Delete both temporary pubkey after the connection have been established | -| scale_remotemount_validate_certs_uri: | no | no | no | If Ansible URI module should validate https certificate for Spectrum Scale RestAPI interface. From d93d2f9b9604583ada7b5b6f5e043d49d73c2872 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Mon, 20 Mar 2023 11:29:50 +0100 Subject: [PATCH 103/113] Fix link to variables documentation Signed-off-by: Achim Christ --- MIGRATING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MIGRATING.md b/MIGRATING.md index 336ec565..de60c6a6 100644 --- a/MIGRATING.md +++ b/MIGRATING.md @@ -55,7 +55,7 @@ The following steps need to be taken in order to consume the `main` branch in yo Refer to the [name mapping table](#role-name-mapping-table) for a list of new role names. -- Some variables have been renamed for consistency as well, but it's expected that these changes only affect very few users. See [#590](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/590) for details, and refer to [VARIABLESNEW.md](VARIABLESNEW.md) for a complete listing of all available variables. +- Some variables have been renamed for consistency as well, but it's expected that these changes only affect very few users. See [#590](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/590) for details, and refer to [VARIABLES.md](VARIABLES.md) for a complete listing of all available variables. ## Role Name Mapping Table From 59760e2d4b103548076e64f3e00340f50531d678 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Mon, 20 Mar 2023 11:32:04 +0100 Subject: [PATCH 104/113] Code formatting Signed-off-by: Achim Christ --- README.md | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 6645f654..f44449ce 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -__Important__: You are viewing the `main` branch of this repository. If you've previously used the `master` branch in your own playbooks then you will need to make some changes in order to switch to the `main` branch. See [MIGRATING.md](MIGRATING.md) for details. +**Important**: You are viewing the `main` branch of this repository. If you've previously used the `master` branch in your own playbooks then you will need to make some changes in order to switch to the `main` branch. See [MIGRATING.md](MIGRATING.md) for details. -* * * +--- IBM Storage Scale (GPFS) Deployment using Ansible Roles ======================================================= @@ -23,21 +23,23 @@ Ansible project with multiple roles for installing and configuring IBM Storage S - [Disclaimer](#disclaimer) - [Copyright and License](#copyright-and-license) - Features -------- #### Infrastructure minimal tested configuration + - [x] Pre-built infrastructure (using a static inventory file) - [ ] Dynamic inventory file #### OS support + - [x] Support for RHEL 7 on x86_64, PPC64 and PPC64LE - [x] Support for RHEL 8 on x86_64 and PPC64LE - [x] Support for UBUNTU 20 on x86_64 and PPC64LE - [x] Support for SLES 15 on x86_64 and PPC64LE #### Common prerequisites + - [x] Disable SELinux (`scale_prepare_disable_selinux: true`), by default false - [x] Disable firewall (`scale_prepare_disable_firewall: true`), by default true. - [ ] Disable firewall ports @@ -154,7 +156,6 @@ Users need to have a basic understanding of the [Ansible concepts](https://docs. 
Repeat this process for all nodes to themselves and to all other nodes. - Installation Instructions ------------------------- @@ -216,7 +217,7 @@ Installation Instructions - core_install - core_configure - core_verify - ``` + ``` Again, this is just a minimal example. There are different installation methods available, each offering a specific set of options: @@ -240,10 +241,10 @@ Installation Instructions ```shell $ cd samples/ $ ./ansible.sh - ``` + ``` > **Note:** - An advantage of using the automation script is that it will generate log files based on the date and the time in the `/tmp` directory. + > An advantage of using the automation script is that it will generate log files based on the date and the time in the `/tmp` directory. - **Playbook execution screen** @@ -264,7 +265,7 @@ Installation Instructions ok: [scale04] ok: [scale05] - TASK [common : check | Check Spectrum Scale version] + TASK [common : check | Check Spectrum Scale version] ********************************************************************************************************* ok: [scale01] ok: [scale02] @@ -287,7 +288,6 @@ Installation Instructions scale05 : ok=0 changed=59 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 ``` - Optional Role Variables ----------------------- @@ -304,7 +304,7 @@ Available Roles The following [roles](https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html) are available for you to reuse when assembling your own [playbook](https://docs.ansible.com/ansible/latest/user_guide/playbooks.html): -- [Core GPFS](roles/core)* +- [Core GPFS](roles/core)\* - [GPFS GUI](roles/gui) - [GPFS SMB](roles/smb) - [GPFS NFS](roles/nfs) @@ -335,21 +335,20 @@ You can create multiple clusters by running multiple plays. Note that you will n ```yaml - name: Create one cluster hosts: cluster01 - roles: - ... + roles: ... + - name: Refresh inventory to clear dynamic groups hosts: localhost connection: local gather_facts: false tasks: - meta: refresh_inventory + - name: Create another cluster hosts: cluster02 - roles: - ... + roles: ... ``` - Limitations ----------- @@ -369,13 +368,11 @@ Reporting Issues and Feedback Please use the [issue tracker](https://github.com/IBM/ibm-spectrum-scale-install-infra/issues) to ask questions, report bugs and request features. - Contributing Code ----------------- We welcome contributions to this project, see [CONTRIBUTING.md](CONTRIBUTING.md) for more details. - Disclaimer ---------- From abb62bdf5248ff10ef960badcc0d9fe94f69d4e8 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Mon, 20 Mar 2023 11:33:12 +0100 Subject: [PATCH 105/113] Improve wording Signed-off-by: Achim Christ --- README.md | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index f44449ce..8f037838 100644 --- a/README.md +++ b/README.md @@ -41,12 +41,11 @@ Features #### Common prerequisites - [x] Disable SELinux (`scale_prepare_disable_selinux: true`), by default false -- [x] Disable firewall (`scale_prepare_disable_firewall: true`), by default true. -- [ ] Disable firewall ports +- [x] Disable firewall (`scale_prepare_disable_firewall: true`), by default false. 
- [ ] Install and start NTP - [ ] Create /etc/hosts mappings - [ ] Open firewall ports -- [x] Generate SSH key +- [x] Generate SSH keys - [x] User must set up base OS repositories #### Core IBM Storage Scale prerequisites @@ -62,23 +61,24 @@ Features - [x] Compile or install pre-compiled Linux kernel extension (mmbuildgpl) - [x] Configure client and server license - [x] Assign default quorum (maximum 7 quorum nodes) if user has not defined in the inventory -- [x] Assign default manager nodes(all nodes will act as manager node) if user has not defined in the inventory +- [x] Assign default manager nodes (all nodes will act as manager nodes) if user has not defined in the inventory - [x] Create new cluster (mmcrcluster -N /var/mmfs/tmp/NodeFile -C {{ scale_cluster_clustername }}) - [x] Create cluster with profiles -- [x] Create Cluster with daemon and admin network +- [x] Create cluster with daemon and admin network - [x] Add new node into existing cluster - [x] Configure node classes - [x] Define configuration parameters based on node classes - [x] Configure NSDs and file system - [ ] Configure NSDs without file system -- [x] Extend NSDs and file system -- [x] Add disks to existing file systems +- [x] Add NSDs +- [x] Add disks to existing file system #### IBM Storage Scale Management GUI features - [x] Install IBM Storage Scale management GUI packages on designated GUI nodes +- [x] Maximum 3 GUI nodes to be configured - [x] Install performance monitoring sensor packages on all Linux nodes -- [x] Install performance monitoring packages on all GUI designated nodes +- [x] Install performance monitoring collector on all designated GUI nodes - [x] Configure performance monitoring and collectors - [ ] Configure HA federated mode collectors @@ -94,7 +94,6 @@ Features - [x] CES IPV4 or IPV6 support - [x] CES interface mode support - Minimal tested Versions ----------------------- @@ -114,9 +113,8 @@ The following IBM Spectrum Scale versions are tested: Specific OS requirements: -- For CES (SMB/NFS) on SLES15, Python 3 is required. -- For CES (OBJECT) RhedHat 8.x is required. - +- For CES (SMB/NFS) on SLES15: Python 3 is required. +- For CES (Object): RhedHat 8.x is required. Prerequisites ------------- @@ -318,13 +316,12 @@ Note that [Core GPFS](roles/core) is the only mandatory role, all other roles ar - Configure Graphical User Interface (GUI) (see [samples/playbook_gui.yml](samples/playbook_gui.yml)) - Configure Protocol Services (SMB & NFS) (see [samples/playbook_ces.yml](samples/playbook_ces.yml)) - Configure Protocol Services (HDFS) (see [samples/playbook_ces_hdfs.yml](samples/playbook_ces_hdfs.yml)) -- Configure Protocol Services (OBJECT) (see [samples/playbook_ces_object.yml](samples/playbook_ces_object.yml)) +- Configure Protocol Services (Object) (see [samples/playbook_ces_object.yml](samples/playbook_ces_object.yml)) - Configure Call Home (see [samples/playbook_callhome.yml](samples/playbook_callhome.yml)) - Configure File Audit Logging (see [samples/playbook_fileauditlogging.yml](samples/playbook_fileauditlogging.yml)) - Configure cluster with daemon and admin network (see [samples/daemon_admin_network](samples/daemon_admin_network)) - Configure remotely mounted filesystems (see [samples/playbook_remote_mount.yml](samples/playbook_remote_mount.yml)) - Cluster Membership ------------------ @@ -352,9 +349,7 @@ You can create multiple clusters by running multiple plays. 
Note that you will n Limitations ----------- -The roles in this project can (currently) be used to create new clusters or extend existing clusters. Similarly, new file systems can be created or extended. But this role does *not* remove existing nodes, disks, file systems or node classes. This is done on purpose — and this is also the reason why it can not be used, for example, to change the file system pool of a disk. Changing the pool requires you to remove and then re-add the disk from a file system, which is not currently in the scope of this role. - -Furthermore, upgrades are not currently in scope of this role. Spectrum Scale supports rolling online upgrades (by taking down one node at a time), but this requires careful planning and monitoring and might require manual intervention in case of unforeseen problems. +The roles in this project can (currently) be used to create new clusters or extend existing clusters. Similarly, new file systems can be created or extended. But this project does _not_ remove existing nodes, disks, file systems or node classes. This is done on purpose — and this is also the reason why it can not be used, for example, to change the file system pool of a disk. Changing the pool requires you to remove and then re-add the disk from a file system, which is not currently in the scope of this project. Furthermore, upgrades are not currently in scope of this role. IBM Storage Scale supports rolling online upgrades (by taking down one node at a time), but this requires careful planning and monitoring and might require manual intervention in case of unforeseen problems. @@ -376,10 +371,9 @@ We welcome contributions to this project, see [CONTRIBUTING.md](CONTRIBUTING.md) Disclaimer ---------- -Please note: all playbooks / modules / resources in this repo are released for use "AS IS" without any warranties of any kind, including, but not limited to their installation, use, or performance. We are not responsible for any damage or charges or data loss incurred with their use. You are responsible for reviewing and testing any scripts you run thoroughly before use in any production environment. This content is subject to change without notice. - +Please note: all roles / playbooks / modules / resources in this repository are released for use "AS IS" without any warranties of any kind, including, but not limited to their installation, use, or performance. We are not responsible for any damage or charges or data loss incurred with their use. You are responsible for reviewing and testing any scripts you run thoroughly before use in any production environment. This content is subject to change without notice. Copyright and License --------------------- -Copyright IBM Corporation 2020, released under the terms of the [Apache License 2.0](LICENSE). +Copyright IBM Corporation, released under the terms of the [Apache License 2.0](LICENSE). 
From 93fd5d8783293ba8cbe8d20daabb60473b842585 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Mon, 20 Mar 2023 11:33:45 +0100 Subject: [PATCH 106/113] Refer to Release Notes for detailled version information Signed-off-by: Achim Christ --- README.md | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 8f037838..907bc42d 100644 --- a/README.md +++ b/README.md @@ -100,16 +100,14 @@ Minimal tested Versions The following Ansible versions are tested: - 2.9 and above +- **Refer to the [Release Notes](https://github.com/IBM/ibm-spectrum-scale-install-infra/releases) for details** -The following IBM Spectrum Scale versions are tested: +The following IBM Storage Scale versions are tested: -- 5.0.4.0 -- 5.0.4.1 -- 5.0.4.2 -- 5.0.5.X -- 5.0.5.2 For CES (SMB and NFS) -- 5.1.0.0 -- 5.1.1.0 with Object +- 5.0.4.0 and above +- 5.0.5.2 and above for CES (SMB and NFS) +- 5.1.1.0 and above for CES (Object) +- **Refer to the [Release Notes](https://github.com/IBM/ibm-spectrum-scale-install-infra/releases) for details** Specific OS requirements: From 322f3b426a3fe10d8d630aa7c624e661ca0001e4 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Mon, 20 Mar 2023 11:42:35 +0100 Subject: [PATCH 107/113] Replace broken links Signed-off-by: Achim Christ --- README.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 907bc42d..b37d74d8 100644 --- a/README.md +++ b/README.md @@ -300,14 +300,15 @@ Available Roles The following [roles](https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html) are available for you to reuse when assembling your own [playbook](https://docs.ansible.com/ansible/latest/user_guide/playbooks.html): -- [Core GPFS](roles/core)\* -- [GPFS GUI](roles/gui) -- [GPFS SMB](roles/smb) -- [GPFS NFS](roles/nfs) -- [GPFS OBJECT](roles/scale_object) -- [GPFS HDFS](roles/scale_hdfs) -- [GPFS Call Home](roles/callhome) -- [GPFS File Audit Logging](roles/scale_fileauditlogging) +- Core GPFS (`roles/core_*`)\* +- GUI (`roles/gui_*`) +- SMB (`roles/smb_*`) +- NFS (`roles/nfs_*`) +- Object (`roles/obj_*`) +- HDFS (`roles/hdfs_*`) +- Call Home (`roles/callhome_*`) +- File Audit Logging (`roles/fal_*`) +- ... Note that [Core GPFS](roles/core) is the only mandatory role, all other roles are optional. Each of the optional roles requires additional configuration variables. 
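As a rough illustration only, combining the mandatory core roles with one optional role group might look like the sketch below. The `core_*` role names match the minimal installation example earlier in this README, and `nfs_install` appears in this repository; the remaining `nfs_*` names are assumptions based on the same prepare/install/configure/verify pattern, so verify the exact role names and their required variables against the role directories and [VARIABLES.md](VARIABLES.md) before use:

```yaml
# Hypothetical sketch: core cluster plus CES NFS services.
# The core_* roles and nfs_install exist in this repository; the other
# nfs_* names are assumed to follow the same pattern -- check roles/
# and VARIABLES.md for the exact names and required variables.
- hosts: cluster01
  roles:
    - core_prepare     # node preparation and prechecks
    - core_install     # install IBM Storage Scale packages
    - core_configure   # create or extend the cluster, NSDs, file systems
    - core_verify      # verify the cluster state
    - nfs_prepare      # (assumed name) protocol node prechecks
    - nfs_install      # install CES NFS packages
    - nfs_configure    # (assumed name) configure CES NFS services
    - nfs_verify       # (assumed name) verify CES NFS services
```

Keep in mind that the optional protocol roles need additional CES-specific configuration variables; the sample playbooks listed below show complete, working configurations.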
Browse the examples in the [samples/](samples/) directory to learn how to: From 167459be407c64b3126f428cff41c13d886f4fd6 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Mon, 20 Mar 2023 17:40:29 +0100 Subject: [PATCH 108/113] Fix ansible-core 2.10 deprecation warnings Signed-off-by: Achim Christ --- roles/core_common/tasks/check.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/core_common/tasks/check.yml b/roles/core_common/tasks/check.yml index 15b96601..bd1bf5be 100644 --- a/roles/core_common/tasks/check.yml +++ b/roles/core_common/tasks/check.yml @@ -91,15 +91,15 @@ # set dynamic variable based on supported OS - name: check | Set variables based on yum/dnf based OS - include: yum/set_vars.yml + include_tasks: yum/set_vars.yml when: ansible_distribution in scale_rhel_distribution - name: check | Set variables based on apt based os - include: apt/set_vars.yml + include_tasks: apt/set_vars.yml when: ansible_distribution in scale_ubuntu_distribution - name: check | Set variables based on zypper based OS - include: zypper/set_vars.yml + include_tasks: zypper/set_vars.yml when: ansible_distribution in scale_sles_distribution # Copy and import gpg key on RHEL and SLES if gpfs version >= 5.0.5.0 From 6d9377f60d36d60d37b1c387fa910bd828f90067 Mon Sep 17 00:00:00 2001 From: Achim Christ Date: Thu, 23 Mar 2023 17:04:38 +0100 Subject: [PATCH 109/113] Modernize wait-for-server handler Signed-off-by: Achim Christ --- roles/core_prepare/handlers/main.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/roles/core_prepare/handlers/main.yml b/roles/core_prepare/handlers/main.yml index 4b4ca3a7..8330d9a0 100644 --- a/roles/core_prepare/handlers/main.yml +++ b/roles/core_prepare/handlers/main.yml @@ -23,11 +23,7 @@ ignore_errors: true - name: wait-for-server - wait_for: - host: "{{ ansible_default_ipv4.address }}" - port: 22 - state: started + wait_for_connection: delay: 45 timeout: 300 delegate_to: localhost -# handlers file for precheck From d56ffa4b8addad2cce17826812e63cf7ffe8d1ff Mon Sep 17 00:00:00 2001 From: rajanmis Date: Fri, 21 Apr 2023 08:35:17 +0200 Subject: [PATCH 110/113] GPGKey rename change Signed-off-by: Rajan Mishra --- roles/core_common/defaults/main.yml | 2 ++ roles/core_common/tasks/check.yml | 12 +++++++++++- roles/core_common/vars/main.yml | 4 ++-- roles/core_install/tasks/install_dir_pkg.yml | 4 ++-- roles/core_install/tasks/install_local_pkg.yml | 2 +- roles/core_install/tasks/install_remote_pkg.yml | 2 +- 6 files changed, 19 insertions(+), 7 deletions(-) diff --git a/roles/core_common/defaults/main.yml b/roles/core_common/defaults/main.yml index 8289e317..425e0efa 100644 --- a/roles/core_common/defaults/main.yml +++ b/roles/core_common/defaults/main.yml @@ -35,3 +35,5 @@ scale_install_localpkg_tmpdir_path: /tmp ## Enable/disable gpg key flag scale_enable_gpg_check: true +## Storage Scale GPG key filename +scale_gpg_key_name: "SpectrumScale_public_key.pgp" diff --git a/roles/core_common/tasks/check.yml b/roles/core_common/tasks/check.yml index 15b96601..b6aa9e39 100644 --- a/roles/core_common/tasks/check.yml +++ b/roles/core_common/tasks/check.yml @@ -102,6 +102,16 @@ include: zypper/set_vars.yml when: ansible_distribution in scale_sles_distribution +- name: check | Storage Scale GPG key + set_fact: + scale_gpg_key_name: "Storage_Scale_public_key.pgp" + when: scale_version is defined and scale_version >= "5.1.8.0" + +- name: check | Storage Scale GPG key + set_fact: + scale_gpg_key_name: "SpectrumScale_public_key.pgp" + when: 
scale_version is defined and scale_version == "5.1.9.0" + # Copy and import gpg key on RHEL and SLES if gpfs version >= 5.0.5.0 - block: - name: check | Copy key @@ -112,7 +122,7 @@ - rpm_key: state: present - key: "{{ scale_gpgKey_dest }}/SpectrumScale_public_key.pgp" + key: "{{ scale_gpgKey_dest }}/{{ scale_gpg_key_name }}" when: ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) and scale_enable_gpg_check and scale_version >= "5.0.5.0" and scale_install_repository_url is defined) diff --git a/roles/core_common/vars/main.yml b/roles/core_common/vars/main.yml index 0b5e9ad8..54424c53 100644 --- a/roles/core_common/vars/main.yml +++ b/roles/core_common/vars/main.yml @@ -40,8 +40,8 @@ scale_sles_distribution: ## Specify package extraction path and gpg key path scale_extracted_default_path: "/usr/lpp/mmfs" scale_extracted_path: "{{ scale_extracted_default_path }}/{{ scale_version }}" -scale_gpgKey_src: "/usr/lpp/mmfs/{{ scale_version }}/Public_Keys/SpectrumScale_public_key.pgp" -scale_gpgKey_repository_src: "{{ scale_install_repository_url }}Public_Keys/SpectrumScale_public_key.pgp" +scale_gpgKey_src: "/usr/lpp/mmfs/{{ scale_version }}/Public_Keys/{{ scale_gpg_key_name }}" +scale_gpgKey_repository_src: "{{ scale_install_repository_url }}Public_Keys/{{ scale_gpg_key_name }}" scale_gpgKey_dest: "/root/" scale_install_gpgcheck: "yes" scale_disable_gpgcheck: "no" diff --git a/roles/core_install/tasks/install_dir_pkg.yml b/roles/core_install/tasks/install_dir_pkg.yml index 71c6a3d3..f4fc360e 100644 --- a/roles/core_install/tasks/install_dir_pkg.yml +++ b/roles/core_install/tasks/install_dir_pkg.yml @@ -55,7 +55,7 @@ - name: Import a gpg key from a file rpm_key: state: present - key: "{{ dir_path }}/SpectrumScale_public_key.pgp" + key: "{{ dir_path }}/{{ scale_gpg_key_name }}" when: - ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) and scale_enable_gpg_check and scale_version >= "5.0.5.0") @@ -64,7 +64,7 @@ - name: Import a gpg key from a file rpm_key: state: present - key: "{{ scale_gpgkey_path }}/SpectrumScale_public_key.pgp" + key: "{{ scale_gpgkey_path }}/{{ scale_gpg_key_name }}" when: - ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) and scale_enable_gpg_check and scale_version >= "5.0.5.0") diff --git a/roles/core_install/tasks/install_local_pkg.yml b/roles/core_install/tasks/install_local_pkg.yml index 8c621053..dd93e952 100644 --- a/roles/core_install/tasks/install_local_pkg.yml +++ b/roles/core_install/tasks/install_local_pkg.yml @@ -110,7 +110,7 @@ - rpm_key: state: present - key: "{{ scale_gpgKey_dest }}SpectrumScale_public_key.pgp" + key: "{{ scale_gpgKey_dest }}{{ scale_gpg_key_name }}" when: ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) and scale_enable_gpg_check and scale_version >= "5.0.5.0") diff --git a/roles/core_install/tasks/install_remote_pkg.yml b/roles/core_install/tasks/install_remote_pkg.yml index f7885fd1..fb0274a4 100644 --- a/roles/core_install/tasks/install_remote_pkg.yml +++ b/roles/core_install/tasks/install_remote_pkg.yml @@ -81,7 +81,7 @@ - rpm_key: state: present - key: "{{ scale_gpgKey_dest }}SpectrumScale_public_key.pgp" + key: "{{ scale_gpgKey_dest }}{{ scale_gpg_key_name }}" when: ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) and scale_enable_gpg_check and scale_version >= "5.0.5.0") From 
becc0c0c0f3f9bb3f11368a9497cdcfd6784fe0d Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Thu, 27 Apr 2023 11:19:46 +0200 Subject: [PATCH 111/113] Public key rename fix Signed-off-by: Rajan Mishra --- roles/core_common/tasks/check.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/roles/core_common/tasks/check.yml b/roles/core_common/tasks/check.yml index b6aa9e39..ce7f7e05 100644 --- a/roles/core_common/tasks/check.yml +++ b/roles/core_common/tasks/check.yml @@ -107,11 +107,6 @@ scale_gpg_key_name: "Storage_Scale_public_key.pgp" when: scale_version is defined and scale_version >= "5.1.8.0" -- name: check | Storage Scale GPG key - set_fact: - scale_gpg_key_name: "SpectrumScale_public_key.pgp" - when: scale_version is defined and scale_version == "5.1.9.0" - # Copy and import gpg key on RHEL and SLES if gpfs version >= 5.0.5.0 - block: - name: check | Copy key From dda59f9f42e5e49ce613e196f92654b2fa57b84f Mon Sep 17 00:00:00 2001 From: Rajan Mishra Date: Mon, 3 Jul 2023 17:17:56 +0200 Subject: [PATCH 112/113] Added option for NSDUsage and Storage pool for ECE Signed-off-by: Rajan Mishra --- roles/ece_configure/tasks/create_vdisk.yml | 24 ++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/roles/ece_configure/tasks/create_vdisk.yml b/roles/ece_configure/tasks/create_vdisk.yml index e239dbc7..55b25569 100644 --- a/roles/ece_configure/tasks/create_vdisk.yml +++ b/roles/ece_configure/tasks/create_vdisk.yml @@ -6,6 +6,22 @@ changed_when: false failed_when: false + - name: create | Initialize + set_fact: + extra_option_flag: "" + + - name: create | Set NSD usase if it is defined + set_fact: + extra_option_flag: "{{ extra_option_flag }} --nsd-usage {{ item.nsdUsage }}" + when: + - item.nsdUsage is defined + + - name: create | Set Storage pool if it is defined + set_fact: + extra_option_flag: "{{ extra_option_flag }} --storage-pool {{ item.poolName }}" + when: + - item.poolName is defined + - name: create | Define Vdiskset vars: current_vs: "{{ item.vdisk | default('vs_' + (item.rg | regex_replace('\\W', '_')) | basename) }}" @@ -13,7 +29,7 @@ current_code: "{{ item.ec }}" current_bs: "{{ item.blocksize }}" current_size: "{{ item.Size }}" - command: "{{ scale_command_path }}mmvdisk vs define --vs {{ current_vs }} --rg {{ current_rg }} --code {{ current_code }} --bs {{ current_bs }} --ss {{ current_size }}" + command: "{{ scale_command_path }}mmvdisk vs define --vs {{ current_vs }} --rg {{ current_rg }} --code {{ current_code }} --bs {{ current_bs }} --ss {{ current_size }} {{ extra_option_flag }}" register: scale_vs_define failed_when: scale_vs_define.rc != 0 when: @@ -33,7 +49,7 @@ current_bs: "{{ item.blocksize }}" current_size: "{{ item.Size }}" extra_option: "{{ item.da }}" - command: "{{ scale_command_path }}mmvdisk vs define --vs {{ current_vs }} --rg {{ current_rg }} --code {{ current_code }} --bs {{ current_bs }} --ss {{ current_size }} --da {{ extra_option }}" + command: "{{ scale_command_path }}mmvdisk vs define --vs {{ current_vs }} --rg {{ current_rg }} --code {{ current_code }} --bs {{ current_bs }} --ss {{ current_size }} --da {{ extra_option }} {{ extra_option_flag }}" register: scale_vs_define failed_when: scale_vs_define.rc != 0 when: @@ -54,6 +70,10 @@ when: - item.vdisk is defined + - debug: + msg: "{{ scale_vs_define.cmd }}" + when: scale_vs_define.cmd is defined + - name: create | Add vdisks to desire filesystem debug: msg: Vdisks created, add them to your filesystem using mmadddisk From b60e8bfd826a37109e9741e3cac1aca0bca188b7 Mon Sep 
17 00:00:00 2001 From: root Date: Tue, 4 Jul 2023 11:46:19 +0200 Subject: [PATCH 113/113] Install pm-ganesha packages on local package install for sles. Add to commit file : Signed-off-by: Christoph Keil chkeil@de.ibm.com --- roles/nfs_install/tasks/install_local_pkg.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/roles/nfs_install/tasks/install_local_pkg.yml b/roles/nfs_install/tasks/install_local_pkg.yml index 3867582a..8a0236da 100644 --- a/roles/nfs_install/tasks/install_local_pkg.yml +++ b/roles/nfs_install/tasks/install_local_pkg.yml @@ -337,7 +337,6 @@ assert: that: scale_install_gpfs_nfs_pm.matched > 0 msg: "No GPFS utils (gpfs.pm-ganesha) package found {{ nfs_extracted_path }}/{{ scale_zimon_url }}gpfs.pm-ganesha*" - when: ansible_distribution in scale_rhel_distribution or ansible_distribution in scale_ubuntu_distribution - block: ## when: host is defined as a protocol node @@ -371,7 +370,6 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ current_package ] }}" with_items: - "{{ scale_install_gpfs_nfs_pm.files }}" - when: ansible_distribution in scale_rhel_distribution or ansible_distribution in scale_ubuntu_distribution - block: - name: initialize