@@ -13,6 +13,7 @@
 short_description: Manage pacemaker clusters
 author:
   - Mathieu Bultel (@matbu)
+  - Dexter Le (@munchtoast)
 description:
   - This module can manage a pacemaker cluster and nodes from Ansible using the pacemaker CLI.
 extends_documentation_fragment:
@@ -26,18 +27,20 @@
   state:
     description:
       - Indicate desired state of the cluster.
-    choices: [cleanup, offline, online, restart]
+      - The value V(maintenance) has been added in community.general 11.1.0.
+    choices: [cleanup, offline, online, restart, maintenance]
     type: str
-  node:
+  name:
     description:
       - Specify which node of the cluster you want to manage. V(null) == the cluster status itself, V(all) == check the status
         of all nodes.
     type: str
+    aliases: ['node']
   timeout:
     description:
-      - Timeout when the module should considered that the action has failed.
-    default: 300
+      - Timeout period (in seconds) for polling the cluster operation.
     type: int
+    default: 300
   force:
     description:
       - Force the change of the cluster state.
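A quick usage sketch of the new option surface (not taken from the PR itself): it assumes the module is community.general.pacemaker_cluster, as the collection paths below suggest, and the node name is illustrative.

- name: Put the cluster into maintenance mode (value added in community.general 11.1.0)
  community.general.pacemaker_cluster:
    state: maintenance

- name: Bring a single node online via the renamed option (node remains an alias of name)
  community.general.pacemaker_cluster:
    state: online
    name: node-1  # illustrative node name
    timeout: 300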
@@ -63,132 +66,104 @@
   returned: always
 """
 
-import time
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node"
-
-
-def get_cluster_status(module):
-    cmd = ["pcs", "cluster", "status"]
-    rc, out, err = module.run_command(cmd)
-    if out in _PCS_CLUSTER_DOWN:
-        return 'offline'
-    else:
-        return 'online'
-
-
-def get_node_status(module, node='all'):
-    node_l = ["all"] if node == "all" else []
-    cmd = ["pcs", "cluster", "pcsd-status"] + node_l
-    rc, out, err = module.run_command(cmd)
-    if rc == 1:
-        module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
-    status = []
-    for o in out.splitlines():
-        status.append(o.split(':'))
-    return status
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner, get_pacemaker_maintenance_mode
 
 
-def clean_cluster(module, timeout):
-    cmd = ["pcs", "resource", "cleanup"]
-    rc, out, err = module.run_command(cmd)
-    if rc == 1:
-        module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
-
-
-def set_cluster(module, state, timeout, force):
-    if state == 'online':
-        cmd = ["pcs", "cluster", "start"]
-    if state == 'offline':
-        cmd = ["pcs", "cluster", "stop"]
-    if force:
-        cmd = cmd + ["--force"]
-    rc, out, err = module.run_command(cmd)
-    if rc == 1:
-        module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
-
-    t = time.time()
-    ready = False
-    while time.time() < t + timeout:
-        cluster_state = get_cluster_status(module)
-        if cluster_state == state:
-            ready = True
-            break
-    if not ready:
-        module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+class PacemakerCluster(StateModuleHelper):
+    module = dict(
+        argument_spec=dict(
+            state=dict(type='str', choices=[
+                'cleanup', 'offline', 'online', 'restart', 'maintenance']),
+            name=dict(type='str', aliases=['node']),
+            timeout=dict(type='int', default=300),
+            force=dict(type='bool', default=True)
+        ),
+        supports_check_mode=True,
+    )
+    default_state = ""
+
+    def __init_module__(self):
+        self.runner = pacemaker_runner(self.module)
+        self.vars.set('apply_all', True if not self.module.params['name'] else False)
+        get_args = dict([('cli_action', 'cluster'), ('state', 'status'), ('name', None), ('apply_all', self.vars.apply_all)])
+        if self.module.params['state'] == "maintenance":
+            get_args['cli_action'] = "property"
+            get_args['state'] = "config"
+            get_args['name'] = "maintenance-mode"
+        elif self.module.params['state'] == "cleanup":
+            get_args['cli_action'] = "resource"
+            get_args['name'] = self.module.params['name']
+
+        self.vars.set('get_args', get_args)
+        self.vars.set('previous_value', self._get()['out'])
+        self.vars.set('value', self.vars.previous_value, change=True, diff=True)
+
+        if not self.module.params['state']:
+            self.module.deprecate(
+                'Leaving the "state" parameter unset is deprecated. Make sure to provide a value for "state"',
+                version='12.0.0',
+                collection_name='community.general'
+            )
+
+    def __quit_module__(self):
+        self.vars.set('value', self._get()['out'])
+
+    def _process_command_output(self, fail_on_err, ignore_err_msg=""):
+        def process(rc, out, err):
+            if fail_on_err and rc != 0 and err and ignore_err_msg not in err:
+                self.do_raise('pcs failed with error (rc={0}): {1}'.format(rc, err))
+            out = out.rstrip()
+            return None if out == "" else out
+        return process
+
+    def _get(self):
+        with self.runner('cli_action state name') as ctx:
+            result = ctx.run(cli_action=self.vars.get_args['cli_action'], state=self.vars.get_args['state'], name=self.vars.get_args['name'])
+        return dict([('rc', result[0]),
+                     ('out', result[1] if result[1] != "" else None),
+                     ('err', result[2])])
+
+    def state_cleanup(self):
+        with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx:
+            ctx.run(cli_action='resource')
+
+    def state_offline(self):
+        with self.runner('cli_action state name apply_all wait',
+                         output_process=self._process_command_output(True, "not currently running"),
+                         check_mode_skip=True) as ctx:
+            ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout'])
+
+    def state_online(self):
+        with self.runner('cli_action state name apply_all wait',
+                         output_process=self._process_command_output(True, "currently running"),
+                         check_mode_skip=True) as ctx:
+            ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout'])
+
+        if get_pacemaker_maintenance_mode(self.runner):
+            with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx:
+                ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false')
+
+    def state_maintenance(self):
+        with self.runner('cli_action state name',
+                         output_process=self._process_command_output(True, "Fail"),
+                         check_mode_skip=True) as ctx:
+            ctx.run(cli_action='property', name='maintenance-mode=true')
+
+    def state_restart(self):
+        with self.runner('cli_action state name apply_all wait',
+                         output_process=self._process_command_output(True, "not currently running"),
+                         check_mode_skip=True) as ctx:
+            ctx.run(cli_action='cluster', state='offline', apply_all=self.vars.apply_all, wait=self.module.params['timeout'])
+            ctx.run(cli_action='cluster', state='online', apply_all=self.vars.apply_all, wait=self.module.params['timeout'])
+
+        if get_pacemaker_maintenance_mode(self.runner):
+            with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx:
+                ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false')
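For orientation, not part of the diff: StateModuleHelper dispatches the state parameter to the matching state_<value> method above (state=cleanup runs state_cleanup, and so on), while pacemaker_runner assembles the underlying pcs command line. A hedged sketch of the cleanup state, which per state_cleanup acts on pcs resource for the given name; the resource name is hypothetical:

- name: Clear the failure history of one resource (roughly pcs resource cleanup)
  community.general.pacemaker_cluster:
    state: cleanup
    name: my-resource  # hypothetical resource name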
 
 
 def main():
-    argument_spec = dict(
-        state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']),
-        node=dict(type='str'),
-        timeout=dict(type='int', default=300),
-        force=dict(type='bool', default=True),
-    )
-
-    module = AnsibleModule(
-        argument_spec,
-        supports_check_mode=True,
-    )
-    changed = False
-    state = module.params['state']
-    node = module.params['node']
-    force = module.params['force']
-    timeout = module.params['timeout']
-
-    if state in ['online', 'offline']:
-        # Get cluster status
-        if node is None:
-            cluster_state = get_cluster_status(module)
-            if cluster_state == state:
-                module.exit_json(changed=changed, out=cluster_state)
-            else:
-                if module.check_mode:
-                    module.exit_json(changed=True)
-                set_cluster(module, state, timeout, force)
-                cluster_state = get_cluster_status(module)
-                if cluster_state == state:
-                    module.exit_json(changed=True, out=cluster_state)
-                else:
-                    module.fail_json(msg="Fail to bring the cluster %s" % state)
-        else:
-            cluster_state = get_node_status(module, node)
-            # Check cluster state
-            for node_state in cluster_state:
-                if node_state[1].strip().lower() == state:
-                    module.exit_json(changed=changed, out=cluster_state)
-                else:
-                    if module.check_mode:
-                        module.exit_json(changed=True)
-                    # Set cluster status if needed
-                    set_cluster(module, state, timeout, force)
-                    cluster_state = get_node_status(module, node)
-                    module.exit_json(changed=True, out=cluster_state)
-
-    elif state == 'restart':
-        if module.check_mode:
-            module.exit_json(changed=True)
-        set_cluster(module, 'offline', timeout, force)
-        cluster_state = get_cluster_status(module)
-        if cluster_state == 'offline':
-            set_cluster(module, 'online', timeout, force)
-            cluster_state = get_cluster_status(module)
-            if cluster_state == 'online':
-                module.exit_json(changed=True, out=cluster_state)
-            else:
-                module.fail_json(msg="Failed during the restart of the cluster, the cluster cannot be started")
-        else:
-            module.fail_json(msg="Failed during the restart of the cluster, the cluster cannot be stopped")
-
-    elif state == 'cleanup':
-        if module.check_mode:
-            module.exit_json(changed=True)
-        clean_cluster(module, timeout)
-        cluster_state = get_cluster_status(module)
-        module.exit_json(changed=True, out=cluster_state)
+    PacemakerCluster.execute()
 
 
 if __name__ == '__main__':
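One last sketch, not part of the diff: per state_restart above, a restart stops and then starts the cluster (passing timeout through as the runner's wait value) and, like state_online, turns maintenance-mode back off if the cluster was left in maintenance.

- name: Restart all cluster nodes, waiting up to 10 minutes per operation
  community.general.pacemaker_cluster:
    state: restart
    timeout: 600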