Skip to content

Commit 5d7ecf8

Browse files
committed
Fixing merge issue
ProxySQL add reader node if not in reader group
1 parent 7d7b623 commit 5d7ecf8

File tree

5 files changed

+12
-69
lines changed

5 files changed

+12
-69
lines changed

cluster/cluster_chk.go

+1-5
Original file line numberDiff line numberDiff line change
@@ -260,11 +260,7 @@ func (cluster *Cluster) isFoundCandidateMaster() bool {
260260
return true
261261
}
262262
key := -1
263-
if cluster.Conf.MultiMasterGrouprep {
264-
key = cluster.electSwitchoverGroupReplicationCandidate(cluster.slaves, true)
265-
} else {
266-
key = cluster.electFailoverCandidate(cluster.slaves, false)
267-
}
263+
key = cluster.electFailoverCandidate(cluster.slaves, false)
268264
if key == -1 {
269265
cluster.sme.AddState("ERR00032", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00032"]), ErrFrom: "CHECK"})
270266
return false

cluster/cluster_topo.go

+2-28
Original file line numberDiff line numberDiff line change
@@ -170,23 +170,9 @@ func (cluster *Cluster) TopologyDiscover(wcg *sync.WaitGroup) error {
170170
cluster.LogPrintf(LvlDbg, "Server %s is configured as a slave", sv.URL)
171171
}
172172
cluster.slaves = append(cluster.slaves, sv)
173-
<<<<<<< HEAD
174173
} else {
175174
// not slave
176-
177175
if sv.BinlogDumpThreads == 0 && sv.State != stateMaster {
178-
=======
179-
} else { // not slave
180-
if sv.IsGroupReplicationMaster {
181-
cluster.master = cluster.Servers[k]
182-
cluster.vmaster = cluster.Servers[k]
183-
cluster.master.SetMaster()
184-
if cluster.master.IsReadOnly() {
185-
cluster.master.SetReadWrite()
186-
cluster.LogPrintf(LvlInfo, "Group replication server %s disable read only ", cluster.master.URL)
187-
}
188-
} else if sv.BinlogDumpThreads == 0 && sv.State != stateMaster {
189-
>>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464
190176
//sv.State = stateUnconn
191177
//transition to standalone may happen despite server have never connect successfully when default to suspect
192178
if cluster.Conf.LogLevel > 2 {
@@ -201,15 +187,7 @@ func (cluster *Cluster) TopologyDiscover(wcg *sync.WaitGroup) error {
201187
cluster.SetState("ERR00063", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00063"]), ErrFrom: "TOPO"})
202188
// cluster.Servers[k].RejoinMaster() /* remove for rolling restart , wrongly rejoin server as master before just after swithover while the server is just stopping */
203189
} else {
204-
<<<<<<< HEAD
205-
=======
206-
if cluster.Conf.LogLevel > 2 {
207-
cluster.LogPrintf(LvlDbg, "Server %s was set master as last non slave", sv.URL)
208-
}
209-
if len(cluster.Servers) == 1 {
210-
cluster.Conf.ActivePassive = true
211-
}
212-
>>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464
190+
213191
cluster.master = cluster.Servers[k]
214192
cluster.master.SetMaster()
215193
if cluster.master.IsReadOnly() && !cluster.master.IsRelay {
@@ -223,11 +201,7 @@ func (cluster *Cluster) TopologyDiscover(wcg *sync.WaitGroup) error {
223201
} //end loop all servers
224202

225203
// If no cluster.slaves are detected, generate an error
226-
<<<<<<< HEAD
227-
if len(cluster.slaves) == 0 && cluster.GetTopology() != topoMultiMasterWsrep {
228-
=======
229-
if len(cluster.slaves) == 0 && cluster.GetTopology() != topoMultiMasterWsrep && cluster.GetTopology() != topoMultiMasterGrouprep && cluster.GetTopology() != topoActivePassive {
230-
>>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464
204+
if len(cluster.slaves) == 0 && cluster.GetTopology() != topoMultiMasterWsrep && cluster.GetTopology() != topoActivePassive {
231205
cluster.SetState("ERR00010", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00010"]), ErrFrom: "TOPO"})
232206
}
233207

cluster/prx_proxysql.go

+5-1
Original file line numberDiff line numberDiff line change
@@ -401,16 +401,20 @@ func (proxy *ProxySQLProxy) Refresh() error {
401401
updated = true
402402
}
403403
} else if s.IsSlave && !s.IsIgnored() && (s.PrevState == stateUnconn || s.PrevState == stateFailed) {
404+
404405
err = psql.SetReader(misc.Unbracket(s.Host), s.Port)
406+
405407
if cluster.Conf.ProxysqlDebug {
406408
cluster.LogPrintf(LvlInfo, "Monitor ProxySQL setting reader standalone server %s", s.URL)
407409
}
408410
if err != nil {
409411
cluster.sme.AddState("ERR00072", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00072"], err, s.URL), ErrFrom: "PRX", ServerUrl: proxy.Name})
410412
}
411413
updated = true
414+
} else if s.IsSlave && !isFoundBackendRead && !s.IsIgnored() {
415+
err = psql.AddServerAsReader(misc.Unbracket(s.Host), s.Port, "1", strconv.Itoa(s.ClusterGroup.Conf.PRXServersBackendMaxReplicationLag), strconv.Itoa(s.ClusterGroup.Conf.PRXServersBackendMaxConnections), strconv.Itoa(misc.Bool2Int(s.ClusterGroup.Conf.PRXServersBackendCompression)), proxy.UseSSL())
416+
updated = true
412417
}
413-
414418
} //if bootstrap
415419

416420
// load the grants

cluster/srv_rejoin.go

+3-31
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,7 @@ func (server *ServerMonitor) RejoinMaster() error {
5656
if server.URL != server.ClusterGroup.master.URL {
5757
server.ClusterGroup.SetState("WARN0022", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0022"], server.URL, server.ClusterGroup.master.URL), ErrFrom: "REJOIN"})
5858
server.RejoinScript()
59+
5960
if server.ClusterGroup.Conf.FailoverSemiSyncState {
6061
server.ClusterGroup.LogPrintf("INFO", "Set semisync replica and disable semisync leader %s", server.URL)
6162
logs, err := server.SetSemiSyncReplica()
@@ -90,37 +91,10 @@ func (server *ServerMonitor) RejoinMaster() error {
9091
server.ClusterGroup.LogPrintf("ERROR", "State transfer rejoin failed")
9192
}
9293
}
93-
<<<<<<< HEAD
9494
if server.ClusterGroup.Conf.AutorejoinBackupBinlog == true {
9595
server.saveBinlog(crash)
9696
}
9797

98-
}
99-
} else {
100-
//no master discovered
101-
if server.ClusterGroup.lastmaster != nil {
102-
if server.ClusterGroup.lastmaster.ServerID == server.ServerID {
103-
server.ClusterGroup.LogPrintf("INFO", "Rediscovering last seen master: %s", server.URL)
104-
server.ClusterGroup.master = server
105-
server.ClusterGroup.lastmaster = nil
106-
} else {
107-
if server.ClusterGroup.Conf.FailRestartUnsafe == false {
108-
server.ClusterGroup.LogPrintf("INFO", "Rediscovering last seen master: %s", server.URL)
109-
110-
server.rejoinMasterAsSlave()
111-
112-
}
113-
}
114-
} else {
115-
if server.ClusterGroup.Conf.FailRestartUnsafe == true {
116-
server.ClusterGroup.LogPrintf("INFO", "Restart Unsafe Picking first non-slave as master: %s", server.URL)
117-
server.ClusterGroup.master = server
118-
}
119-
}
120-
// if consul or internal proxy need to adapt read only route to new slaves
121-
server.ClusterGroup.backendStateChangeProxies()
122-
=======
123-
12498
// if consul or internal proxy need to adapt read only route to new slaves
12599
server.ClusterGroup.backendStateChangeProxies()
126100
}
@@ -137,17 +111,15 @@ func (server *ServerMonitor) RejoinMaster() error {
137111
if server.ClusterGroup.Conf.FailRestartUnsafe == false {
138112
server.ClusterGroup.LogPrintf("INFO", "Rediscovering not the master from last seen master: %s", server.URL)
139113
server.rejoinMasterAsSlave()
140-
// if consul or internal proxy need to adapt read only route to new slaves
141-
server.ClusterGroup.backendStateChangeProxies()
142114
} else {
143115
server.ClusterGroup.LogPrintf("INFO", "Rediscovering unsafe possibly electing old leader after cascading failure to flavor availability: %s", server.URL)
144116
server.ClusterGroup.master = server
145117
}
146118
}
147-
119+
// if consul or internal proxy need to adapt read only route to new slaves
120+
server.ClusterGroup.backendStateChangeProxies()
148121
} // we have last seen master
149122

150-
>>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464
151123
}
152124
return nil
153125
}

cluster/srv_set.go

+1-4
Original file line numberDiff line numberDiff line change
@@ -57,14 +57,11 @@ func (server *ServerMonitor) SetState(state string) {
5757
}
5858

5959
func (server *ServerMonitor) SetPrevState(state string) {
60-
<<<<<<< HEAD
61-
server.ClusterGroup.LogPrintf(LvlInfo, "Server %s previous state changed to: %s", server.URL, state)
62-
=======
60+
6361
if state == "" {
6462
return
6563
}
6664
server.ClusterGroup.LogPrintf(LvlInfo, "Server %s previous state set to: %s", server.URL, state)
67-
>>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464
6865
server.PrevState = state
6966
}
7067

0 commit comments

Comments (0)