@@ -25,6 +25,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/api/v3/version"
 	"go.etcd.io/etcd/client/pkg/v3/fileutil"
 	"go.etcd.io/etcd/client/pkg/v3/types"
@@ -51,6 +52,10 @@ func TestDowngradeUpgradeClusterOf1(t *testing.T) {
 	testDowngradeUpgrade(t, 1, 1, false, noCancellation)
 }
 
+func TestDowngradeUpgrade2InClusterOf3(t *testing.T) {
+	testDowngradeUpgrade(t, 2, 3, false, noCancellation)
+}
+
 func TestDowngradeUpgradeClusterOf3(t *testing.T) {
 	testDowngradeUpgrade(t, 3, 3, false, noCancellation)
 }
@@ -128,6 +133,9 @@ func testDowngradeUpgrade(t *testing.T, numberOfMembersToDowngrade int, clusterS
 		time.Sleep(etcdserver.HealthInterval)
 	}
 
+	t.Log("Downgrade should be disabled")
+	e2e.ValidateDowngradeInfo(t, epc, &pb.DowngradeInfo{Enabled: false})
+
 	t.Log("Adding member to test membership, but a learner avoid breaking quorum")
 	resp, err := cc.MemberAddAsLearner(context.Background(), "fake1", []string{"http://127.0.0.1:1001"})
 	require.NoError(t, err)
@@ -150,6 +158,10 @@ func testDowngradeUpgrade(t *testing.T, numberOfMembersToDowngrade int, clusterS
 		return // No need to perform downgrading, end the test here
 	}
 	e2e.DowngradeEnable(t, epc, lastVersion)
+
+	t.Log("Downgrade should be enabled")
+	e2e.ValidateDowngradeInfo(t, epc, &pb.DowngradeInfo{Enabled: true, TargetVersion: lastClusterVersion.String()})
+
 	if triggerCancellation == cancelRightAfterEnable {
 		t.Logf("Cancelling downgrade right after enabling (no node is downgraded yet)")
 		e2e.DowngradeCancel(t, epc)
@@ -162,10 +174,10 @@ func testDowngradeUpgrade(t *testing.T, numberOfMembersToDowngrade int, clusterS
 	t.Logf("Elect members for operations on members: %v", membersToChange)
 
 	t.Logf("Starting downgrade process to %q", lastVersionStr)
-	err = e2e.DowngradeUpgradeMembersByID(t, nil, epc, membersToChange, currentVersion, lastClusterVersion)
+	err = e2e.DowngradeUpgradeMembersByID(t, nil, epc, membersToChange, true, currentVersion, lastClusterVersion)
 	require.NoError(t, err)
 	if len(membersToChange) == len(epc.Procs) {
-		e2e.AssertProcessLogs(t, leader(t, epc), "the cluster has been downgraded")
+		e2e.AssertProcessLogs(t, epc.Procs[epc.WaitLeader(t)], "the cluster has been downgraded")
 	}
 
 	t.Log("Downgrade complete")
@@ -198,10 +210,19 @@ func testDowngradeUpgrade(t *testing.T, numberOfMembersToDowngrade int, clusterS
 	beforeMembers, beforeKV = getMembersAndKeys(t, cc)
 
 	t.Logf("Starting upgrade process to %q", currentVersionStr)
-	err = e2e.DowngradeUpgradeMembersByID(t, nil, epc, membersToChange, lastClusterVersion, currentVersion)
+	downgradeEnabled := triggerCancellation == noCancellation && numberOfMembersToDowngrade < clusterSize
+	err = e2e.DowngradeUpgradeMembersByID(t, nil, epc, membersToChange, downgradeEnabled, lastClusterVersion, currentVersion)
 	require.NoError(t, err)
 	t.Log("Upgrade complete")
 
+	if downgradeEnabled {
+		t.Log("Downgrade should be still enabled")
+		e2e.ValidateDowngradeInfo(t, epc, &pb.DowngradeInfo{Enabled: true, TargetVersion: lastClusterVersion.String()})
+	} else {
+		t.Log("Downgrade should be disabled")
+		e2e.ValidateDowngradeInfo(t, epc, &pb.DowngradeInfo{Enabled: false})
+	}
+
 	afterMembers, afterKV = getMembersAndKeys(t, cc)
 	assert.Equal(t, beforeKV.Kvs, afterKV.Kvs)
 	assert.Equal(t, beforeMembers.Members, afterMembers.Members)
@@ -224,27 +245,6 @@ func newCluster(t *testing.T, clusterSize int, snapshotCount uint64) *e2e.EtcdPr
 	return epc
 }
 
-func leader(t *testing.T, epc *e2e.EtcdProcessCluster) e2e.EtcdProcess {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
-	defer cancel()
-	for i := 0; i < len(epc.Procs); i++ {
-		endpoints := epc.Procs[i].EndpointsGRPC()
-		cli, err := clientv3.New(clientv3.Config{
-			Endpoints:   endpoints,
-			DialTimeout: 3 * time.Second,
-		})
-		require.NoError(t, err)
-		defer cli.Close()
-		resp, err := cli.Status(ctx, endpoints[0])
-		require.NoError(t, err)
-		if resp.Header.GetMemberId() == resp.Leader {
-			return epc.Procs[i]
-		}
-	}
-	t.Fatal("Leader not found")
-	return nil
-}
-
 func generateSnapshot(t *testing.T, snapshotCount uint64, cc *e2e.EtcdctlV3) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()