Skip to content

Commit 6d41413

Browse files
authored
Fix DockerCluster with non-HA backend (#1738)
When running a non-HA storage backend, such as inmem, ensureLeaderMatches will time out through the context, as leader.IsSelf never reports true in a non-HA environment. Ensure we only have a single node during cluster setup and annotate it as being HA disabled. setupNode0's return code was not checked. This made the HA support _technically_ work, if we used a scoped context which had a shorter timeout (but longer than the Docker calls took), but it was hard to accurately time. Signed-off-by: Alexander Scheel <ascheel@gitlab.com>
1 parent 01c2bd5 commit 6d41413

File tree

1 file changed

+25
-10
lines changed

1 file changed

+25
-10
lines changed

sdk/helper/testcluster/docker/environment.go

Lines changed: 25 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -74,6 +74,9 @@ type DockerCluster struct {
7474
builtTags map[string]struct{}
7575

7676
storage testcluster.ClusterStorage
77+
78+
// Whether HA mode is disabled
79+
HADisabled bool
7780
}
7881

7982
func (dc *DockerCluster) NamedLogger(s string) log.Logger {
@@ -208,15 +211,17 @@ func (dc *DockerCluster) setupNode0(ctx context.Context) error {
208211
return err
209212
}
210213

211-
err = ensureLeaderMatches(ctx, client, func(leader *api.LeaderResponse) error {
212-
if !leader.IsSelf {
213-
return fmt.Errorf("node %d leader=%v, expected=%v", 0, leader.IsSelf, true)
214-
}
214+
if !dc.HADisabled {
215+
err = ensureLeaderMatches(ctx, client, func(leader *api.LeaderResponse) error {
216+
if !leader.IsSelf {
217+
return fmt.Errorf("node %d leader=%v, expected=%v", 0, leader.IsSelf, true)
218+
}
215219

216-
return nil
217-
})
218-
if err != nil {
219-
return err
220+
return nil
221+
})
222+
if err != nil {
223+
return err
224+
}
220225
}
221226

222227
status, err := client.Sys().SealStatusWithContext(ctx)
@@ -443,6 +448,11 @@ func NewDockerCluster(ctx context.Context, opts *DockerClusterOptions) (*DockerC
443448
builtTags: map[string]struct{}{},
444449
CA: opts.CA,
445450
storage: opts.Storage,
451+
HADisabled: opts.HADisabled,
452+
}
453+
454+
if dc.HADisabled && opts.NumCores > 1 {
455+
return nil, fmt.Errorf("expected at most one core (%v) when HA mode is disabled", opts.NumCores)
446456
}
447457

448458
if err := dc.setupDockerCluster(ctx, opts); err != nil {
@@ -950,6 +960,7 @@ type DockerClusterOptions struct {
950960
Storage testcluster.ClusterStorage
951961
Root bool
952962
Entrypoint string
963+
HADisabled bool
953964
}
954965

955966
func DefaultOptions(t *testing.T) *DockerClusterOptions {
@@ -1012,7 +1023,11 @@ func (dc *DockerCluster) setupDockerCluster(ctx context.Context, opts *DockerClu
10121023

10131024
var numCores int
10141025
if opts.NumCores == 0 {
1015-
numCores = DefaultNumCores
1026+
if dc.HADisabled {
1027+
numCores = 1
1028+
} else {
1029+
numCores = DefaultNumCores
1030+
}
10161031
} else {
10171032
numCores = opts.NumCores
10181033
}
@@ -1040,7 +1055,7 @@ func (dc *DockerCluster) setupDockerCluster(ctx context.Context, opts *DockerClu
10401055
}
10411056
if i == 0 {
10421057
if err := dc.setupNode0(ctx); err != nil {
1043-
return nil
1058+
return err
10441059
}
10451060
} else {
10461061
if err := dc.joinNode(ctx, i, 0); err != nil {

0 commit comments

Comments (0)