| 1 | +package replicas |
| 2 | + |
| 3 | +import ( |
| 4 | + "context" |
| 5 | + "fmt" |
| 6 | + "sync/atomic" |
| 7 | + "testing" |
| 8 | + "time" |
| 9 | + |
| 10 | + "github.com/sirupsen/logrus" |
| 11 | + "github.com/stretchr/testify/require" |
| 12 | + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 13 | + "k8s.io/client-go/kubernetes/fake" |
| 14 | + |
| 15 | + "castai-agent/internal/config" |
| 16 | + "castai-agent/pkg/services/providers/types" |
| 17 | +) |
| 18 | + |
| 19 | +func TestRegisterClusterWithLease(t *testing.T) { |
| 20 | + tests := map[string]struct { |
| 21 | + registerFn func(ctx context.Context) (*types.ClusterRegistration, error) |
| 22 | + setupCtx func() (context.Context, context.CancelFunc) |
| 23 | + verify func(t *testing.T, reg *types.ClusterRegistration, err error, clientset *fake.Clientset, cfg config.LeaderElectionConfig) |
| 24 | + }{ |
| 25 | + "registers cluster successfully": { |
| 26 | + registerFn: func(ctx context.Context) (*types.ClusterRegistration, error) { |
| 27 | + return &types.ClusterRegistration{ |
| 28 | + ClusterID: "cluster-123", |
| 29 | + OrganizationID: "org-456", |
| 30 | + }, nil |
| 31 | + }, |
| 32 | + setupCtx: func() (context.Context, context.CancelFunc) { |
| 33 | + return context.WithTimeout(context.Background(), 10*time.Second) |
| 34 | + }, |
| 35 | + verify: func(t *testing.T, reg *types.ClusterRegistration, err error, clientset *fake.Clientset, cfg config.LeaderElectionConfig) { |
| 36 | + require.NoError(t, err) |
| 37 | + require.NotNil(t, reg) |
| 38 | + require.Equal(t, "cluster-123", reg.ClusterID) |
| 39 | + require.Equal(t, "org-456", reg.OrganizationID) |
| 40 | + |
| 41 | + // Verify lease was created |
| 42 | + leaseName := cfg.LockName + "-registration" |
| 43 | + lease, err := clientset.CoordinationV1().Leases(cfg.Namespace).Get(context.Background(), leaseName, metav1.GetOptions{}) |
| 44 | + require.NoError(t, err) |
| 45 | + require.NotNil(t, lease) |
| 46 | + }, |
| 47 | + }, |
| 48 | + "propagates registration error": { |
| 49 | + registerFn: func(ctx context.Context) (*types.ClusterRegistration, error) { |
| 50 | + return nil, fmt.Errorf("api unavailable") |
| 51 | + }, |
| 52 | + setupCtx: func() (context.Context, context.CancelFunc) { |
| 53 | + return context.WithTimeout(context.Background(), 10*time.Second) |
| 54 | + }, |
| 55 | + verify: func(t *testing.T, reg *types.ClusterRegistration, err error, clientset *fake.Clientset, cfg config.LeaderElectionConfig) { |
| 56 | + require.Error(t, err) |
| 57 | + require.ErrorContains(t, err, "api unavailable") |
| 58 | + require.Nil(t, reg) |
| 59 | + }, |
| 60 | + }, |
| 61 | + "returns error on context cancellation": { |
| 62 | + registerFn: func(ctx context.Context) (*types.ClusterRegistration, error) { |
| 63 | + return &types.ClusterRegistration{ClusterID: "should-not-reach"}, nil |
| 64 | + }, |
| 65 | + setupCtx: func() (context.Context, context.CancelFunc) { |
| 66 | + ctx, cancel := context.WithCancel(context.Background()) |
| 67 | + cancel() // cancel immediately |
| 68 | + return ctx, cancel |
| 69 | + }, |
| 70 | + verify: func(t *testing.T, reg *types.ClusterRegistration, err error, clientset *fake.Clientset, cfg config.LeaderElectionConfig) { |
| 71 | + require.Error(t, err) |
| 72 | + require.ErrorIs(t, err, context.Canceled) |
| 73 | + require.Nil(t, reg) |
| 74 | + }, |
| 75 | + }, |
| 76 | + } |
| 77 | + |
| 78 | + for name, tt := range tests { |
| 79 | + t.Run(name, func(t *testing.T) { |
| 80 | + log := logrus.New() |
| 81 | + log.SetLevel(logrus.DebugLevel) |
| 82 | + |
| 83 | + cfg := config.LeaderElectionConfig{ |
| 84 | + LockName: "test-lock", |
| 85 | + Namespace: "default", |
| 86 | + } |
| 87 | + |
| 88 | + clientset := fake.NewClientset() |
| 89 | + |
| 90 | + ctx, cancel := tt.setupCtx() |
| 91 | + defer cancel() |
| 92 | + |
| 93 | + reg, err := RegisterClusterWithLease(ctx, log, cfg, clientset, tt.registerFn) |
| 94 | + tt.verify(t, reg, err, clientset, cfg) |
| 95 | + }) |
| 96 | + } |
| 97 | +} |
| 98 | + |
| 99 | +func TestRegisterClusterWithLease_SerializesConcurrentRegistrations(t *testing.T) { |
| 100 | + log := logrus.New() |
| 101 | + log.SetLevel(logrus.DebugLevel) |
| 102 | + |
| 103 | + cfg := config.LeaderElectionConfig{ |
| 104 | + LockName: "test-lock", |
| 105 | + Namespace: "default", |
| 106 | + } |
| 107 | + |
| 108 | + clientset := fake.NewClientset() |
| 109 | + |
| 110 | + var maxConcurrent atomic.Int32 |
| 111 | + var currentConcurrent atomic.Int32 |
| 112 | + |
| 113 | + registerFn := func(ctx context.Context) (*types.ClusterRegistration, error) { |
| 114 | + cur := currentConcurrent.Add(1) |
| 115 | + for { |
| 116 | + old := maxConcurrent.Load() |
| 117 | + if cur <= old || maxConcurrent.CompareAndSwap(old, cur) { |
| 118 | + break |
| 119 | + } |
| 120 | + } |
| 121 | + time.Sleep(50 * time.Millisecond) // simulate work |
| 122 | + currentConcurrent.Add(-1) |
| 123 | + return &types.ClusterRegistration{ClusterID: "cluster-123"}, nil |
| 124 | + } |
| 125 | + |
| 126 | + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) |
| 127 | + defer cancel() |
| 128 | + |
| 129 | + const numGoroutines = 3 |
| 130 | + errs := make(chan error, numGoroutines) |
| 131 | + results := make(chan *types.ClusterRegistration, numGoroutines) |
| 132 | + |
| 133 | + for i := 0; i < numGoroutines; i++ { |
| 134 | + go func() { |
| 135 | + reg, err := RegisterClusterWithLease(ctx, log, cfg, clientset, registerFn) |
| 136 | + errs <- err |
| 137 | + results <- reg |
| 138 | + }() |
| 139 | + } |
| 140 | + |
| 141 | + for i := 0; i < numGoroutines; i++ { |
| 142 | + err := <-errs |
| 143 | + require.NoError(t, err) |
| 144 | + reg := <-results |
| 145 | + require.NotNil(t, reg) |
| 146 | + require.Equal(t, "cluster-123", reg.ClusterID) |
| 147 | + } |
| 148 | + |
| 149 | + require.Equal(t, int32(1), maxConcurrent.Load(), "registerFn should never run concurrently") |
| 150 | +} |