Skip to content
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 24 additions & 30 deletions util/db/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,11 +68,7 @@ func (db *db) ListClusters(_ context.Context) (*appv1.ClusterList, error) {
clusterList := appv1.ClusterList{
Items: make([]appv1.Cluster, 0),
}
settings, err := db.settingsMgr.GetSettings()
if err != nil {
return nil, err
}
inClusterEnabled := settings.InClusterEnabled
inClusterEnabled := isInClusterEnabled(db.settingsMgr)
hasInClusterCredentials := false
for _, clusterSecret := range clusterSecrets {
cluster, err := SecretToCluster(clusterSecret)
Expand All @@ -98,11 +94,7 @@ func (db *db) ListClusters(_ context.Context) (*appv1.ClusterList, error) {
// CreateCluster creates a cluster
func (db *db) CreateCluster(ctx context.Context, c *appv1.Cluster) (*appv1.Cluster, error) {
if c.Server == appv1.KubernetesInternalAPIServerAddr {
settings, err := db.settingsMgr.GetSettings()
if err != nil {
return nil, err
}
if !settings.InClusterEnabled {
if !isInClusterEnabled(db.settingsMgr) {
return nil, status.Errorf(codes.InvalidArgument, "cannot register cluster: in-cluster has been disabled")
}
}
Expand Down Expand Up @@ -148,13 +140,9 @@ func (db *db) WatchClusters(ctx context.Context,
handleModEvent func(oldCluster *appv1.Cluster, newCluster *appv1.Cluster),
handleDeleteEvent func(clusterServer string),
) error {
argoSettings, err := db.settingsMgr.GetSettings()
if err != nil {
return err
}

localCls := db.getLocalCluster()
if argoSettings.InClusterEnabled {
var err error
if isInClusterEnabled(db.settingsMgr) {
localCls, err = db.GetCluster(ctx, appv1.KubernetesInternalAPIServerAddr)
if err != nil {
return fmt.Errorf("could not get local cluster: %w", err)
Expand All @@ -173,7 +161,7 @@ func (db *db) WatchClusters(ctx context.Context,
return
}
if cluster.Server == appv1.KubernetesInternalAPIServerAddr {
if argoSettings.InClusterEnabled {
if isInClusterEnabled(db.settingsMgr) {
// change local cluster event to modified, since it cannot be added at runtime
handleModEvent(localCls, cluster)
localCls = cluster
Expand Down Expand Up @@ -201,7 +189,7 @@ func (db *db) WatchClusters(ctx context.Context,
},

func(secret *corev1.Secret) {
if string(secret.Data["server"]) == appv1.KubernetesInternalAPIServerAddr && argoSettings.InClusterEnabled {
if string(secret.Data["server"]) == appv1.KubernetesInternalAPIServerAddr && isInClusterEnabled(db.settingsMgr) {
// change local cluster event to modified, since it cannot be deleted at runtime, unless disabled.
newLocalCls := db.getLocalCluster()
handleModEvent(localCls, newLocalCls)
Expand Down Expand Up @@ -233,11 +221,7 @@ func (db *db) getClusterSecret(server string) (*corev1.Secret, error) {
func (db *db) GetCluster(_ context.Context, server string) (*appv1.Cluster, error) {
informer := db.settingsMgr.GetClusterInformer()
if server == appv1.KubernetesInternalAPIServerAddr {
argoSettings, err := db.settingsMgr.GetSettings()
if err != nil {
return nil, err
}
if !argoSettings.InClusterEnabled {
if !isInClusterEnabled(db.settingsMgr) {
return nil, status.Errorf(codes.NotFound, "cluster %q is disabled", server)
}

Expand Down Expand Up @@ -282,24 +266,19 @@ func (db *db) GetProjectClusters(_ context.Context, project string) ([]*appv1.Cl
}

func (db *db) GetClusterServersByName(_ context.Context, name string) ([]string, error) {
argoSettings, err := db.settingsMgr.GetSettings()
if err != nil {
return nil, err
}

informer := db.settingsMgr.GetClusterInformer()
servers, err := informer.GetClusterServersByName(name)
if err != nil {
return nil, err
}

// Handle local cluster special case
if len(servers) == 0 && name == "in-cluster" && argoSettings.InClusterEnabled {
if len(servers) == 0 && name == "in-cluster" && isInClusterEnabled(db.settingsMgr) {
return []string{appv1.KubernetesInternalAPIServerAddr}, nil
}

// Filter out disabled in-cluster
if !argoSettings.InClusterEnabled {
if !isInClusterEnabled(db.settingsMgr) {
filtered := make([]string, 0, len(servers))
for _, s := range servers {
if s != appv1.KubernetesInternalAPIServerAddr {
Expand Down Expand Up @@ -462,3 +441,18 @@ func SecretToCluster(s *corev1.Secret) (*appv1.Cluster, error) {
}
return &cluster, nil
}

// isInClusterEnabled returns false if explicitly disabled by the user, true otherwise.
func isInClusterEnabled(settingsMgr *settings.SettingsManager) bool {
argoSettings, err := settingsMgr.GetSettings()
Copy link
Member

@choejwoo choejwoo Mar 13, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi, thanks for the PR!

This is slightly different from the previous contract discussion, but since this code path only needs InClusterEnabled while GetSettings() assembles much broader state, would it make sense to add a narrower helper on SettingsManager for just this flag so callers do not need to depend on the broader GetSettings() contract here?

Something along these lines, for example,

func (mgr *SettingsManager) IsInClusterEnabled() (bool, error) {
	argoCDCM, err := mgr.getConfigMap()
	if err != nil {
		return true, fmt.Errorf("error retrieving argocd-cm: %w", err)
	}
	return argoCDCM.Data[inClusterEnabledKey] != "false", nil
}

Also, I notice some paths in this file now call isInClusterEnabled(...) multiple times within the same flow, so even if introducing a narrower helper feels out of scope here, it may still be worth reading the value once and reusing it locally where possible.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks @choejwoo for your review — I agree that a direct helper on SettingsManager would be better. Let me make that change.

if err != nil {
if argoSettings != nil {
// Settings are incomplete (e.g. missing server.secretkey) but the
// ConfigMap values including InClusterEnabled were loaded successfully.
return argoSettings.InClusterEnabled
}
log.Warnf("could not confirm if in-cluster is disabled, defaulting to enabled: %v", err)
return true
}
return argoSettings.InClusterEnabled
}
38 changes: 38 additions & 0 deletions util/db/cluster_norace_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,44 @@ func TestWatchClusters_LocalClusterModifications(t *testing.T) {
assert.True(t, completed, "Failed due to timeout")
}

func TestWatchClusters_MissingServerSecretKey(t *testing.T) {
	// !race:
	// Intermittent failure when running with -race, likely due to race condition
	// https://github.com/argoproj/argo-cd/issues/4755

	// argocd-cm with no settings at all.
	configMap := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDConfigMapName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string]string{},
	}
	// argocd-secret that deliberately lacks server.secretkey, which makes
	// settings loading report an error.
	secretWithoutKey := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDSecretName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string][]byte{
			"admin.password": nil,
		},
	}
	clientset := fake.NewClientset(configMap, secretWithoutKey)
	settingsMgr := settings.NewSettingsManager(t.Context(), clientset, fakeNamespace)
	argoDB := NewDB(fakeNamespace, settingsMgr, clientset)

	// Expect the implicit local cluster to be announced as a single Add event.
	completed := runWatchTest(t, argoDB, []func(old *v1alpha1.Cluster, new *v1alpha1.Cluster){
		func(old *v1alpha1.Cluster, new *v1alpha1.Cluster) {
			assert.Nil(t, old)
			assert.Equal(t, v1alpha1.KubernetesInternalAPIServerAddr, new.Server)
		},
	})
	assert.True(t, completed, "WatchClusters should work even when server.secretkey is missing")
}

func TestWatchClusters_LocalClusterModificationsWhenDisabled(t *testing.T) {
// !race:
// Intermittent failure when running TestWatchClusters_LocalClusterModifications with -race, likely due to race condition
Expand Down
227 changes: 227 additions & 0 deletions util/db/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -661,6 +661,233 @@ func TestGetClusterServersByName(t *testing.T) {
})
}

func TestIsInClusterEnabled(t *testing.T) {
	// newConfigMap builds an argocd-cm ConfigMap with the given data.
	newConfigMap := func(data map[string]string) *corev1.ConfigMap {
		return &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Name:      common.ArgoCDConfigMapName,
				Namespace: fakeNamespace,
				Labels: map[string]string{
					"app.kubernetes.io/part-of": "argocd",
				},
			},
			Data: data,
		}
	}
	// newSecret builds an argocd-secret with the given data.
	newSecret := func(data map[string][]byte) *corev1.Secret {
		return &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      common.ArgoCDSecretName,
				Namespace: fakeNamespace,
				Labels: map[string]string{
					"app.kubernetes.io/part-of": "argocd",
				},
			},
			Data: data,
		}
	}

	inClusterDisabled := map[string]string{"cluster.inClusterEnabled": "false"}
	withSecretKey := map[string][]byte{
		"admin.password":   nil,
		"server.secretkey": nil,
	}
	withoutSecretKey := map[string][]byte{
		"admin.password": nil,
	}

	testCases := []struct {
		name       string
		cmData     map[string]string
		secretData map[string][]byte
		want       bool
	}{
		{"enabled by default", map[string]string{}, withSecretKey, true},
		{"explicitly disabled", inClusterDisabled, withSecretKey, false},
		{"defaults to enabled when server.secretkey is missing", map[string]string{}, withoutSecretKey, true},
		{"respects explicit disable even when server.secretkey is missing", inClusterDisabled, withoutSecretKey, false},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			clientset := fake.NewClientset(newConfigMap(tc.cmData), newSecret(tc.secretData))
			settingsMgr := settings.NewSettingsManager(t.Context(), clientset, fakeNamespace)
			require.Equal(t, tc.want, isInClusterEnabled(settingsMgr))
		})
	}
}

func TestCreateCluster_MissingServerSecretKey(t *testing.T) {
	// newConfigMap builds an argocd-cm ConfigMap with the given data.
	newConfigMap := func(data map[string]string) *corev1.ConfigMap {
		return &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Name:      common.ArgoCDConfigMapName,
				Namespace: fakeNamespace,
				Labels: map[string]string{
					"app.kubernetes.io/part-of": "argocd",
				},
			},
			Data: data,
		}
	}
	// argocd-secret that deliberately lacks server.secretkey.
	secretWithoutKey := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDSecretName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string][]byte{
			"admin.password": nil,
		},
	}

	t.Run("in-cluster creation succeeds when server.secretkey is missing", func(t *testing.T) {
		clientset := fake.NewClientset(newConfigMap(map[string]string{}), secretWithoutKey)
		settingsMgr := settings.NewSettingsManager(t.Context(), clientset, fakeNamespace)
		argoDB := NewDB(fakeNamespace, settingsMgr, clientset)

		_, err := argoDB.CreateCluster(t.Context(), &v1alpha1.Cluster{
			Server: v1alpha1.KubernetesInternalAPIServerAddr,
			Name:   "in-cluster",
		})
		require.NoError(t, err)
	})

	t.Run("external cluster creation succeeds when server.secretkey is missing", func(t *testing.T) {
		clientset := fake.NewClientset(newConfigMap(map[string]string{}), secretWithoutKey)
		settingsMgr := settings.NewSettingsManager(t.Context(), clientset, fakeNamespace)
		argoDB := NewDB(fakeNamespace, settingsMgr, clientset)

		_, err := argoDB.CreateCluster(t.Context(), &v1alpha1.Cluster{
			Server: "https://my-external-cluster",
			Name:   "external",
		})
		require.NoError(t, err)
	})

	t.Run("in-cluster creation rejected when explicitly disabled even with missing server.secretkey", func(t *testing.T) {
		disabledCM := newConfigMap(map[string]string{"cluster.inClusterEnabled": "false"})
		clientset := fake.NewClientset(disabledCM, secretWithoutKey)
		settingsMgr := settings.NewSettingsManager(t.Context(), clientset, fakeNamespace)
		argoDB := NewDB(fakeNamespace, settingsMgr, clientset)

		_, err := argoDB.CreateCluster(t.Context(), &v1alpha1.Cluster{
			Server: v1alpha1.KubernetesInternalAPIServerAddr,
			Name:   "in-cluster",
		})
		require.Error(t, err)
		require.Contains(t, err.Error(), "in-cluster has been disabled")
	})
}

func TestListClusters_MissingServerSecretKey(t *testing.T) {
	// argocd-cm with no settings at all.
	argoCM := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDConfigMapName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string]string{},
	}
	// argocd-secret that deliberately lacks server.secretkey.
	argoSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDSecretName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string][]byte{
			"admin.password": nil,
		},
	}

	t.Run("lists clusters including implicit in-cluster when server.secretkey is missing", func(t *testing.T) {
		// One registered external cluster secret.
		externalClusterSecret := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "mycluster",
				Namespace: fakeNamespace,
				Labels: map[string]string{
					common.LabelKeySecretType: common.LabelValueSecretTypeCluster,
				},
			},
			Data: map[string][]byte{
				"server": []byte("https://my-external-cluster"),
				"name":   []byte("external"),
			},
		}
		clientset := fake.NewClientset(externalClusterSecret, argoCM, argoSecret)
		settingsMgr := settings.NewSettingsManager(t.Context(), clientset, fakeNamespace)
		argoDB := NewDB(fakeNamespace, settingsMgr, clientset)

		// Expect the external cluster plus the implicit in-cluster entry.
		clusters, err := argoDB.ListClusters(t.Context())
		require.NoError(t, err)
		require.Len(t, clusters.Items, 2)
	})
}

func TestGetClusterServersByName_MissingServerSecretKey(t *testing.T) {
	// argocd-cm with no settings at all.
	argoCM := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDConfigMapName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string]string{},
	}
	// argocd-secret that deliberately lacks server.secretkey.
	argoSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDSecretName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string][]byte{
			"admin.password": nil,
		},
	}

	t.Run("returns in-cluster when server.secretkey is missing", func(t *testing.T) {
		clientset := fake.NewClientset(argoCM, argoSecret)
		settingsMgr := settings.NewSettingsManager(t.Context(), clientset, fakeNamespace)
		argoDB := NewDB(fakeNamespace, settingsMgr, clientset)

		// The implicit in-cluster server should still resolve by name.
		servers, err := argoDB.GetClusterServersByName(t.Context(), "in-cluster")
		require.NoError(t, err)
		require.ElementsMatch(t, []string{v1alpha1.KubernetesInternalAPIServerAddr}, servers)
	})
}

// TestClusterRaceConditionClusterSecrets reproduces a race condition
// on the cluster secrets. The test isn't asserting anything because
// before the fix it would cause a panic from concurrent map iteration and map write
Expand Down
Loading
Loading