Skip to content

Commit ae8c468

Browse files
committed
Replication: Add e2e tests for workloads-related rbac objects
Signed-off-by: David Festal <[email protected]>
1 parent 6668a0c commit ae8c468

File tree

1 file changed

+193
-1
lines changed

1 file changed

+193
-1
lines changed

test/e2e/reconciler/cache/replication_test.go

+193-1
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ import (
3030
"github.com/kcp-dev/logicalcluster/v3"
3131
"github.com/stretchr/testify/require"
3232

33+
rbacv1 "k8s.io/api/rbac/v1"
3334
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
3435
"k8s.io/apimachinery/pkg/api/errors"
3536
"k8s.io/apimachinery/pkg/api/meta"
@@ -64,6 +65,10 @@ var scenarios = []testScenario{
6465
{"TestReplicateAPIResourceSchemaNegative", replicateAPIResourceSchemaNegativeScenario},
6566
{"TestReplicateWorkspaceType", replicateWorkspaceTypeScenario},
6667
{"TestReplicateWorkspaceTypeNegative", replicateWorkspaceTypeNegativeScenario},
68+
{"TestReplicateWorkloadsClusterRole", replicateWorkloadsClusterRoleScenario},
69+
{"TestReplicateWorkloadsClusterRoleNegative", replicateWorkloadsClusterRoleNegativeScenario},
70+
{"TestReplicateWorkloadsClusterRoleBinding", replicateWorkloadsClusterRoleBindingScenario},
71+
{"TestReplicateWorkloadsClusterRoleBindingNegative", replicateWorkloadsClusterRoleBindingNegativeScenario},
6772
}
6873

6974
// disruptiveScenarios contains a list of scenarios that will be run in a private environment
@@ -678,14 +683,23 @@ func (b *replicateResourceScenario) verifyResourceReplicationHelper(ctx context.
678683
}
679684
unstructured.RemoveNestedField(originalResource.Object, "metadata", "resourceVersion")
680685
unstructured.RemoveNestedField(cachedResource.Object, "metadata", "resourceVersion")
686+
687+
// TODO(davidfestal): find out why the generation is not equal, specially for rbacv1.
688+
// Is it a characteristic of all built-in KCP resources (which are not backed by CRDs) ?
689+
// Issue opened: https://github.com/kcp-dev/kcp/issues/2935
690+
if b.gvr.Group == rbacv1.SchemeGroupVersion.Group {
691+
unstructured.RemoveNestedField(originalResource.Object, "metadata", "generation")
692+
unstructured.RemoveNestedField(cachedResource.Object, "metadata", "generation")
693+
}
694+
681695
unstructured.RemoveNestedField(cachedResource.Object, "metadata", "annotations", genericapirequest.AnnotationKey)
682696
if cachedStatus, ok := cachedResource.Object["status"]; ok && cachedStatus == nil || (cachedStatus != nil && len(cachedStatus.(map[string]interface{})) == 0) {
683697
// TODO: worth investigating:
684698
// for some reason cached resources have an empty status set whereas the original resources don't
685699
unstructured.RemoveNestedField(cachedResource.Object, "status")
686700
}
687701
if diff := cmp.Diff(cachedResource.Object, originalResource.Object); len(diff) > 0 {
688-
return false, fmt.Sprintf("replicated %s root|%s/%s is different from the original", b.gvr, cluster, cachedResourceMeta.GetName())
702+
return false, fmt.Sprintf("replicated %s root|%s/%s is different from the original: %s", b.gvr, cluster, cachedResourceMeta.GetName(), diff)
689703
}
690704
return true, ""
691705
}, wait.ForeverTestTimeout, 100*time.Millisecond)
@@ -732,3 +746,181 @@ func createCacheClientConfigForEnvironment(ctx context.Context, t *testing.T, kc
732746
require.NoError(t, err)
733747
return cacheServerRestConfig
734748
}
749+
750+
// replicateWorkloadsClusterRoleScenario tests if a ClusterRole related to workloads API is propagated to the cache server.
751+
// The test exercises creation, modification and removal of the Shard object.
752+
func replicateWorkloadsClusterRoleScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
753+
t.Helper()
754+
replicateResource(ctx,
755+
t,
756+
server,
757+
kcpShardClusterDynamicClient,
758+
cacheKcpClusterDynamicClient,
759+
"",
760+
"ClusterRole",
761+
rbacv1.SchemeGroupVersion.WithResource("clusterroles"),
762+
&rbacv1.ClusterRole{
763+
ObjectMeta: metav1.ObjectMeta{
764+
Name: withPseudoRandomSuffix("syncer"),
765+
},
766+
Rules: []rbacv1.PolicyRule{
767+
{
768+
Verbs: []string{"sync"},
769+
APIGroups: []string{"workload.kcp.io"},
770+
Resources: []string{"synctargets"},
771+
ResourceNames: []string{"asynctarget"},
772+
},
773+
},
774+
},
775+
nil,
776+
)
777+
}
778+
779+
// replicateWorkloadsClusterRoleNegativeScenario checks if modified or even deleted cached ClusterRole (related to workloads API) will be reconciled to match the original object.
780+
func replicateWorkloadsClusterRoleNegativeScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
781+
t.Helper()
782+
replicateResourceNegative(
783+
ctx,
784+
t,
785+
server,
786+
kcpShardClusterDynamicClient,
787+
cacheKcpClusterDynamicClient,
788+
"",
789+
"ClusterRole",
790+
rbacv1.SchemeGroupVersion.WithResource("clusterroles"),
791+
&rbacv1.ClusterRole{
792+
ObjectMeta: metav1.ObjectMeta{
793+
Name: withPseudoRandomSuffix("syncer"),
794+
},
795+
Rules: []rbacv1.PolicyRule{
796+
{
797+
Verbs: []string{"sync"},
798+
APIGroups: []string{"workload.kcp.io"},
799+
Resources: []string{"synctargets"},
800+
ResourceNames: []string{"asynctarget"},
801+
},
802+
},
803+
},
804+
nil,
805+
)
806+
}
807+
808+
// replicateWorkloadsClusterRoleBindingScenario tests if a ClusterRoleBinding related to workloads API is propagated to the cache server.
809+
// The test exercises creation, modification and removal of the Shard object.
810+
func replicateWorkloadsClusterRoleBindingScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
811+
t.Helper()
812+
813+
clusterRole := &rbacv1.ClusterRole{
814+
ObjectMeta: metav1.ObjectMeta{
815+
Name: withPseudoRandomSuffix("syncer"),
816+
},
817+
Rules: []rbacv1.PolicyRule{
818+
{
819+
Verbs: []string{"sync"},
820+
APIGroups: []string{"workload.kcp.io"},
821+
Resources: []string{"synctargets"},
822+
ResourceNames: []string{"asynctarget"},
823+
},
824+
},
825+
}
826+
827+
orgPath, _ := framework.NewOrganizationFixture(t, server)
828+
_, ws := framework.NewWorkspaceFixture(t, server, orgPath, framework.WithRootShard())
829+
clusterName := logicalcluster.Name(ws.Spec.Cluster)
830+
831+
t.Logf("Create additional ClusterRole %s on the root shard for replication", clusterRole.Name)
832+
clusterRoleGVR := rbacv1.SchemeGroupVersion.WithResource("clusterroles")
833+
clusterRoleUnstr, err := toUnstructured(clusterRole, "ClusterRole", clusterRoleGVR)
834+
require.NoError(t, err)
835+
_, err = kcpShardClusterDynamicClient.Resource(clusterRoleGVR).Cluster(clusterName.Path()).Create(ctx, clusterRoleUnstr, metav1.CreateOptions{})
836+
require.NoError(t, err)
837+
838+
replicateResource(ctx,
839+
t,
840+
server,
841+
kcpShardClusterDynamicClient,
842+
cacheKcpClusterDynamicClient,
843+
clusterName,
844+
"ClusterRoleBinding",
845+
rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"),
846+
&rbacv1.ClusterRoleBinding{
847+
ObjectMeta: metav1.ObjectMeta{
848+
Name: withPseudoRandomSuffix("syncer"),
849+
},
850+
RoleRef: rbacv1.RoleRef{
851+
APIGroup: rbacv1.SchemeGroupVersion.Group,
852+
Kind: "ClusterRole",
853+
Name: clusterRole.Name,
854+
},
855+
Subjects: []rbacv1.Subject{
856+
{
857+
Kind: "ServiceAccount",
858+
APIGroup: "",
859+
Name: "kcp-syncer-0000",
860+
Namespace: "kcp-syncer-namespace",
861+
},
862+
},
863+
},
864+
nil,
865+
)
866+
}
867+
868+
// replicateWorkloadsClusterRoleNegativeScenario checks if modified or even deleted cached ClusterRole (related to workloads API) will be reconciled to match the original object.
869+
func replicateWorkloadsClusterRoleBindingNegativeScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
870+
t.Helper()
871+
872+
clusterRole := &rbacv1.ClusterRole{
873+
ObjectMeta: metav1.ObjectMeta{
874+
Name: withPseudoRandomSuffix("syncer"),
875+
},
876+
Rules: []rbacv1.PolicyRule{
877+
{
878+
Verbs: []string{"sync"},
879+
APIGroups: []string{"workload.kcp.io"},
880+
Resources: []string{"synctargets"},
881+
ResourceNames: []string{"asynctarget"},
882+
},
883+
},
884+
}
885+
886+
orgPath, _ := framework.NewOrganizationFixture(t, server)
887+
_, ws := framework.NewWorkspaceFixture(t, server, orgPath, framework.WithRootShard())
888+
clusterName := logicalcluster.Name(ws.Spec.Cluster)
889+
890+
t.Logf("Create additional ClusterRole %s on the root shard for replication", clusterRole.Name)
891+
clusterRoleGVR := rbacv1.SchemeGroupVersion.WithResource("clusterroles")
892+
clusterRoleUnstr, err := toUnstructured(clusterRole, "ClusterRole", clusterRoleGVR)
893+
require.NoError(t, err)
894+
_, err = kcpShardClusterDynamicClient.Resource(clusterRoleGVR).Cluster(clusterName.Path()).Create(ctx, clusterRoleUnstr, metav1.CreateOptions{})
895+
require.NoError(t, err)
896+
897+
replicateResourceNegative(
898+
ctx,
899+
t,
900+
server,
901+
kcpShardClusterDynamicClient,
902+
cacheKcpClusterDynamicClient,
903+
clusterName,
904+
"ClusterRoleBinding",
905+
rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"),
906+
&rbacv1.ClusterRoleBinding{
907+
ObjectMeta: metav1.ObjectMeta{
908+
Name: withPseudoRandomSuffix("syncer"),
909+
},
910+
RoleRef: rbacv1.RoleRef{
911+
APIGroup: rbacv1.SchemeGroupVersion.Group,
912+
Kind: "ClusterRole",
913+
Name: clusterRole.Name,
914+
},
915+
Subjects: []rbacv1.Subject{
916+
{
917+
Kind: "ServiceAccount",
918+
APIGroup: "",
919+
Name: "kcp-syncer-0000",
920+
Namespace: "kcp-syncer-namespace",
921+
},
922+
},
923+
},
924+
nil,
925+
)
926+
}

0 commit comments

Comments
 (0)