
Commit bcc511f

Replication: Add e2e tests for workloads-related rbac objects
Signed-off-by: David Festal <[email protected]>
1 parent: 6668a0c

File tree

1 file changed: +201 −3 lines changed


test/e2e/reconciler/cache/replication_test.go (+201 −3)
@@ -30,6 +30,7 @@ import (
     "github.com/kcp-dev/logicalcluster/v3"
     "github.com/stretchr/testify/require"

+    rbacv1 "k8s.io/api/rbac/v1"
     apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
     "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/api/meta"
@@ -64,6 +65,10 @@ var scenarios = []testScenario{
     {"TestReplicateAPIResourceSchemaNegative", replicateAPIResourceSchemaNegativeScenario},
     {"TestReplicateWorkspaceType", replicateWorkspaceTypeScenario},
     {"TestReplicateWorkspaceTypeNegative", replicateWorkspaceTypeNegativeScenario},
+    {"TestReplicateWorkloadsClusterRole", replicateWorkloadsClusterRoleScenario},
+    {"TestReplicateWorkloadsClusterRoleNegative", replicateWorkloadsClusterRoleNegativeScenario},
+    {"TestReplicateWorkloadsClusterRoleBinding", replicateWorkloadsClusterRoleBindingScenario},
+    {"TestReplicateWorkloadsClusterRoleBindingNegative", replicateWorkloadsClusterRoleBindingNegativeScenario},
 }

 // disruptiveScenarios contains a list of scenarios that will be run in a private environment
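The four new entries plug into the file's table-driven scenario runner. The testScenario type itself is outside this diff; assuming it simply pairs a test name with a scenario function, a minimal self-contained sketch of the pattern (with simplified signatures, since the real entries also receive *testing.T, the running server, and two kcp dynamic cluster clients) would be:

    package main

    import (
        "context"
        "fmt"
    )

    // Simplified stand-in for the test's scenario table; the real testScenario
    // entries also receive *testing.T and the kcp dynamic cluster clients.
    type testScenario struct {
        name string
        work func(ctx context.Context)
    }

    var scenarios = []testScenario{
        {"TestReplicateWorkloadsClusterRole", func(ctx context.Context) { fmt.Println("positive scenario") }},
        {"TestReplicateWorkloadsClusterRoleNegative", func(ctx context.Context) { fmt.Println("negative scenario") }},
    }

    func main() {
        ctx := context.Background()
        // the runner walks the table and executes each registered scenario
        for _, scenario := range scenarios {
            fmt.Println("running", scenario.name)
            scenario.work(ctx)
        }
    }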
@@ -330,7 +335,9 @@ func replicateResource(ctx context.Context, t *testing.T,
     kind string, /*kind for the given resource*/
     gvr schema.GroupVersionResource, /*gvr for the given resource*/
     res runtime.Object, /*a strongly typed resource object that will be created*/
-    resWithModifiedSpec runtime.Object /*a strongly typed resource obj with modified spec only, will be used for an update*/) {
+    resWithModifiedSpec runtime.Object, /*a strongly typed resource obj with modified spec only, will be used for an update*/
+    prepares ...func(*replicateResourceScenario), /*additional functions that allow preparing the context of the source resource before expecting replication*/
+) {
     t.Helper()

     orgPath, _ := framework.NewOrganizationFixture(t, server)
@@ -343,6 +350,10 @@
     resourceName := resMeta.GetName()
     scenario := &replicateResourceScenario{resourceName: resourceName, kind: kind, gvr: gvr, cluster: clusterName, server: server, kcpShardClusterDynamicClient: kcpShardClusterDynamicClient, cacheKcpClusterDynamicClient: cacheKcpClusterDynamicClient}

+    for _, prepare := range prepares {
+        prepare(scenario)
+    }
+
     t.Logf("Create source %s %s/%s on the root shard for replication", kind, clusterName, resourceName)
     scenario.CreateSourceResource(ctx, t, res)
     t.Logf("Verify that the source %s %s/%s was replicated to the cache server", kind, clusterName, resourceName)
@@ -383,7 +394,9 @@ func replicateResourceNegative(ctx context.Context, t *testing.T,
     kind string, /*kind for the given resource*/
     gvr schema.GroupVersionResource, /*gvr for the given resource*/
     res runtime.Object, /*a strongly typed resource object that will be created*/
-    resWithModifiedSpec runtime.Object /*a strongly typed resource obj with modified spec only, will be used for an update*/) {
+    resWithModifiedSpec runtime.Object, /*a strongly typed resource obj with modified spec only, will be used for an update*/
+    prepares ...func(*replicateResourceScenario), /*additional functions that allow preparing the context of the source resource before expecting replication*/
+) {
     t.Helper()

     orgPath, _ := framework.NewOrganizationFixture(t, server)
@@ -396,6 +409,10 @@
     resourceName := resMeta.GetName()
     scenario := &replicateResourceScenario{resourceName: resourceName, kind: kind, gvr: gvr, cluster: clusterName, server: server, kcpShardClusterDynamicClient: kcpShardClusterDynamicClient, cacheKcpClusterDynamicClient: cacheKcpClusterDynamicClient}

+    for _, prepare := range prepares {
+        prepare(scenario)
+    }
+
     t.Logf("Create source %s %s/%s on the root shard for replication", kind, clusterName, resourceName)
     scenario.CreateSourceResource(ctx, t, res)
     t.Logf("Verify that the source %s %s/%s was replicated to the cache server", kind, clusterName, resourceName)
@@ -486,6 +503,14 @@ type replicateResourceScenario struct {
     cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface
 }

+func (b *replicateResourceScenario) CreateAdditionalResource(ctx context.Context, t *testing.T, res runtime.Object, kind string, gvr schema.GroupVersionResource) {
+    t.Helper()
+    resUnstructured, err := toUnstructured(res, kind, gvr)
+    require.NoError(t, err)
+    _, err = b.kcpShardClusterDynamicClient.Resource(gvr).Cluster(b.cluster.Path()).Create(ctx, resUnstructured, metav1.CreateOptions{})
+    require.NoError(t, err)
+}
+
 func (b *replicateResourceScenario) CreateSourceResource(ctx context.Context, t *testing.T, res runtime.Object) {
     t.Helper()
     resUnstructured, err := toUnstructured(res, b.kind, b.gvr)
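CreateAdditionalResource mirrors CreateSourceResource but takes the kind and GVR explicitly, so a prepare callback can create an object of a different type than the resource under test. The toUnstructured helper it calls is not part of this diff; a plausible equivalent (an assumption, not the test's actual code) built on apimachinery's DefaultUnstructuredConverter looks like this:

    package main

    import (
        "fmt"

        rbacv1 "k8s.io/api/rbac/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime"
    )

    // toUnstructuredSketch converts a typed object into the map form the dynamic
    // client expects and stamps kind/apiVersion on it. Hypothetical helper, not
    // the toUnstructured defined in replication_test.go.
    func toUnstructuredSketch(obj runtime.Object, kind, apiVersion string) (*unstructured.Unstructured, error) {
        content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
        if err != nil {
            return nil, err
        }
        u := &unstructured.Unstructured{Object: content}
        u.SetKind(kind)
        u.SetAPIVersion(apiVersion)
        return u, nil
    }

    func main() {
        cr := &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "syncer-demo"}}
        u, err := toUnstructuredSketch(cr, "ClusterRole", rbacv1.SchemeGroupVersion.String())
        if err != nil {
            panic(err)
        }
        fmt.Println(u.GetAPIVersion(), u.GetKind(), u.GetName())
    }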
@@ -678,14 +703,23 @@ func (b *replicateResourceScenario) verifyResourceReplicationHelper(ctx context.
     }
     unstructured.RemoveNestedField(originalResource.Object, "metadata", "resourceVersion")
     unstructured.RemoveNestedField(cachedResource.Object, "metadata", "resourceVersion")
+
+    // TODO(davidfestal): find out why the generation is not equal, especially for rbacv1.
+    // Is it a characteristic of all built-in KCP resources (which are not backed by CRDs)?
+    // Issue opened: https://github.com/kcp-dev/kcp/issues/2935
+    if b.gvr.Group == rbacv1.SchemeGroupVersion.Group {
+        unstructured.RemoveNestedField(originalResource.Object, "metadata", "generation")
+        unstructured.RemoveNestedField(cachedResource.Object, "metadata", "generation")
+    }
+
     unstructured.RemoveNestedField(cachedResource.Object, "metadata", "annotations", genericapirequest.AnnotationKey)
     if cachedStatus, ok := cachedResource.Object["status"]; ok && cachedStatus == nil || (cachedStatus != nil && len(cachedStatus.(map[string]interface{})) == 0) {
         // TODO: worth investigating:
         // for some reason cached resources have an empty status set whereas the original resources don't
         unstructured.RemoveNestedField(cachedResource.Object, "status")
     }
     if diff := cmp.Diff(cachedResource.Object, originalResource.Object); len(diff) > 0 {
-        return false, fmt.Sprintf("replicated %s root|%s/%s is different from the original", b.gvr, cluster, cachedResourceMeta.GetName())
+        return false, fmt.Sprintf("replicated %s root|%s/%s is different from the original: %s", b.gvr, cluster, cachedResourceMeta.GetName(), diff)
     }
     return true, ""
 }, wait.ForeverTestTimeout, 100*time.Millisecond)
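Before comparing, the helper normalizes both copies: metadata.resourceVersion always differs between shard and cache, and for rbac.authorization.k8s.io resources metadata.generation is dropped too until the discrepancy tracked in kcp issue #2935 is understood. Appending the cmp.Diff output to the failure message makes any remaining mismatch directly debuggable. A standalone sketch of that normalize-then-diff step, using only the apimachinery and go-cmp calls shown above:

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    )

    func main() {
        original := map[string]interface{}{
            "metadata": map[string]interface{}{"name": "syncer", "resourceVersion": "42", "generation": int64(1)},
        }
        cached := map[string]interface{}{
            "metadata": map[string]interface{}{"name": "syncer", "resourceVersion": "7", "generation": int64(3)},
        }
        // strip fields that legitimately differ between the shard and the cache
        for _, obj := range []map[string]interface{}{original, cached} {
            unstructured.RemoveNestedField(obj, "metadata", "resourceVersion")
            // generation is also dropped for rbac.authorization.k8s.io resources, per the TODO above
            unstructured.RemoveNestedField(obj, "metadata", "generation")
        }
        if diff := cmp.Diff(cached, original); len(diff) > 0 {
            fmt.Println("objects differ:", diff)
        } else {
            fmt.Println("objects match after normalization")
        }
    }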
@@ -732,3 +766,167 @@ func createCacheClientConfigForEnvironment(ctx context.Context, t *testing.T, kc
     require.NoError(t, err)
     return cacheServerRestConfig
 }
+
+// replicateWorkloadsClusterRoleScenario tests if a ClusterRole related to the workloads API is propagated to the cache server.
+// The test exercises creation, modification and removal of the ClusterRole object.
+func replicateWorkloadsClusterRoleScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
+    t.Helper()
+    replicateResource(ctx,
+        t,
+        server,
+        kcpShardClusterDynamicClient,
+        cacheKcpClusterDynamicClient,
+        "",
+        "ClusterRole",
+        rbacv1.SchemeGroupVersion.WithResource("clusterroles"),
+        &rbacv1.ClusterRole{
+            ObjectMeta: metav1.ObjectMeta{
+                Name: withPseudoRandomSuffix("syncer"),
+            },
+            Rules: []rbacv1.PolicyRule{
+                {
+                    Verbs:         []string{"sync"},
+                    APIGroups:     []string{"workload.kcp.io"},
+                    Resources:     []string{"synctargets"},
+                    ResourceNames: []string{"asynctarget"},
+                },
+            },
+        },
+        nil,
+    )
+}
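withPseudoRandomSuffix is a helper defined elsewhere in the test suite; presumably it appends a random suffix to the fixture name so that repeated runs against the same shared server do not collide. A plausible stand-in (hypothetical, not the test's actual implementation):

    package main

    import (
        "fmt"
        "math/rand"
    )

    // withPseudoRandomSuffix appends a short random hex suffix to a base name.
    // Hypothetical sketch of the helper used by the scenarios above.
    func withPseudoRandomSuffix(prefix string) string {
        return fmt.Sprintf("%s-%04x", prefix, rand.Intn(1<<16))
    }

    func main() {
        fmt.Println(withPseudoRandomSuffix("syncer")) // e.g. "syncer-1a2b"
    }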
+
+// replicateWorkloadsClusterRoleNegativeScenario checks if a modified or even deleted cached ClusterRole (related to the workloads API) will be reconciled to match the original object.
+func replicateWorkloadsClusterRoleNegativeScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
+    t.Helper()
+    replicateResourceNegative(
+        ctx,
+        t,
+        server,
+        kcpShardClusterDynamicClient,
+        cacheKcpClusterDynamicClient,
+        "",
+        "ClusterRole",
+        rbacv1.SchemeGroupVersion.WithResource("clusterroles"),
+        &rbacv1.ClusterRole{
+            ObjectMeta: metav1.ObjectMeta{
+                Name: withPseudoRandomSuffix("syncer"),
+            },
+            Rules: []rbacv1.PolicyRule{
+                {
+                    Verbs:         []string{"sync"},
+                    APIGroups:     []string{"workload.kcp.io"},
+                    Resources:     []string{"synctargets"},
+                    ResourceNames: []string{"asynctarget"},
+                },
+            },
+        },
+        nil,
+    )
+}
+
+// replicateWorkloadsClusterRoleBindingScenario tests if a ClusterRoleBinding related to the workloads API is propagated to the cache server.
+// The test exercises creation, modification and removal of the ClusterRoleBinding object.
+func replicateWorkloadsClusterRoleBindingScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
+    t.Helper()
+
+    clusterRole := &rbacv1.ClusterRole{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: withPseudoRandomSuffix("syncer"),
+        },
+        Rules: []rbacv1.PolicyRule{
+            {
+                Verbs:         []string{"sync"},
+                APIGroups:     []string{"workload.kcp.io"},
+                Resources:     []string{"synctargets"},
+                ResourceNames: []string{"asynctarget"},
+            },
+        },
+    }
+
+    replicateResource(ctx,
+        t,
+        server,
+        kcpShardClusterDynamicClient,
+        cacheKcpClusterDynamicClient,
+        "",
+        "ClusterRoleBinding",
+        rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"),
+        &rbacv1.ClusterRoleBinding{
+            ObjectMeta: metav1.ObjectMeta{
+                Name: withPseudoRandomSuffix("syncer"),
+            },
+            RoleRef: rbacv1.RoleRef{
+                APIGroup: rbacv1.SchemeGroupVersion.Group,
+                Kind:     "ClusterRole",
+                Name:     clusterRole.Name,
+            },
+            Subjects: []rbacv1.Subject{
+                {
+                    Kind:      "ServiceAccount",
+                    APIGroup:  "",
+                    Name:      "kcp-syncer-0000",
+                    Namespace: "kcp-syncer-namespace",
+                },
+            },
+        },
+        nil,
+        func(scenario *replicateResourceScenario) {
+            t.Logf("Create additional ClusterRole %s on the root shard for replication", clusterRole.Name)
+            scenario.CreateAdditionalResource(ctx, t, clusterRole, "ClusterRole", rbacv1.SchemeGroupVersion.WithResource("clusterroles"))
+        },
+    )
+}
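Note the prepare callback at the end of the call: the referenced ClusterRole is created on the root shard before the ClusterRoleBinding under test. This ordering matters presumably because the replication machinery decides whether a binding is workloads-related by looking at the ClusterRole it points to, so the role must already exist for the binding to qualify for replication. The negative scenario below repeats the same setup.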
+
+// replicateWorkloadsClusterRoleBindingNegativeScenario checks if a modified or even deleted cached ClusterRoleBinding (related to the workloads API) will be reconciled to match the original object.
+func replicateWorkloadsClusterRoleBindingNegativeScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
+    t.Helper()
+
+    clusterRole := &rbacv1.ClusterRole{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: withPseudoRandomSuffix("syncer"),
+        },
+        Rules: []rbacv1.PolicyRule{
+            {
+                Verbs:         []string{"sync"},
+                APIGroups:     []string{"workload.kcp.io"},
+                Resources:     []string{"synctargets"},
+                ResourceNames: []string{"asynctarget"},
+            },
+        },
+    }
+
+    replicateResourceNegative(
+        ctx,
+        t,
+        server,
+        kcpShardClusterDynamicClient,
+        cacheKcpClusterDynamicClient,
+        "",
+        "ClusterRoleBinding",
+        rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"),
+        &rbacv1.ClusterRoleBinding{
+            ObjectMeta: metav1.ObjectMeta{
+                Name: withPseudoRandomSuffix("syncer"),
+            },
+            RoleRef: rbacv1.RoleRef{
+                APIGroup: rbacv1.SchemeGroupVersion.Group,
+                Kind:     "ClusterRole",
+                Name:     clusterRole.Name,
+            },
+            Subjects: []rbacv1.Subject{
+                {
+                    Kind:      "ServiceAccount",
+                    APIGroup:  "",
+                    Name:      "kcp-syncer-0000",
+                    Namespace: "kcp-syncer-namespace",
+                },
+            },
+        },
+        nil,
+        func(scenario *replicateResourceScenario) {
+            t.Logf("Create additional ClusterRole %s on the root shard for replication", clusterRole.Name)
+            scenario.CreateAdditionalResource(ctx, t, clusterRole, "ClusterRole", rbacv1.SchemeGroupVersion.WithResource("clusterroles"))
+        },
+    )
+}
