Commit 1e6ee77

Replication: Add e2e tests for workloads-related rbac objects
Signed-off-by: David Festal <[email protected]>
1 parent 8c48780 commit 1e6ee77

File tree

1 file changed: +200 -3 lines changed


test/e2e/reconciler/cache/replication_test.go (+200 -3)
@@ -30,6 +30,7 @@ import (
 	"github.com/kcp-dev/logicalcluster/v3"
 	"github.com/stretchr/testify/require"
 
+	rbacv1 "k8s.io/api/rbac/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
@@ -64,6 +65,10 @@ var scenarios = []testScenario{
 	{"TestReplicateAPIResourceSchemaNegative", replicateAPIResourceSchemaNegativeScenario},
 	{"TestReplicateWorkspaceType", replicateWorkspaceTypeScenario},
 	{"TestReplicateWorkspaceTypeNegative", replicateWorkspaceTypeNegativeScenario},
+	{"TestReplicateWorkloadsClusterRole", replicateWorkloadsClusterRoleScenario},
+	{"TestReplicateWorkloadsClusterRoleNegative", replicateWorkloadsClusterRoleNegativeScenario},
+	{"TestReplicateWorkloadsClusterRoleBinding", replicateWorkloadsClusterRoleBindingScenario},
+	{"TestReplicateWorkloadsClusterRoleBindingNegative", replicateWorkloadsClusterRoleBindingNegativeScenario},
 }
 
 // disruptiveScenarios contains a list of scenarios that will be run in a private environment
@@ -330,7 +335,9 @@ func replicateResource(ctx context.Context, t *testing.T,
 	kind string, /*kind for the given resource*/
 	gvr schema.GroupVersionResource, /*gvr for the given resource*/
 	res runtime.Object, /*a strongly typed resource object that will be created*/
-	resWithModifiedSpec runtime.Object /*a strongly typed resource obj with modified spec only, will be used for an update*/) {
+	resWithModifiedSpec runtime.Object, /*a strongly typed resource obj with modified spec only, will be used for an update*/
+	prepares ...func(*replicateResourceScenario), /*additional functions that allow preparing the context of the source resource before expecting replication*/
+) {
 	t.Helper()
 
 	orgPath, _ := framework.NewOrganizationFixture(t, server)
@@ -343,6 +350,10 @@ func replicateResource(ctx context.Context, t *testing.T,
 	resourceName := resMeta.GetName()
 	scenario := &replicateResourceScenario{resourceName: resourceName, kind: kind, gvr: gvr, cluster: clusterName, server: server, kcpShardClusterDynamicClient: kcpShardClusterDynamicClient, cacheKcpClusterDynamicClient: cacheKcpClusterDynamicClient}
 
+	for _, prepare := range prepares {
+		prepare(scenario)
+	}
+
 	t.Logf("Create source %s %s/%s on the root shard for replication", kind, clusterName, resourceName)
 	scenario.CreateSourceResource(ctx, t, res)
 	t.Logf("Verify that the source %s %s/%s was replicated to the cache server", kind, clusterName, resourceName)
@@ -383,7 +394,9 @@ func replicateResourceNegative(ctx context.Context, t *testing.T,
 	kind string, /*kind for the given resource*/
 	gvr schema.GroupVersionResource, /*gvr for the given resource*/
 	res runtime.Object, /*a strongly typed resource object that will be created*/
-	resWithModifiedSpec runtime.Object /*a strongly typed resource obj with modified spec only, will be used for an update*/) {
+	resWithModifiedSpec runtime.Object, /*a strongly typed resource obj with modified spec only, will be used for an update*/
+	prepares ...func(*replicateResourceScenario), /*additional functions that allow preparing the context of the source resource before expecting replication*/
+) {
 	t.Helper()
 
 	orgPath, _ := framework.NewOrganizationFixture(t, server)
@@ -396,6 +409,10 @@ func replicateResourceNegative(ctx context.Context, t *testing.T,
 	resourceName := resMeta.GetName()
 	scenario := &replicateResourceScenario{resourceName: resourceName, kind: kind, gvr: gvr, cluster: clusterName, server: server, kcpShardClusterDynamicClient: kcpShardClusterDynamicClient, cacheKcpClusterDynamicClient: cacheKcpClusterDynamicClient}
 
+	for _, prepare := range prepares {
+		prepare(scenario)
+	}
+
 	t.Logf("Create source %s %s/%s on the root shard for replication", kind, clusterName, resourceName)
 	scenario.CreateSourceResource(ctx, t, res)
 	t.Logf("Verify that the source %s %s/%s was replicated to the cache server", kind, clusterName, resourceName)
@@ -486,6 +503,14 @@ type replicateResourceScenario struct {
 	cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface
 }
 
+func (b *replicateResourceScenario) CreateAdditionalResource(ctx context.Context, t *testing.T, res runtime.Object, kind string, gvr schema.GroupVersionResource) {
+	t.Helper()
+	resUnstructured, err := toUnstructured(res, kind, gvr)
+	require.NoError(t, err)
+	_, err = b.kcpShardClusterDynamicClient.Resource(gvr).Cluster(b.cluster.Path()).Create(ctx, resUnstructured, metav1.CreateOptions{})
+	require.NoError(t, err)
+}
+
 func (b *replicateResourceScenario) CreateSourceResource(ctx context.Context, t *testing.T, res runtime.Object) {
 	t.Helper()
 	resUnstructured, err := toUnstructured(res, b.kind, b.gvr)
@@ -678,14 +703,22 @@ func (b *replicateResourceScenario) verifyResourceReplicationHelper(ctx context.
 	}
 	unstructured.RemoveNestedField(originalResource.Object, "metadata", "resourceVersion")
 	unstructured.RemoveNestedField(cachedResource.Object, "metadata", "resourceVersion")
+
+	// TODO(davidfestal): find out why the generation is not the same, especially for rbacv1. Is it a characteristic of all
+	// internal KCP resources (which are not backed by CRDs)?
+	if b.gvr.Group == rbacv1.SchemeGroupVersion.Group {
+		unstructured.RemoveNestedField(originalResource.Object, "metadata", "generation")
+		unstructured.RemoveNestedField(cachedResource.Object, "metadata", "generation")
+	}
+
 	unstructured.RemoveNestedField(cachedResource.Object, "metadata", "annotations", genericapirequest.AnnotationKey)
 	if cachedStatus, ok := cachedResource.Object["status"]; ok && cachedStatus == nil || (cachedStatus != nil && len(cachedStatus.(map[string]interface{})) == 0) {
 		// TODO: worth investigating:
 		// for some reason cached resources have an empty status set whereas the original resources don't
 		unstructured.RemoveNestedField(cachedResource.Object, "status")
 	}
 	if diff := cmp.Diff(cachedResource.Object, originalResource.Object); len(diff) > 0 {
-		return false, fmt.Sprintf("replicated %s root|%s/%s is different from the original", b.gvr, cluster, cachedResourceMeta.GetName())
+		return false, fmt.Sprintf("replicated %s root|%s/%s is different from the original: %s", b.gvr, cluster, cachedResourceMeta.GetName(), diff)
 	}
 	return true, ""
 }, wait.ForeverTestTimeout, 100*time.Millisecond)
@@ -732,3 +765,167 @@ func createCacheClientConfigForEnvironment(ctx context.Context, t *testing.T, kc
 	require.NoError(t, err)
 	return cacheServerRestConfig
 }
+
+// replicateWorkloadsClusterRoleScenario tests if a ClusterRole related to the workloads API is propagated to the cache server.
+// The test exercises creation, modification and removal of the ClusterRole object.
+func replicateWorkloadsClusterRoleScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
+	t.Helper()
+	replicateResource(ctx,
+		t,
+		server,
+		kcpShardClusterDynamicClient,
+		cacheKcpClusterDynamicClient,
+		"",
+		"ClusterRole",
+		rbacv1.SchemeGroupVersion.WithResource("clusterroles"),
+		&rbacv1.ClusterRole{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: withPseudoRandomSuffix("syncer"),
+			},
+			Rules: []rbacv1.PolicyRule{
+				{
+					Verbs:         []string{"sync"},
+					APIGroups:     []string{"workload.kcp.io"},
+					Resources:     []string{"synctargets"},
+					ResourceNames: []string{"asynctarget"},
+				},
+			},
+		},
+		nil,
+	)
+}
+
+// replicateWorkloadsClusterRoleNegativeScenario checks if a modified or even deleted cached ClusterRole (related to the workloads API) will be reconciled to match the original object.
+func replicateWorkloadsClusterRoleNegativeScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
+	t.Helper()
+	replicateResourceNegative(
+		ctx,
+		t,
+		server,
+		kcpShardClusterDynamicClient,
+		cacheKcpClusterDynamicClient,
+		"",
+		"ClusterRole",
+		rbacv1.SchemeGroupVersion.WithResource("clusterroles"),
+		&rbacv1.ClusterRole{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: withPseudoRandomSuffix("syncer"),
+			},
+			Rules: []rbacv1.PolicyRule{
+				{
+					Verbs:         []string{"sync"},
+					APIGroups:     []string{"workload.kcp.io"},
+					Resources:     []string{"synctargets"},
+					ResourceNames: []string{"asynctarget"},
+				},
+			},
+		},
+		nil,
+	)
+}
+
+// replicateWorkloadsClusterRoleBindingScenario tests if a ClusterRoleBinding related to the workloads API is propagated to the cache server.
+// The test exercises creation, modification and removal of the ClusterRoleBinding object.
+func replicateWorkloadsClusterRoleBindingScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
+	t.Helper()
+
+	clusterRole := &rbacv1.ClusterRole{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: withPseudoRandomSuffix("syncer"),
+		},
+		Rules: []rbacv1.PolicyRule{
+			{
+				Verbs:         []string{"sync"},
+				APIGroups:     []string{"workload.kcp.io"},
+				Resources:     []string{"synctargets"},
+				ResourceNames: []string{"asynctarget"},
+			},
+		},
+	}
+
+	replicateResource(ctx,
+		t,
+		server,
+		kcpShardClusterDynamicClient,
+		cacheKcpClusterDynamicClient,
+		"",
+		"ClusterRoleBinding",
+		rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"),
+		&rbacv1.ClusterRoleBinding{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: withPseudoRandomSuffix("syncer"),
+			},
+			RoleRef: rbacv1.RoleRef{
+				APIGroup: rbacv1.SchemeGroupVersion.Group,
+				Kind:     "ClusterRole",
+				Name:     clusterRole.Name,
+			},
+			Subjects: []rbacv1.Subject{
+				{
+					Kind:      "ServiceAccount",
+					APIGroup:  "",
+					Name:      "kcp-syncer-0000",
+					Namespace: "kcp-syncer-namespace",
+				},
+			},
+		},
+		nil,
+		func(scenario *replicateResourceScenario) {
+			t.Logf("Create additional ClusterRole %s on the root shard for replication", clusterRole.Name)
+			scenario.CreateAdditionalResource(ctx, t, clusterRole, "ClusterRole", rbacv1.SchemeGroupVersion.WithResource("clusterroles"))
+		},
+	)
+}
+
+// replicateWorkloadsClusterRoleBindingNegativeScenario checks if a modified or even deleted cached ClusterRoleBinding (related to the workloads API) will be reconciled to match the original object.
+func replicateWorkloadsClusterRoleBindingNegativeScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
+	t.Helper()
+
+	clusterRole := &rbacv1.ClusterRole{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: withPseudoRandomSuffix("syncer"),
+		},
+		Rules: []rbacv1.PolicyRule{
+			{
+				Verbs:         []string{"sync"},
+				APIGroups:     []string{"workload.kcp.io"},
+				Resources:     []string{"synctargets"},
+				ResourceNames: []string{"asynctarget"},
+			},
+		},
+	}
+
+	replicateResourceNegative(
+		ctx,
+		t,
+		server,
+		kcpShardClusterDynamicClient,
+		cacheKcpClusterDynamicClient,
+		"",
+		"ClusterRoleBinding",
+		rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"),
+		&rbacv1.ClusterRoleBinding{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: withPseudoRandomSuffix("syncer"),
+			},
+			RoleRef: rbacv1.RoleRef{
+				APIGroup: rbacv1.SchemeGroupVersion.Group,
+				Kind:     "ClusterRole",
+				Name:     clusterRole.Name,
+			},
+			Subjects: []rbacv1.Subject{
+				{
+					Kind:      "ServiceAccount",
+					APIGroup:  "",
+					Name:      "kcp-syncer-0000",
+					Namespace: "kcp-syncer-namespace",
+				},
+			},
+		},
+		nil,
+		func(scenario *replicateResourceScenario) {
+			t.Logf("Create additional ClusterRole %s on the root shard for replication", clusterRole.Name)
+			scenario.CreateAdditionalResource(ctx, t, clusterRole, "ClusterRole", rbacv1.SchemeGroupVersion.WithResource("clusterroles"))
+		},
+	)
+}
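
For reference, a minimal sketch of how a further scenario could plug into the new prepares hooks introduced by this commit. Each hook runs after the replicateResourceScenario is constructed but before CreateSourceResource, which is what lets the ClusterRoleBinding scenarios above seed the ClusterRole they reference. The scenario below is hypothetical and only illustrates the calling convention; replicateResource, withPseudoRandomSuffix, and CreateAdditionalResource are the helpers from this test file, and the object names are made up.

	// Hypothetical scenario, not part of this commit: replicate a
	// ClusterRoleBinding whose referenced ClusterRole is created by a
	// prepare hook before replication is verified.
	func replicateBoundClusterRoleBindingSketch(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
		t.Helper()

		// Prerequisite object that the replicated resource will reference.
		referenced := &rbacv1.ClusterRole{
			ObjectMeta: metav1.ObjectMeta{Name: withPseudoRandomSuffix("referenced")},
		}

		replicateResource(ctx,
			t,
			server,
			kcpShardClusterDynamicClient,
			cacheKcpClusterDynamicClient,
			"", // passed through unchanged, as in the scenarios above
			"ClusterRoleBinding",
			rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"),
			&rbacv1.ClusterRoleBinding{
				ObjectMeta: metav1.ObjectMeta{Name: withPseudoRandomSuffix("binding")},
				RoleRef: rbacv1.RoleRef{
					APIGroup: rbacv1.SchemeGroupVersion.Group,
					Kind:     "ClusterRole",
					Name:     referenced.Name,
				},
			},
			nil, // no modified-spec variant, matching the rbac scenarios above
			// Prepare hook: create the referenced ClusterRole on the root shard
			// before the binding itself is created and replicated.
			func(scenario *replicateResourceScenario) {
				scenario.CreateAdditionalResource(ctx, t, referenced, "ClusterRole", rbacv1.SchemeGroupVersion.WithResource("clusterroles"))
			},
		)
	}

Because prepares is variadic, existing call sites compile unchanged, and a scenario can pass any number of hooks when it needs more than one prerequisite object.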
