Skip to content

Commit 5c0b26e

Browse files
authored
fix: acl and flatcar skip oras repo tag lookup when the cluster is network isolated (#8157)
1 parent d48a82e commit 5c0b26e

File tree

3 files changed

+81
-0
lines changed

3 files changed

+81
-0
lines changed

e2e/scenario_test.go

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -841,6 +841,75 @@ func Test_Flatcar_DisableSSH(t *testing.T) {
841841
})
842842
}
843843

844+
func Test_Flatcar_NetworkIsolatedCluster_NonAnonymousACR(t *testing.T) {
845+
RunScenario(t, &Scenario{
846+
Description: "Tests that a node using Flatcar VHD with network isolated cluster enabled",
847+
Tags: Tags{
848+
NetworkIsolated: true,
849+
NonAnonymousACR: true,
850+
},
851+
Config: Config{
852+
Cluster: ClusterAzureNetworkIsolated,
853+
VHD: config.VHDFlatcarGen2,
854+
BootstrapConfigMutator: func(nbc *datamodel.NodeBootstrappingConfiguration) {
855+
nbc.OutboundType = datamodel.OutboundTypeBlock
856+
nbc.ContainerService.Properties.SecurityProfile = &datamodel.SecurityProfile{
857+
PrivateEgress: &datamodel.PrivateEgress{
858+
Enabled: true,
859+
ContainerRegistryServer: fmt.Sprintf("%s.azurecr.io/aks-managed-repository", config.PrivateACRNameNotAnon(config.Config.DefaultLocation)),
860+
},
861+
}
862+
nbc.ContainerService.Properties.OrchestratorProfile.KubernetesConfig.UseManagedIdentity = true
863+
nbc.AgentPoolProfile.KubernetesConfig.UseManagedIdentity = true
864+
nbc.K8sComponents.LinuxCredentialProviderURL = fmt.Sprintf(
865+
"https://packages.aks.azure.com/cloud-provider-azure/v%s/binaries/azure-acr-credential-provider-linux-amd64-v%s.tar.gz",
866+
nbc.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion,
867+
nbc.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion)
868+
nbc.KubeletConfig["--image-credential-provider-config"] = "/var/lib/kubelet/credential-provider-config.yaml"
869+
nbc.KubeletConfig["--image-credential-provider-bin-dir"] = "/var/lib/kubelet/credential-provider"
870+
},
871+
Validator: func(ctx context.Context, s *Scenario) {
872+
},
873+
},
874+
})
875+
}
876+
877+
func Test_ACL_NetworkIsolatedCluster_NonAnonymousACR(t *testing.T) {
878+
RunScenario(t, &Scenario{
879+
Description: "Tests that a node using ACL VHD with network isolated cluster enabled",
880+
Tags: Tags{
881+
NetworkIsolated: true,
882+
NonAnonymousACR: true,
883+
},
884+
Config: Config{
885+
Cluster: ClusterAzureNetworkIsolated,
886+
VHD: config.VHDACLGen2TL,
887+
VMConfigMutator: func(vmss *armcompute.VirtualMachineScaleSet) {
888+
vmss.Properties = addTrustedLaunchToVMSS(vmss.Properties)
889+
},
890+
BootstrapConfigMutator: func(nbc *datamodel.NodeBootstrappingConfiguration) {
891+
nbc.OutboundType = datamodel.OutboundTypeBlock
892+
nbc.ContainerService.Properties.SecurityProfile = &datamodel.SecurityProfile{
893+
PrivateEgress: &datamodel.PrivateEgress{
894+
Enabled: true,
895+
ContainerRegistryServer: fmt.Sprintf("%s.azurecr.io/aks-managed-repository", config.PrivateACRNameNotAnon(config.Config.DefaultLocation)),
896+
},
897+
}
898+
nbc.ContainerService.Properties.OrchestratorProfile.KubernetesConfig.UseManagedIdentity = true
899+
nbc.AgentPoolProfile.KubernetesConfig.UseManagedIdentity = true
900+
nbc.K8sComponents.LinuxCredentialProviderURL = fmt.Sprintf(
901+
"https://packages.aks.azure.com/cloud-provider-azure/v%s/binaries/azure-acr-credential-provider-linux-amd64-v%s.tar.gz",
902+
nbc.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion,
903+
nbc.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion)
904+
nbc.KubeletConfig["--image-credential-provider-config"] = "/var/lib/kubelet/credential-provider-config.yaml"
905+
nbc.KubeletConfig["--image-credential-provider-bin-dir"] = "/var/lib/kubelet/credential-provider"
906+
},
907+
Validator: func(ctx context.Context, s *Scenario) {
908+
},
909+
},
910+
})
911+
}
912+
844913
func Test_AzureLinuxV3_NetworkIsolatedCluster_NonAnonymousACR(t *testing.T) {
845914
RunScenario(t, &Scenario{
846915
Description: "Tests that a node using a AzureLinuxV3 (CgroupV2) VHD can be properly bootstrapped",

parts/linux/cloud-init/artifacts/acl/cse_install_acl.sh

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,12 @@ matchLocalSysext() {
3838

3939
matchRemoteSysext() {
4040
local seURL=$1 desiredVer=$2 seArch=$3
41+
if [ -n "${BOOTSTRAP_PROFILE_CONTAINER_REGISTRY_SERVER}" ]; then
42+
# For network isolated cluster, acr cache rule does not support oras repo tags.
43+
# return fixed renovateTag '-1' as workaround
44+
echo "v${desiredVer}-1-azlinux3-${seArch}"
45+
return 0
46+
fi
4147
# Match either arch-specific tags (v{ver}[.~-]*-azlinux3-{arch}) or exact version tags ({ver})
4248
retrycmd_silent 120 5 20 oras repo tags --registry-config "${ORAS_REGISTRY_CONFIG_FILE}" "${seURL}" | grep -Ex "(v${desiredVer//./\\.}[.~-].*-azlinux3-${seArch}|${desiredVer//./\\.})" | sort -V | tail -n1
4349
test ${PIPESTATUS[0]} -eq 0

parts/linux/cloud-init/artifacts/flatcar/cse_install_flatcar.sh

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,12 @@ matchLocalSysext() {
2424

2525
# matchRemoteSysext prints the best-matching remote sysext tag for the
# requested version/arch.
#   $1 - registry repo URL for the sysext
#   $2 - desired sysext version
#   $3 - target architecture
# Returns non-zero when the registry query fails.
matchRemoteSysext() {
    local url="$1"
    local ver="$2"
    local arch="$3"

    # In a network-isolated cluster (private-egress registry configured),
    # the ACR cache rule cannot serve `oras repo tags`; emit the fixed
    # renovate tag with suffix '-1' as a workaround instead of querying.
    if [ -n "${BOOTSTRAP_PROFILE_CONTAINER_REGISTRY_SERVER}" ]; then
        echo "v${ver}-1-azlinux3-${arch}"
        return 0
    fi

    # List remote tags (with retries), keep only v{ver}[.~-]*-azlinux3-{arch}
    # matches, and print the highest by version sort.
    retrycmd_silent 120 5 20 oras repo tags --registry-config "${ORAS_REGISTRY_CONFIG_FILE}" "${url}" | grep -Ex "v${ver//./\\.}[.~-].*-azlinux3-${arch}" | sort -V | tail -n1
    # Propagate the oras/retry exit status, not grep/sort/tail's.
    test ${PIPESTATUS[0]} -eq 0
}

0 commit comments

Comments
 (0)