Skip to content

Commit a0d3230

Browse files
committed
azurerm_kubernetes_cluster_node_pool: fix subnet lock to use resource ID instead of name
The subnet mutex in nodepool creation used `locks.MultipleByName` with just the subnet name as the lock key. This caused false-positive lock contention when two nodepools in different VNets/clusters used subnets with the same name (e.g., "nodesubnet"), serializing operations that could safely run in parallel. Switch to `locks.MultipleByID`, which uses the full Azure resource ID as the lock key, ensuring that only operations on the same actual subnet are serialized. This is consistent with the approach already used in `container_group_resource.go`. Add acceptance test `TestAccKubernetesClusterNodePool_parallelCrossVNetSameSubnetName` to verify parallel nodepool creation across different VNets with identically named subnets.
1 parent 6412dae commit a0d3230

File tree

2 files changed

+131
-5
lines changed

2 files changed

+131
-5
lines changed

internal/services/containers/kubernetes_cluster_node_pool_resource.go

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -649,20 +649,20 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int
649649
profile.OsDiskType = pointer.To(agentpools.OSDiskType(osDiskType))
650650
}
651651

652-
subnetsToLock := make([]string, 0)
652+
subnetIDsToLock := make([]string, 0)
653653
if podSubnetID != nil {
654654
// Lock pod subnet to avoid race condition with AKS
655655
profile.PodSubnetID = pointer.To(podSubnetID.ID())
656-
subnetsToLock = append(subnetsToLock, podSubnetID.SubnetName)
656+
subnetIDsToLock = append(subnetIDsToLock, podSubnetID.ID())
657657
}
658658

659659
if nodeSubnetID != nil {
660660
// Lock node subnet to avoid race condition with AKS
661661
profile.VnetSubnetID = pointer.To(nodeSubnetID.ID())
662-
subnetsToLock = append(subnetsToLock, nodeSubnetID.SubnetName)
662+
subnetIDsToLock = append(subnetIDsToLock, nodeSubnetID.ID())
663663
}
664-
locks.MultipleByName(&subnetsToLock, network.SubnetResourceName)
665-
defer locks.UnlockMultipleByName(&subnetsToLock, network.SubnetResourceName)
664+
locks.MultipleByID(&subnetIDsToLock)
665+
defer locks.UnlockMultipleByID(&subnetIDsToLock)
666666

667667
if hostGroupID := d.Get("host_group_id").(string); hostGroupID != "" {
668668
profile.HostGroupID = pointer.To(hostGroupID)

internal/services/containers/kubernetes_cluster_node_pool_resource_test.go

Lines changed: 126 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3857,3 +3857,129 @@ resource "azurerm_kubernetes_cluster_node_pool" "pool2" {
38573857
data.RandomInteger, // kubernetes_cluster dns_prefix
38583858
)
38593859
}
3860+
3861+
func TestAccKubernetesClusterNodePool_parallelCrossVNetSameSubnetName(t *testing.T) {
3862+
data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test")
3863+
r := KubernetesClusterNodePoolResource{}
3864+
3865+
data.ResourceTest(t, r, []acceptance.TestStep{
3866+
{
3867+
Config: r.parallelCrossVNetSameSubnetNameConfig(data),
3868+
Check: acceptance.ComposeTestCheckFunc(
3869+
check.That("azurerm_kubernetes_cluster_node_pool.pool1").ExistsInAzure(r),
3870+
check.That("azurerm_kubernetes_cluster_node_pool.pool2").ExistsInAzure(r),
3871+
),
3872+
},
3873+
data.ImportStepFor("azurerm_kubernetes_cluster_node_pool.pool1"),
3874+
data.ImportStepFor("azurerm_kubernetes_cluster_node_pool.pool2"),
3875+
})
3876+
}
3877+
3878+
func (KubernetesClusterNodePoolResource) parallelCrossVNetSameSubnetNameConfig(data acceptance.TestData) string {
3879+
return fmt.Sprintf(`
3880+
provider "azurerm" {
3881+
features {}
3882+
}
3883+
3884+
resource "azurerm_resource_group" "test" {
3885+
name = "acctestRG-aks-%[1]d"
3886+
location = "%[2]s"
3887+
}
3888+
3889+
resource "azurerm_virtual_network" "test1" {
3890+
name = "acctestnw1-%[1]d"
3891+
address_space = ["10.0.0.0/8"]
3892+
location = azurerm_resource_group.test.location
3893+
resource_group_name = azurerm_resource_group.test.name
3894+
}
3895+
3896+
resource "azurerm_subnet" "nodesubnet1" {
3897+
name = "nodesubnet"
3898+
resource_group_name = azurerm_resource_group.test.name
3899+
virtual_network_name = azurerm_virtual_network.test1.name
3900+
address_prefixes = ["10.240.0.0/16"]
3901+
}
3902+
3903+
resource "azurerm_virtual_network" "test2" {
3904+
name = "acctestnw2-%[1]d"
3905+
address_space = ["172.16.0.0/12"]
3906+
location = azurerm_resource_group.test.location
3907+
resource_group_name = azurerm_resource_group.test.name
3908+
}
3909+
3910+
resource "azurerm_subnet" "nodesubnet2" {
3911+
name = "nodesubnet"
3912+
resource_group_name = azurerm_resource_group.test.name
3913+
virtual_network_name = azurerm_virtual_network.test2.name
3914+
address_prefixes = ["172.16.0.0/16"]
3915+
}
3916+
3917+
resource "azurerm_kubernetes_cluster" "test1" {
3918+
name = "acctestaks1%[1]d"
3919+
location = azurerm_resource_group.test.location
3920+
resource_group_name = azurerm_resource_group.test.name
3921+
dns_prefix = "acctestaks1%[1]d"
3922+
sku_tier = "Standard"
3923+
default_node_pool {
3924+
name = "default"
3925+
node_count = 1
3926+
vm_size = "Standard_DS2_v2"
3927+
vnet_subnet_id = azurerm_subnet.nodesubnet1.id
3928+
upgrade_settings {
3929+
max_surge = "10%%"
3930+
}
3931+
}
3932+
network_profile {
3933+
network_plugin = "azure"
3934+
}
3935+
identity {
3936+
type = "SystemAssigned"
3937+
}
3938+
}
3939+
3940+
resource "azurerm_kubernetes_cluster" "test2" {
3941+
name = "acctestaks2%[1]d"
3942+
location = azurerm_resource_group.test.location
3943+
resource_group_name = azurerm_resource_group.test.name
3944+
dns_prefix = "acctestaks2%[1]d"
3945+
sku_tier = "Standard"
3946+
default_node_pool {
3947+
name = "default"
3948+
node_count = 1
3949+
vm_size = "Standard_DS2_v2"
3950+
vnet_subnet_id = azurerm_subnet.nodesubnet2.id
3951+
upgrade_settings {
3952+
max_surge = "10%%"
3953+
}
3954+
}
3955+
network_profile {
3956+
network_plugin = "azure"
3957+
}
3958+
identity {
3959+
type = "SystemAssigned"
3960+
}
3961+
}
3962+
3963+
resource "azurerm_kubernetes_cluster_node_pool" "pool1" {
3964+
name = "pool1"
3965+
kubernetes_cluster_id = azurerm_kubernetes_cluster.test1.id
3966+
vm_size = "Standard_DS2_v2"
3967+
node_count = 1
3968+
vnet_subnet_id = azurerm_subnet.nodesubnet1.id
3969+
upgrade_settings {
3970+
max_surge = "10%%"
3971+
}
3972+
}
3973+
3974+
resource "azurerm_kubernetes_cluster_node_pool" "pool2" {
3975+
name = "pool2"
3976+
kubernetes_cluster_id = azurerm_kubernetes_cluster.test2.id
3977+
vm_size = "Standard_DS2_v2"
3978+
node_count = 1
3979+
vnet_subnet_id = azurerm_subnet.nodesubnet2.id
3980+
upgrade_settings {
3981+
max_surge = "10%%"
3982+
}
3983+
}
3984+
`, data.RandomInteger, data.Locations.Primary)
3985+
}

0 commit comments

Comments
 (0)