Skip to content

Commit e9d7906

Browse files
authored
Skip Interlocked.Exchange and FlushConfig when no update detected (#905)
* Skip Interlocked.Exchange and FlushConfig when no update detected on merge
* Special case of zero epoch necessitates tracking of explicit slot update
* Version bump to 1.0.50
* Ensure src is owner
* Update MigrateSlotWalk test
1 parent af9cf0e commit e9d7906

File tree

3 files changed

+22
-12
lines changed

3 files changed

+22
-12
lines changed

Version.props

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
<Project>
22
<!-- Versioning property for builds and packages -->
33
<PropertyGroup>
4-
<VersionPrefix>1.0.49</VersionPrefix>
4+
<VersionPrefix>1.0.50</VersionPrefix>
55
</PropertyGroup>
66
</Project>

libs/cluster/Server/ClusterConfig.cs

+7-1
Original file line numberDiff line numberDiff line change
@@ -943,6 +943,8 @@ private ClusterConfig MergeWorkerInfo(Worker worker)
943943

944944
public ClusterConfig MergeSlotMap(ClusterConfig senderConfig, ILogger logger = null)
945945
{
946+
// Track if update happened to avoid expensive merge and FlushConfig operation when possible
947+
var updated = false;
946948
var senderSlotMap = senderConfig.slotMap;
947949
var senderWorkerId = GetWorkerIdFromNodeId(senderConfig.LocalNodeId);
948950

@@ -979,12 +981,16 @@ public ClusterConfig MergeSlotMap(ClusterConfig senderConfig, ILogger logger = n
979981
if (senderConfig.LocalNodeConfigEpoch != 0 && workers[currentOwnerId].ConfigEpoch >= senderConfig.LocalNodeConfigEpoch)
980982
continue;
981983

984+
// Update happened only if workerId or state changed
985+
// NOTE: this avoids message flooding when sender epoch equals zero
986+
updated = newSlotMap[i]._workerId != senderWorkerId || newSlotMap[i]._state != SlotState.STABLE;
987+
982988
// Update ownership of node
983989
newSlotMap[i]._workerId = senderWorkerId;
984990
newSlotMap[i]._state = SlotState.STABLE;
985991
}
986992

987-
return new(newSlotMap, workers);
993+
return updated ? new(newSlotMap, workers) : this;
988994
}
989995

990996
/// <summary>

test/Garnet.test.cluster/ClusterMigrateTests.cs

+14-10
Original file line numberDiff line numberDiff line change
@@ -1808,7 +1808,7 @@ void MigrateKeys()
18081808
var status = context.clusterTestUtils.SetSlot(_tgt, slot, "IMPORTING", nodeIds[_src], logger: context.logger);
18091809
while (string.IsNullOrEmpty(status) || !status.Equals("OK"))
18101810
{
1811-
SetSlot(_src);
1811+
SetSlot(_src, slot);
18121812
ClusterTestUtils.BackOff(cancellationToken: cancellationToken, msg: $"{nodeIds[_src]}({nodeEndpoints[_src].Port}) > {slot} > {nodeIds[_tgt]}({nodeEndpoints[_tgt].Port})");
18131813
status = context.clusterTestUtils.SetSlot(_tgt, slot, "IMPORTING", nodeIds[_src], logger: context.logger);
18141814
}
@@ -1847,6 +1847,8 @@ void MigrateSlots()
18471847
var node = config.GetBySlot(slot);
18481848
if (node != null && node.NodeId.Equals(nodeIds[_src]))
18491849
break;
1850+
// Force set slot to src node
1851+
SetSlot(_src, slot);
18501852
ClusterTestUtils.BackOff(cancellationToken: cancellationToken);
18511853
}
18521854

@@ -1878,15 +1880,6 @@ void MigrateSlots()
18781880
ClusterTestUtils.BackOff(cancellationToken: cancellationToken);
18791881
}
18801882
}
1881-
1882-
void SetSlot(int nodeIndex)
1883-
{
1884-
for (var i = 0; i < shards; i++)
1885-
{
1886-
var resp = context.clusterTestUtils.SetSlot(i, slot, "NODE", nodeIds[nodeIndex], logger: context.logger);
1887-
ClassicAssert.AreEqual("OK", resp);
1888-
}
1889-
}
18901883
}
18911884

18921885
ValidateConfig();
@@ -1932,14 +1925,25 @@ void ValidateConfig()
19321925

19331926
if (node == null || nodeIds[shards - 1] != node.NodeId)
19341927
{
1928+
// If failed to converge start from the beginning and backOff to give time to converge
19351929
i = 0;
1930+
SetSlot(shards - 1, slot);
19361931
ClusterTestUtils.BackOff(cancellationToken: cancellationToken);
19371932
continue;
19381933
}
19391934
ClassicAssert.AreEqual(nodeIds[shards - 1], node.NodeId);
19401935
}
19411936
}
19421937
}
1938+
1939+
void SetSlot(int nodeIndex, int slot)
1940+
{
1941+
for (var i = 0; i < shards; i++)
1942+
{
1943+
var resp = context.clusterTestUtils.SetSlot(i, slot, "NODE", nodeIds[nodeIndex], logger: context.logger);
1944+
ClassicAssert.AreEqual("OK", resp);
1945+
}
1946+
}
19431947
}
19441948
}
19451949
}

0 commit comments

Comments (0)