// The MIT License
//
// Copyright (c) 2025 Temporal Technologies Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package tests

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/suite"
	commonpb "go.temporal.io/api/common/v1"
	enumspb "go.temporal.io/api/enums/v1"
	workflowpb "go.temporal.io/api/workflow/v1"
	"go.temporal.io/api/workflowservice/v1"
	sdkclient "go.temporal.io/sdk/client"
	"go.temporal.io/sdk/worker"
	"go.temporal.io/sdk/workflow"
	"go.temporal.io/server/common/dynamicconfig"
	"go.temporal.io/server/common/testing/testvars"
	"go.temporal.io/server/tests/testcore"
)

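// TaskQueueSuite contains functional tests of task queue rate limiting, in
// particular that backlog tasks invalidated by workflow termination do not
// consume the task queue dispatch rate limit while the backlog drains.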
type TaskQueueSuite struct {
	testcore.FunctionalTestSuite
	sdkClient sdkclient.Client
}

func TestTaskQueueSuite(t *testing.T) {
	t.Parallel()
	suite.Run(t, new(TaskQueueSuite))
}

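// SetupSuite starts the default cluster with four read and write partitions so
// the multi-partition cases below have partitions to spread tasks across.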
func (s *TaskQueueSuite) SetupSuite() {
	dynamicConfigOverrides := map[dynamicconfig.Key]any{
		dynamicconfig.MatchingNumTaskqueueWritePartitions.Key(): 4,
		dynamicconfig.MatchingNumTaskqueueReadPartitions.Key():  4,
	}
	s.FunctionalTestSuite.SetupSuiteWithDefaultCluster(testcore.WithDynamicConfigOverrides(dynamicConfigOverrides))
}

func (s *TaskQueueSuite) SetupTest() {
	s.FunctionalTestSuite.SetupTest()

	var err error
	s.sdkClient, err = sdkclient.Dial(sdkclient.Options{
		HostPort:  s.FrontendGRPCAddress(),
		Namespace: s.Namespace().String(),
	})
	s.NoError(err)
}

func (s *TaskQueueSuite) TearDownTest() {
	if s.sdkClient != nil {
		s.sdkClient.Close()
	}
	s.FunctionalTestSuite.TearDownTest()
}

// Not using RunTestWithMatchingBehavior because I want to pass different
// expected drain times for different configurations.
func (s *TaskQueueSuite) TestTaskQueueRateLimit() {
	s.RunTaskQueueRateLimitTest(1, 1, 12*time.Second, true)  // ~0.75s avg
	s.RunTaskQueueRateLimitTest(1, 1, 12*time.Second, false) // ~1.1s avg

	// Testing multiple partitions with insufficient pollers is too flaky: token
	// recycling depends on a poller being available to accept the token, so that
	// case is not tested here.
	s.RunTaskQueueRateLimitTest(4, 8, 24*time.Second, true)  // ~1.6s avg
	s.RunTaskQueueRateLimitTest(4, 8, 24*time.Second, false) // ~6s avg
}

func (s *TaskQueueSuite) RunTaskQueueRateLimitTest(nPartitions, nWorkers int, timeToDrain time.Duration, useNewMatching bool) {
	s.Run(s.testTaskQueueRateLimitName(nPartitions, nWorkers, useNewMatching), func() {
		s.taskQueueRateLimitTest(nPartitions, nWorkers, timeToDrain, useNewMatching)
	})
}

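// taskQueueRateLimitTest builds a backlog of maxBacklog workflow tasks,
// terminates all the workflows so every backlogged task is invalid, then
// checks that workers drain the backlog within timeToDrain.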
func (s *TaskQueueSuite) taskQueueRateLimitTest(nPartitions, nWorkers int, timeToDrain time.Duration, useNewMatching bool) {
	if useNewMatching {
		s.OverrideDynamicConfig(dynamicconfig.MatchingUseNewMatcher, true)
	}
	s.OverrideDynamicConfig(dynamicconfig.MatchingNumTaskqueueReadPartitions, nPartitions)
	s.OverrideDynamicConfig(dynamicconfig.MatchingNumTaskqueueWritePartitions, nPartitions)

	// Raise the forwarder rate limit so the default (10 rps) for task forwarding
	// between task queue partitions does not mask the dispatch limit under test.
	s.OverrideDynamicConfig(dynamicconfig.MatchingForwarderMaxRatePerSecond, 1000)

	// 30 tasks at 1 task per second take 30 seconds to dispatch. If invalid
	// tasks are NOT consuming the rate limit, draining should take well under that.
	s.OverrideDynamicConfig(dynamicconfig.AdminMatchingNamespaceTaskqueueToPartitionDispatchRate, 1)
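	// Zero TTL so DescribeTaskQueue stats are not served from cache while we
	// poll the backlog count below.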
	s.OverrideDynamicConfig(dynamicconfig.TaskQueueInfoByBuildIdTTL, 0)

	const maxBacklog = 30
	tv := testvars.New(s.T())

	helloRateLimitTest := func(ctx workflow.Context, name string) (string, error) {
		return "Hello " + name + "!", nil
	}

	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	// start workflows to create a backlog
	for wfidx := 0; wfidx < maxBacklog; wfidx++ {
		_, err := s.sdkClient.ExecuteWorkflow(ctx, sdkclient.StartWorkflowOptions{
			TaskQueue: tv.TaskQueue().GetName(),
			ID:        fmt.Sprintf("wf%d", wfidx),
		}, helloRateLimitTest, "Donna")
		s.NoError(err)
	}

	// wait for the backlog to reach maxBacklog
	wfBacklogCount := int64(0)
	s.Eventually(
		func() bool {
			wfBacklogCount = s.getBacklogCount(ctx, tv)
			return wfBacklogCount >= maxBacklog
		},
		5*time.Second,
		200*time.Millisecond,
	)

	// terminate all those workflow executions so that every task in the backlog is invalid
	var wfList []*workflowpb.WorkflowExecutionInfo
	s.Eventually(
		func() bool {
			listResp, err := s.FrontendClient().ListWorkflowExecutions(ctx, &workflowservice.ListWorkflowExecutionsRequest{
				Namespace: s.Namespace().String(),
				Query:     fmt.Sprintf("TaskQueue = '%s'", tv.TaskQueue().GetName()),
			})
			s.NoError(err)
			wfList = listResp.GetExecutions()
			return len(wfList) == maxBacklog
		},
		5*time.Second,
		200*time.Millisecond,
	)

	for _, exec := range wfList {
		_, err := s.FrontendClient().TerminateWorkflowExecution(ctx, &workflowservice.TerminateWorkflowExecutionRequest{
			Namespace: s.Namespace().String(),
			WorkflowExecution: &commonpb.WorkflowExecution{
				WorkflowId: exec.GetExecution().GetWorkflowId(),
				RunId:      exec.GetExecution().GetRunId(),
			},
			Reason:   "test",
			Identity: tv.ClientIdentity(),
		})
		s.NoError(err)
	}

	// start some workers to drain the backlog
	workers := make([]worker.Worker, nWorkers)
	for i := 0; i < nWorkers; i++ {
		workers[i] = worker.New(s.sdkClient, tv.TaskQueue().GetName(), worker.Options{})
		workers[i].RegisterWorkflow(helloRateLimitTest)
		err := workers[i].Start()
		s.NoError(err)
		// make sure each worker stops when this subtest completes
		defer workers[i].Stop()
	}

	// wait for the backlog to drain to zero
	s.Eventually(
		func() bool {
			wfBacklogCount = s.getBacklogCount(ctx, tv)
			return wfBacklogCount == 0
		},
		timeToDrain,
		500*time.Millisecond,
	)
}

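// getBacklogCount returns the approximate workflow-task backlog count reported
// by DescribeTaskQueue in enhanced mode, aggregated across partitions.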
func (s *TaskQueueSuite) getBacklogCount(ctx context.Context, tv *testvars.TestVars) int64 {
	resp, err := s.FrontendClient().DescribeTaskQueue(ctx, &workflowservice.DescribeTaskQueueRequest{
		Namespace:   s.Namespace().String(),
		TaskQueue:   tv.TaskQueue(),
		ApiMode:     enumspb.DESCRIBE_TASK_QUEUE_MODE_ENHANCED,
		ReportStats: true,
	})
	s.NoError(err)
	return resp.GetVersionsInfo()[""].GetTypesInfo()[int32(enumspb.TASK_QUEUE_TYPE_WORKFLOW)].GetStats().GetApproximateBacklogCount()
}

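// testTaskQueueRateLimitName builds a subtest name from the matcher variant,
// partition count, and worker count under test.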
func (s *TaskQueueSuite) testTaskQueueRateLimitName(nPartitions, nWorkers int, useNewMatching bool) string {
	ret := fmt.Sprintf("%vPartitions_%vWorkers", nPartitions, nWorkers)
	if useNewMatching {
		return "NewMatching_" + ret
	}
	return "OldMatching_" + ret
}