diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 15672bdc74..fa273efd91 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -9,7 +9,7 @@ on:
 name: Release
 jobs:
   load:
-    runs-on: ubicloud-standard-4
+    runs-on: ubicloud-standard-8
     timeout-minutes: 30
     strategy:
       matrix:
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index cdd791278e..39474ab41b 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -54,7 +54,7 @@ jobs:
         run: docker compose down
 
   unit:
-    runs-on: ubicloud-standard-4
+    runs-on: ubicloud-standard-8
     steps:
       - uses: actions/checkout@v6
       - name: Setup Go
@@ -72,7 +72,7 @@ jobs:
         run: go test $(go list ./... | grep -v "quickstart") -v -failfast
 
   integration:
-    runs-on: ubicloud-standard-4
+    runs-on: ubicloud-standard-8
    env:
      DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet?sslmode=disable
 
@@ -117,7 +117,7 @@ jobs:
         run: docker compose down
 
   e2e:
-    runs-on: ubicloud-standard-4
+    runs-on: ubicloud-standard-8
     timeout-minutes: 30
     env:
       DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet?sslmode=disable
@@ -206,7 +206,7 @@ jobs:
         run: docker compose down
 
   e2e-pgmq:
-    runs-on: ubicloud-standard-4
+    runs-on: ubicloud-standard-8
     timeout-minutes: 30
     env:
       DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet?sslmode=disable
@@ -297,7 +297,7 @@ jobs:
         run: docker compose down
 
   load:
-    runs-on: ubicloud-standard-4
+    runs-on: ubicloud-standard-8
     timeout-minutes: 30
     strategy:
       matrix:
@@ -339,8 +339,213 @@ jobs:
           TESTING_MATRIX_PG_VERSION: ${{ matrix.pg-version }}
           TESTING_MATRIX_OPTIMISTIC_SCHEDULING: ${{ matrix.optimistic-scheduling }}
 
+  load-online-migrate:
+    runs-on: ubicloud-standard-8
+    timeout-minutes: 30
+    env:
+      DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet?sslmode=disable
+
+    steps:
+      - uses: actions/checkout@v6
+        with:
+          fetch-depth: 0
+          fetch-tags: true
+
+      - name: Setup Go
+        uses: actions/setup-go@v6
+        with:
+          go-version: "1.25"
+
+      - name: Compose
+        run: docker compose up -d
+
+      - name: Determine latest stable release tag
+        run: |
+          LATEST_TAG=$(git tag --sort=-v:refname | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | head -1)
+          if [ -z "$LATEST_TAG" ]; then
+            echo "ERROR: No stable release tag found"
+            exit 1
+          fi
+          echo "Latest stable tag: $LATEST_TAG"
+          echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV
+
+      - name: Pull old release images
+        run: |
+          docker pull ghcr.io/hatchet-dev/hatchet/hatchet-migrate:${{ env.LATEST_TAG }}
+          docker pull ghcr.io/hatchet-dev/hatchet/hatchet-admin:${{ env.LATEST_TAG }}
+          docker pull ghcr.io/hatchet-dev/hatchet/hatchet-engine:${{ env.LATEST_TAG }}
+          docker pull ghcr.io/hatchet-dev/hatchet/hatchet-loadtest:${{ env.LATEST_TAG }}
+
+      - name: Run old migrations
+        run: |
+          docker run --rm --network host \
+            -e DATABASE_URL="${{ env.DATABASE_URL }}" \
+            ghcr.io/hatchet-dev/hatchet/hatchet-migrate:${{ env.LATEST_TAG }}
+
+      - name: Setup config and seed database
+        run: |
+          mkdir -p generated
+          docker run --rm --network host \
+            -v ${{ github.workspace }}/generated:/hatchet/generated \
+            -e DATABASE_URL="${{ env.DATABASE_URL }}" \
+            -e SERVER_GRPC_PORT=7077 \
+            -e SERVER_GRPC_BROADCAST_ADDRESS=localhost:7077 \
+            -e SERVER_GRPC_INSECURE=true \
+            -e SERVER_AUTH_COOKIE_DOMAIN=localhost \
+            -e SERVER_AUTH_COOKIE_INSECURE=true \
+            ghcr.io/hatchet-dev/hatchet/hatchet-admin:${{ env.LATEST_TAG }} \
+            /hatchet/hatchet-admin quickstart --skip certs --generated-config-dir /hatchet/generated
+
+      - name: Generate API token
+        run: |
+          TOKEN=$(docker run --rm --network host \
+            -v ${{ github.workspace }}/generated:/hatchet/generated \
+            -e DATABASE_URL="${{ env.DATABASE_URL }}" \
+            -e SERVER_GRPC_PORT=7077 \
+            -e SERVER_GRPC_BROADCAST_ADDRESS=localhost:7077 \
+            -e SERVER_GRPC_INSECURE=true \
+            -e SERVER_AUTH_COOKIE_DOMAIN=localhost \
+            -e SERVER_AUTH_COOKIE_INSECURE=true \
+            ghcr.io/hatchet-dev/hatchet/hatchet-admin:${{ env.LATEST_TAG }} \
+            /hatchet/hatchet-admin token create --config /hatchet/generated)
+          echo "HATCHET_CLIENT_TOKEN=$TOKEN" >> $GITHUB_ENV
+
+      - name: Start old engine
+        run: |
+          docker run -d --name hatchet-engine --network host \
+            -v ${{ github.workspace }}/generated:/hatchet/generated \
+            -e DATABASE_URL="${{ env.DATABASE_URL }}" \
+            -e SERVER_GRPC_PORT=7077 \
+            -e SERVER_GRPC_BROADCAST_ADDRESS=localhost:7077 \
+            -e SERVER_GRPC_INSECURE=true \
+            -e SERVER_AUTH_COOKIE_DOMAIN=localhost \
+            -e SERVER_AUTH_COOKIE_INSECURE=true \
+            -e SERVER_MSGQUEUE_KIND=postgres \
+            -e SERVER_LOGGER_LEVEL=warn \
+            -e SERVER_LOGGER_FORMAT=console \
+            -e DATABASE_LOGGER_LEVEL=warn \
+            -e DATABASE_LOGGER_FORMAT=console \
+            ghcr.io/hatchet-dev/hatchet/hatchet-engine:${{ env.LATEST_TAG }} \
+            /hatchet/hatchet-engine --config /hatchet/generated
+          echo "Waiting 30s for engine to start..."
+          sleep 30
+
+      - name: Start old load test
+        run: |
+          docker run -d --name hatchet-loadtest --network host \
+            -e HATCHET_CLIENT_TOKEN="${{ env.HATCHET_CLIENT_TOKEN }}" \
+            -e HATCHET_CLIENT_TLS_STRATEGY=none \
+            -e HATCHET_CLIENT_HOST_PORT=localhost:7077 \
+            ghcr.io/hatchet-dev/hatchet/hatchet-loadtest:${{ env.LATEST_TAG }} \
+            /hatchet/hatchet-load-test loadtest -e 10 -d 240s -w 60s -s 100
+
+      - name: Wait then apply new migrations
+        run: |
+          echo "Waiting 30s for the load test to start..."
+          sleep 30
+          echo "Applying new migrations from current branch..."
+          go run ./cmd/hatchet-migrate
+          echo "New migrations applied successfully"
+
+      - name: Wait for load test to complete
+        run: |
+          echo "Waiting for load test container to finish..."
+          docker wait hatchet-loadtest
+          EXIT_CODE=$(docker inspect hatchet-loadtest --format='{{.State.ExitCode}}')
+          echo "Load test exited with code: $EXIT_CODE"
+          if [ "$EXIT_CODE" != "0" ]; then
+            echo "=== Load test logs ==="
+            docker logs hatchet-loadtest
+            echo "=== Engine logs ==="
+            docker logs hatchet-engine
+            exit 1
+          fi
+          echo "Load test passed"
+
+      - name: Teardown
+        if: always()
+        run: |
+          docker rm -f hatchet-loadtest hatchet-engine 2>/dev/null || true
+          docker compose down
+
+  load-deadlock:
+    runs-on: ubicloud-standard-8
+    timeout-minutes: 30
+    strategy:
+      matrix:
+        migrate-strategy: ["latest"]
+        rabbitmq-enabled: ["true"]
+        pg-version: ["17-alpine"]
+        optimistic-scheduling: ["true", "false"]
+
+    steps:
+      - uses: actions/checkout@v6
+
+      - name: Install Task
+        uses: arduino/setup-task@v2
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Setup Go
+        uses: actions/setup-go@v6
+        with:
+          go-version: "1.25"
+
+      - name: Setup pnpm
+        uses: pnpm/action-setup@v4
+        with:
+          version: 10.16.1
+          run_install: false
+
+      - name: Go deps
+        run: go mod download
+
+      - name: Add go-deadlock dependency
+        run: go get github.com/sasha-s/go-deadlock@v0.3.6
+
+      - name: Patch sync imports to use go-deadlock (sed)
+        shell: bash
+        run: |
+          set -euo pipefail
+
+          # Replace ONLY the stdlib "sync" import with an alias that preserves `sync.X` call sites:
+          # - `import "sync"` -> `import sync "github.com/sasha-s/go-deadlock"`
+          # - within import blocks: `"sync"` -> `sync "github.com/sasha-s/go-deadlock"`
+          # NOTE: use `-i''` (no backup) for portability across GNU/BSD sed.
+          find . -name '*.go' -not -path './vendor/*' -print0 | xargs -0 sed -i'' -E \
+            -e 's/^([[:space:]]*)import[[:space:]]+"sync"[[:space:]]*$/\1import sync "github.com\/sasha-s\/go-deadlock"/' \
+            -e 's/^([[:space:]]*)"sync"[[:space:]]*$/\1sync "github.com\/sasha-s\/go-deadlock"/'
+
+          # Keep formatting/import grouping consistent after rewriting.
+          find . -name '*.go' -not -path './vendor/*' -print0 | xargs -0 gofmt -w
+
+          # Evidence in CI logs that rewriting happened (or not).
+          echo "Changed Go files (after patch):"
+          git diff --name-only -- '*.go' || true
+
+          echo ""
+          echo "Contents of pkg/scheduling/v1/scheduler.go after patch:"
+          echo "----"
+          cat pkg/scheduling/v1/scheduler.go
+          echo "----"
+
+      - name: Test (deadlock-instrumented)
+        run: |
+          # Disable gzip compression for load tests: it adds CPU overhead
+          # without benefit for the 0kb payloads used here.
+          HATCHET_CLIENT_DISABLE_GZIP_COMPRESSION=true go test -tags load ./... -p 5 -v -race -failfast -timeout 20m
+        env:
+          # This job adds go-deadlock + -race overhead; relax the perf threshold to avoid flakes.
+          HATCHET_LOADTEST_AVERAGE_DURATION_THRESHOLD: 1s
+          # Give the engine a bit more time to come up under instrumentation.
+          HATCHET_LOADTEST_STARTUP_SLEEP: 30s
+          TESTING_MATRIX_MIGRATE: ${{ matrix.migrate-strategy }}
+          TESTING_MATRIX_RABBITMQ_ENABLED: ${{ matrix.rabbitmq-enabled }}
+          TESTING_MATRIX_PG_VERSION: ${{ matrix.pg-version }}
+          TESTING_MATRIX_OPTIMISTIC_SCHEDULING: ${{ matrix.optimistic-scheduling }}
+
   rampup:
-    runs-on: ubicloud-standard-4
+    runs-on: ubicloud-standard-8
     timeout-minutes: 30
     strategy:
       matrix:
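
A note on the tag-selection step in the load-online-migrate job above: the pattern ^v[0-9]+\.[0-9]+\.[0-9]+$ deliberately excludes pre-release tags (v1.2.3-rc.1, v1.2.3-alpha), so the job always upgrades from the latest *stable* schema. A minimal Go sketch of the same filter, in case this logic ever moves out of shell (the tag list is illustrative, not from this repo):

    package main

    import (
        "fmt"
        "regexp"
    )

    // stableTag mirrors the grep -E pattern in the workflow step:
    // exactly vMAJOR.MINOR.PATCH, so pre-releases do not match.
    var stableTag = regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)

    // latestStable assumes newest-first input, which is what
    // `git tag --sort=-v:refname` produces.
    func latestStable(tags []string) (string, bool) {
        for _, t := range tags {
            if stableTag.MatchString(t) {
                return t, true
            }
        }
        return "", false
    }

    func main() {
        tags := []string{"v1.5.0-rc.2", "v1.4.3", "v1.4.2"}
        if tag, ok := latestStable(tags); ok {
            fmt.Println(tag) // prints v1.4.3
        }
    }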
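On the sed rewrite in the load-deadlock job above: github.com/sasha-s/go-deadlock is designed as a drop-in for the stdlib sync package — it provides Mutex/RWMutex with lock-order tracking and, in the v0.3.x releases this job pins, also aliases the remaining sync types (WaitGroup, Once, and friends), which is what makes a blanket import rewrite safe. A sketch of what a patched file looks like after sed + gofmt; the type and fields are illustrative, not taken from pkg/scheduling:

    package scheduling

    // After the rewrite, the stdlib import line reads as below;
    // every sync.X call site compiles unchanged.
    import sync "github.com/sasha-s/go-deadlock"

    type slotPool struct {
        mu    sync.Mutex // now go-deadlock's Mutex: reports suspected deadlocks at runtime
        slots map[string]int32
    }

    func (p *slotPool) take(slotType string, units int32) bool {
        p.mu.Lock()
        defer p.mu.Unlock()
        if p.slots[slotType] < units {
            return false
        }
        p.slots[slotType] -= units
        return true
    }
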
diff --git a/api-contracts/dispatcher/dispatcher.proto b/api-contracts/dispatcher/dispatcher.proto
index 70f5c1abe5..303d479eee 100644
--- a/api-contracts/dispatcher/dispatcher.proto
+++ b/api-contracts/dispatcher/dispatcher.proto
@@ -33,6 +33,11 @@ service Dispatcher {
   rpc ReleaseSlot(ReleaseSlotRequest) returns (ReleaseSlotResponse) {}
 
   rpc UpsertWorkerLabels(UpsertWorkerLabelsRequest) returns (UpsertWorkerLabelsResponse) {}
+
+  // GetVersion returns the dispatcher protocol version as a simple integer, encoded as a string.
+  // SDKs use this to determine feature support (e.g. slot_config registration).
+  // Old engines that do not implement this RPC will return UNIMPLEMENTED.
+  rpc GetVersion(GetVersionRequest) returns (GetVersionResponse) {}
 }
 
 message WorkerLabels {
@@ -67,7 +72,8 @@ message WorkerRegisterRequest {
   // (optional) the services for this worker
   repeated string services = 3;
 
-  // (optional) the number of slots this worker can handle
+  // (optional) the number of default slots this worker can handle
+  // deprecated: use slot_config instead
   optional int32 slots = 4;
 
   // (optional) worker labels (i.e. state or other metadata)
@@ -79,6 +85,9 @@ message WorkerRegisterRequest {
 
   // (optional) information regarding the runtime environment of the worker
   optional RuntimeInfo runtime_info = 7;
+
+  // (optional) slot config for this worker (slot_type -> units)
+  map<string, int32> slot_config = 9;
 }
 
 message WorkerRegisterResponse {
@@ -403,3 +412,9 @@ message ReleaseSlotRequest {
 }
 
 message ReleaseSlotResponse {}
+
+message GetVersionRequest {}
+
+message GetVersionResponse {
+  string version = 1;
+}
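
On the GetVersion RPC added to the Dispatcher service above: the intended client behavior is probe-once-and-fall-back. A hedged Go sketch of that probe — the generated client is stubbed with a local interface here, since the real generated type names are not part of this diff:

    package sdk

    import (
        "context"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    type GetVersionRequest struct{}
    type GetVersionResponse struct{ Version string }

    // dispatcherClient stands in for the client generated from dispatcher.proto;
    // only the method this sketch needs is declared.
    type dispatcherClient interface {
        GetVersion(ctx context.Context, req *GetVersionRequest) (*GetVersionResponse, error)
    }

    // supportsSlotConfig treats UNIMPLEMENTED from an old engine as
    // "feature absent", not as a hard error, per the proto comment.
    func supportsSlotConfig(ctx context.Context, dc dispatcherClient) (bool, error) {
        _, err := dc.GetVersion(ctx, &GetVersionRequest{})
        switch status.Code(err) {
        case codes.OK:
            return true, nil
        case codes.Unimplemented:
            return false, nil
        default:
            return false, err
        }
    }
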
diff --git a/api-contracts/openapi/components/schemas/worker.yaml b/api-contracts/openapi/components/schemas/worker.yaml
index 5c50a03155..36f42bcddc 100644
--- a/api-contracts/openapi/components/schemas/worker.yaml
+++ b/api-contracts/openapi/components/schemas/worker.yaml
@@ -76,6 +76,19 @@ WorkerType:
     - MANAGED
     - WEBHOOK
 
+WorkerSlotConfig:
+  type: object
+  description: Slot availability and limits for a slot type.
+  properties:
+    available:
+      type: integer
+      description: The number of available units for this slot type.
+    limit:
+      type: integer
+      description: The maximum number of units for this slot type.
+  required:
+    - limit
+
 RegisteredWorkflow:
   type: object
   properties:
@@ -136,12 +149,11 @@ Worker:
         - ACTIVE
         - INACTIVE
         - PAUSED
-    maxRuns:
-      type: integer
-      description: The maximum number of runs this worker can execute concurrently.
-    availableRuns:
-      type: integer
-      description: The number of runs this worker can execute concurrently.
+    slotConfig:
+      type: object
+      description: Slot availability and limits for this worker (slot_type -> { available, limit }).
+      additionalProperties:
+        $ref: "#/WorkerSlotConfig"
     dispatcherId:
       type: string
       description: "the id of the assigned dispatcher, in UUID format"
diff --git a/api-contracts/openapi/components/schemas/workflow.yaml b/api-contracts/openapi/components/schemas/workflow.yaml
index 97e88d13a9..0c3d8031e2 100644
--- a/api-contracts/openapi/components/schemas/workflow.yaml
+++ b/api-contracts/openapi/components/schemas/workflow.yaml
@@ -281,6 +281,14 @@ Step:
     timeout:
       type: string
       description: The timeout of the step.
+    isDurable:
+      type: boolean
+      description: Whether the step is durable.
+    slotRequests:
+      type: object
+      description: Slot requests for the step (slot_type -> units).
+      additionalProperties:
+        type: integer
     children:
       type: array
       items:
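
The worker.yaml change above replaces the flat maxRuns/availableRuns pair with a slot_type-keyed map, so a Worker payload now nests availability per slot type. A sketch of the new wire shape using a local mirror of the generated gen.WorkerSlotConfig (the slot-type keys below are illustrative):

    package example

    import (
        "encoding/json"
        "fmt"
    )

    // Mirrors gen.WorkerSlotConfig from this diff: available is optional,
    // limit is required.
    type WorkerSlotConfig struct {
        Available *int `json:"available,omitempty"`
        Limit     int  `json:"limit"`
    }

    func printSlotConfig() {
        avail := 3
        slotConfig := map[string]WorkerSlotConfig{
            "default": {Available: &avail, Limit: 8},
            "durable": {Limit: 100}, // available omitted when unknown
        }
        b, _ := json.Marshal(slotConfig)
        fmt.Println(string(b))
        // {"default":{"available":3,"limit":8},"durable":{"limit":100}}
    }
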
diff --git a/api-contracts/v1/workflows.proto b/api-contracts/v1/workflows.proto
index 5fad08f5d3..8634018ce0 100644
--- a/api-contracts/v1/workflows.proto
+++ b/api-contracts/v1/workflows.proto
@@ -168,6 +168,8 @@ message CreateTaskOpts {
   repeated Concurrency concurrency = 11; // (optional) the task concurrency options
   optional TaskConditions conditions = 12; // (optional) the task conditions for creating the task
   optional string schedule_timeout = 13; // (optional) the timeout for the schedule
+  bool is_durable = 14; // (optional) whether the task is durable
+  map<string, int32> slot_requests = 15; // (optional) slot requests (slot_type -> units)
 }
 
 message CreateTaskRateLimit {
diff --git a/api/v1/server/handlers/v1/filters/create.go b/api/v1/server/handlers/v1/filters/create.go
index d019c289c7..88259d17de 100644
--- a/api/v1/server/handlers/v1/filters/create.go
+++ b/api/v1/server/handlers/v1/filters/create.go
@@ -5,12 +5,13 @@ import (
 	"fmt"
 
 	"github.com/google/uuid"
+	"github.com/labstack/echo/v4"
+
 	"github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors"
 	"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
 	"github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1"
 	v1 "github.com/hatchet-dev/hatchet/pkg/repository"
 	"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
-	"github.com/labstack/echo/v4"
 )
 
 func (t *V1FiltersService) V1FilterCreate(ctx echo.Context, request gen.V1FilterCreateRequestObject) (gen.V1FilterCreateResponseObject, error) {
diff --git a/api/v1/server/handlers/v1/filters/list.go b/api/v1/server/handlers/v1/filters/list.go
index 773359d284..95f3c5681a 100644
--- a/api/v1/server/handlers/v1/filters/list.go
+++ b/api/v1/server/handlers/v1/filters/list.go
@@ -2,12 +2,13 @@ package filtersv1
 
 import (
 	"github.com/google/uuid"
+	"github.com/labstack/echo/v4"
+
 	"github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors"
 	"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
 	"github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1"
 	v1 "github.com/hatchet-dev/hatchet/pkg/repository"
 	"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
-	"github.com/labstack/echo/v4"
 )
 
 func (t *V1FiltersService) V1FilterList(ctx echo.Context, request gen.V1FilterListRequestObject) (gen.V1FilterListResponseObject, error) {
diff --git a/api/v1/server/handlers/workers/get.go b/api/v1/server/handlers/workers/get.go
index e032644c3a..70d14ced47 100644
--- a/api/v1/server/handlers/workers/get.go
+++ b/api/v1/server/handlers/workers/get.go
@@ -19,34 +19,31 @@ func (t *WorkerService) WorkerGet(ctx echo.Context, request gen.WorkerGetRequest
 }
 
 func (t *WorkerService) workerGetV1(ctx echo.Context, tenant *sqlcv1.Tenant, request gen.WorkerGetRequestObject) (gen.WorkerGetResponseObject, error) {
+	reqCtx := ctx.Request().Context()
 	workerV0 := ctx.Get("worker").(*sqlcv1.GetWorkerByIdRow)
 
-	worker, err := t.config.V1.Workers().GetWorkerById(workerV0.Worker.ID)
+	worker, err := t.config.V1.Workers().GetWorkerById(reqCtx, workerV0.Worker.ID)
 
 	if err != nil {
 		return nil, err
 	}
 
-	slotState, err := t.config.V1.Workers().ListWorkerState(
+	workerIdToActions, err := t.config.V1.Workers().GetWorkerActionsByWorkerId(
+		reqCtx,
 		worker.Worker.TenantId,
-		worker.Worker.ID,
-		int(worker.Worker.MaxRuns),
+		[]uuid.UUID{worker.Worker.ID},
 	)
 
 	if err != nil {
 		return nil, err
 	}
 
-	workerIdToActions, err := t.config.V1.Workers().GetWorkerActionsByWorkerId(
-		worker.Worker.TenantId,
-		[]uuid.UUID{worker.Worker.ID},
-	)
-
+	workerSlotConfig, err := buildWorkerSlotConfig(ctx.Request().Context(), t.config.V1.Workers(), worker.Worker.TenantId, []uuid.UUID{worker.Worker.ID})
 	if err != nil {
 		return nil, err
 	}
 
-	workerWorkflows, err := t.config.V1.Workers().GetWorkerWorkflowsByWorkerId(tenant.ID, worker.Worker.ID)
+	workerWorkflows, err := t.config.V1.Workers().GetWorkerWorkflowsByWorkerId(reqCtx, tenant.ID, worker.Worker.ID)
 
 	if err != nil {
 		return nil, err
@@ -59,14 +56,14 @@ func (t *WorkerService) workerGetV1(ctx echo.Context, tenant *sqlcv1.Tenant, req
 
 	respStepRuns := make([]gen.RecentStepRuns, 0)
 
-	slots := int(worker.RemainingSlots)
+	slotConfig := workerSlotConfig[worker.Worker.ID]
 
-	workerResp := *transformersv1.ToWorkerSqlc(&worker.Worker, &slots, &worker.WebhookUrl.String, actions, &workerWorkflows)
+	workerResp := *transformersv1.ToWorkerSqlc(&worker.Worker, slotConfig, actions, &workerWorkflows)
 
 	workerResp.RecentStepRuns = &respStepRuns
-	workerResp.Slots = transformersv1.ToSlotState(slotState, slots)
 
 	affinity, err := t.config.V1.Workers().ListWorkerLabels(
+		reqCtx,
 		worker.Worker.TenantId,
 		worker.Worker.ID,
 	)
diff --git a/api/v1/server/handlers/workers/list.go b/api/v1/server/handlers/workers/list.go
index b503d38f8d..141e325705 100644
--- a/api/v1/server/handlers/workers/list.go
+++ b/api/v1/server/handlers/workers/list.go
@@ -46,7 +46,7 @@ func (t *WorkerService) workerListV0(ctx echo.Context, tenant *sqlcv1.Tenant, re
 		telemetry.AttributeKV{Key: "tenant.id", Value: tenant.ID},
 	)
 
-	workers, err := t.config.V1.Workers().ListWorkers(tenantId, opts)
+	workers, err := t.config.V1.Workers().ListWorkers(reqCtx, tenantId, opts)
 
 	if err != nil {
 		listSpan.RecordError(err)
@@ -58,12 +58,21 @@ func (t *WorkerService) workerListV0(ctx echo.Context, tenant *sqlcv1.Tenant, re
 	)
 
 	rows := make([]gen.Worker, len(workers))
+	workerIds := make([]uuid.UUID, 0, len(workers))
+	for _, worker := range workers {
+		workerIds = append(workerIds, worker.Worker.ID)
+	}
+
+	workerSlotConfig, err := buildWorkerSlotConfig(reqCtx, t.config.V1.Workers(), tenantId, workerIds)
+	if err != nil {
+		listSpan.RecordError(err)
+		return nil, err
+	}
 
 	for i, worker := range workers {
 		workerCp := worker
-		slots := int(worker.RemainingSlots)
-
-		rows[i] = *transformers.ToWorkerSqlc(&workerCp.Worker, &slots, &workerCp.WebhookUrl.String, nil)
+		slotConfig := workerSlotConfig[workerCp.Worker.ID]
+		rows[i] = *transformers.ToWorkerSqlc(&workerCp.Worker, slotConfig, nil)
 	}
 
 	return gen.WorkerList200JSONResponse(
@@ -90,7 +99,7 @@ func (t *WorkerService) workerListV1(ctx echo.Context, tenant *sqlcv1.Tenant, re
 		telemetry.AttributeKV{Key: "tenant.id", Value: tenant.ID},
 	)
 
-	workers, err := t.config.V1.Workers().ListWorkers(tenantId, opts)
+	workers, err := t.config.V1.Workers().ListWorkers(listCtx, tenantId, opts)
 
 	if err != nil {
 		listSpan.RecordError(err)
@@ -120,6 +129,7 @@ func (t *WorkerService) workerListV1(ctx echo.Context, tenant *sqlcv1.Tenant, re
 	)
 
 	workerIdToActionIds, err := t.config.V1.Workers().GetWorkerActionsByWorkerId(
+		listCtx,
 		tenant.ID,
 		workerIds,
 	)
@@ -129,6 +139,12 @@ func (t *WorkerService) workerListV1(ctx echo.Context, tenant *sqlcv1.Tenant, re
 		return nil, err
 	}
 
+	workerSlotConfig, err := buildWorkerSlotConfig(listCtx, t.config.V1.Workers(), tenant.ID, workerIds)
+	if err != nil {
+		actionsSpan.RecordError(err)
+		return nil, err
+	}
+
 	telemetry.WithAttributes(actionsSpan,
 		telemetry.AttributeKV{Key: "worker_actions.mappings.count", Value: len(workerIdToActionIds)},
 	)
@@ -137,10 +153,10 @@ func (t *WorkerService) workerListV1(ctx echo.Context, tenant *sqlcv1.Tenant, re
 	for i, worker := range workers {
 		workerCp := worker
-		slots := int(worker.RemainingSlots)
 		actions := workerIdToActionIds[workerCp.Worker.ID.String()]
+		slotConfig := workerSlotConfig[workerCp.Worker.ID]
 
-		rows[i] = *transformersv1.ToWorkerSqlc(&workerCp.Worker, &slots, &workerCp.WebhookUrl.String, actions, nil)
+		rows[i] = *transformersv1.ToWorkerSqlc(&workerCp.Worker, slotConfig, actions, nil)
 	}
 
 	return gen.WorkerList200JSONResponse(
diff --git a/api/v1/server/handlers/workers/slot_config.go b/api/v1/server/handlers/workers/slot_config.go
new file mode 100644
index 0000000000..4a05618057
--- /dev/null
+++ b/api/v1/server/handlers/workers/slot_config.go
@@ -0,0 +1,66 @@
+package workers
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/google/uuid"
+
+	"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
+)
+
+type slotAvailabilityRepository interface {
+	ListWorkerSlotConfigs(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) (map[uuid.UUID]map[string]int32, error)
+	ListAvailableSlotsForWorkers(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID, slotType string) (map[uuid.UUID]int32, error)
+	ListAvailableSlotsForWorkersAndTypes(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID, slotTypes []string) (map[uuid.UUID]map[string]int32, error)
+}
+
+func buildWorkerSlotConfig(ctx context.Context, repo slotAvailabilityRepository, tenantId uuid.UUID, workerIds []uuid.UUID) (map[uuid.UUID]map[string]gen.WorkerSlotConfig, error) {
+	if len(workerIds) == 0 {
+		return map[uuid.UUID]map[string]gen.WorkerSlotConfig{}, nil
+	}
+
+	slotConfigByWorker, err := repo.ListWorkerSlotConfigs(ctx, tenantId, workerIds)
+	if err != nil {
+		return nil, fmt.Errorf("could not list worker slot config: %w", err)
+	}
+
+	slotTypes := make(map[string]struct{})
+	slotTypesArr := make([]string, 0)
+	for _, config := range slotConfigByWorker {
+		for slotType := range config {
+			if _, ok := slotTypes[slotType]; ok {
+				continue
+			}
+
+			slotTypes[slotType] = struct{}{}
+			slotTypesArr = append(slotTypesArr, slotType)
+		}
+	}
+
+	availableByWorker, err := repo.ListAvailableSlotsForWorkersAndTypes(ctx, tenantId, workerIds, slotTypesArr)
+	if err != nil {
+		return nil, fmt.Errorf("could not list available slots for workers and types: %w", err)
+	}
+
+	result := make(map[uuid.UUID]map[string]gen.WorkerSlotConfig, len(slotConfigByWorker))
+	for workerId, config := range slotConfigByWorker {
+		workerSlots := make(map[string]gen.WorkerSlotConfig, len(config))
+		for slotType, limit := range config {
+			available := 0
+			if workerAvailability, ok := availableByWorker[workerId]; ok {
+				if value, ok := workerAvailability[slotType]; ok {
+					available = int(value)
+				}
+			}
+
+			workerSlots[slotType] = gen.WorkerSlotConfig{
+				Available: &available,
+				Limit:     int(limit),
+			}
+		}
+		result[workerId] = workerSlots
+	}
+
+	return result, nil
+}
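
Because slot_config.go above depends only on the small slotAvailabilityRepository interface, buildWorkerSlotConfig is easy to exercise with an in-memory fake. A sketch of such a test — it would live in the same workers package next to slot_config.go; the IDs and numbers are made up:

    package workers

    import (
        "context"
        "testing"

        "github.com/google/uuid"
    )

    // fakeRepo implements slotAvailabilityRepository; only the two methods
    // buildWorkerSlotConfig actually calls return real data.
    type fakeRepo struct {
        limits    map[uuid.UUID]map[string]int32
        available map[uuid.UUID]map[string]int32
    }

    func (f *fakeRepo) ListWorkerSlotConfigs(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) (map[uuid.UUID]map[string]int32, error) {
        return f.limits, nil
    }

    func (f *fakeRepo) ListAvailableSlotsForWorkers(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID, slotType string) (map[uuid.UUID]int32, error) {
        return nil, nil // unused by buildWorkerSlotConfig
    }

    func (f *fakeRepo) ListAvailableSlotsForWorkersAndTypes(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID, slotTypes []string) (map[uuid.UUID]map[string]int32, error) {
        return f.available, nil
    }

    func TestBuildWorkerSlotConfig(t *testing.T) {
        workerId := uuid.New()
        repo := &fakeRepo{
            limits:    map[uuid.UUID]map[string]int32{workerId: {"default": 8}},
            available: map[uuid.UUID]map[string]int32{workerId: {"default": 3}},
        }

        got, err := buildWorkerSlotConfig(context.Background(), repo, uuid.New(), []uuid.UUID{workerId})
        if err != nil {
            t.Fatal(err)
        }

        cfg := got[workerId]["default"]
        if cfg.Limit != 8 || cfg.Available == nil || *cfg.Available != 3 {
            t.Fatalf("unexpected slot config: %+v", cfg)
        }
    }
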
diff --git a/api/v1/server/handlers/workers/update.go b/api/v1/server/handlers/workers/update.go
index c7cd860aeb..a3ccd9809d 100644
--- a/api/v1/server/handlers/workers/update.go
+++ b/api/v1/server/handlers/workers/update.go
@@ -1,6 +1,7 @@
 package workers
 
 import (
+	"github.com/google/uuid"
 	"github.com/labstack/echo/v4"
 
 	"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
@@ -36,5 +37,12 @@ func (t *WorkerService) WorkerUpdate(ctx echo.Context, request gen.WorkerUpdateR
 		return nil, err
 	}
 
-	return gen.WorkerUpdate200JSONResponse(*transformers.ToWorkerSqlc(updatedWorker, nil, nil, nil)), nil
+	workerSlotConfig, err := buildWorkerSlotConfig(ctx.Request().Context(), t.config.V1.Workers(), worker.Worker.TenantId, []uuid.UUID{updatedWorker.ID})
+	if err != nil {
+		return nil, err
+	}
+
+	slotConfig := workerSlotConfig[updatedWorker.ID]
+
+	return gen.WorkerUpdate200JSONResponse(*transformers.ToWorkerSqlc(updatedWorker, slotConfig, nil)), nil
 }
diff --git a/api/v1/server/oas/gen/openapi.gen.go b/api/v1/server/oas/gen/openapi.gen.go
index 75438e8d0a..5f43ce3136 100644
--- a/api/v1/server/oas/gen/openapi.gen.go
+++ b/api/v1/server/oas/gen/openapi.gen.go
@@ -999,15 +999,21 @@ type SlackWebhook struct {
 
 // Step defines model for Step.
 type Step struct {
-	Action   string          `json:"action"`
-	Children *[]string       `json:"children,omitempty"`
-	JobId    string          `json:"jobId"`
-	Metadata APIResourceMeta `json:"metadata"`
-	Parents  *[]string       `json:"parents,omitempty"`
+	Action   string    `json:"action"`
+	Children *[]string `json:"children,omitempty"`
+
+	// IsDurable Whether the step is durable.
+	IsDurable *bool           `json:"isDurable,omitempty"`
+	JobId     string          `json:"jobId"`
+	Metadata  APIResourceMeta `json:"metadata"`
+	Parents   *[]string       `json:"parents,omitempty"`
 
 	// ReadableId The readable id of the step.
 	ReadableId string `json:"readableId"`
-	TenantId   string `json:"tenantId"`
+
+	// SlotRequests Slot requests for the step (slot_type -> units).
+	SlotRequests *map[string]int `json:"slotRequests,omitempty"`
+	TenantId     string          `json:"tenantId"`
 
 	// Timeout The timeout of the step.
 	Timeout *string `json:"timeout,omitempty"`
@@ -2123,9 +2129,6 @@ type Worker struct {
 	// Actions The actions this worker can perform.
 	Actions *[]string `json:"actions,omitempty"`
 
-	// AvailableRuns The number of runs this worker can execute concurrently.
-	AvailableRuns *int `json:"availableRuns,omitempty"`
-
 	// DispatcherId the id of the assigned dispatcher, in UUID format
 	DispatcherId *openapi_types.UUID `json:"dispatcherId,omitempty"`
 
@@ -2136,11 +2139,8 @@ type Worker struct {
 	LastHeartbeatAt *time.Time `json:"lastHeartbeatAt,omitempty"`
 
 	// LastListenerEstablished The time this worker last sent a heartbeat.
-	LastListenerEstablished *time.Time `json:"lastListenerEstablished,omitempty"`
-
-	// MaxRuns The maximum number of runs this worker can execute concurrently.
-	MaxRuns  *int            `json:"maxRuns,omitempty"`
-	Metadata APIResourceMeta `json:"metadata"`
+	LastListenerEstablished *time.Time      `json:"lastListenerEstablished,omitempty"`
+	Metadata                APIResourceMeta `json:"metadata"`
 
 	// Name The name of the worker.
 	Name string `json:"name"`
@@ -2152,6 +2152,9 @@ type Worker struct {
 	RegisteredWorkflows *[]RegisteredWorkflow `json:"registeredWorkflows,omitempty"`
 	RuntimeInfo         *WorkerRuntimeInfo    `json:"runtimeInfo,omitempty"`
 
+	// SlotConfig Slot availability and limits for this worker (slot_type -> { available, limit }).
+	SlotConfig *map[string]WorkerSlotConfig `json:"slotConfig,omitempty"`
+
 	// Slots The semaphore slot state for the worker.
 	Slots *[]SemaphoreSlots `json:"slots,omitempty"`
 
@@ -2197,6 +2200,15 @@ type WorkerRuntimeInfo struct {
 // WorkerRuntimeSDKs defines model for WorkerRuntimeSDKs.
 type WorkerRuntimeSDKs string
 
+// WorkerSlotConfig Slot availability and limits for a slot type.
+type WorkerSlotConfig struct {
+	// Available The number of available units for this slot type.
+	Available *int `json:"available,omitempty"`
+
+	// Limit The maximum number of units for this slot type.
+	Limit int `json:"limit"`
+}
+
 // WorkerType defines model for WorkerType.
 type WorkerType string
 
@@ -16148,7 +16160,7 @@ func (sh *strictHandler) WorkflowVersionGet(ctx echo.Context, workflow openapi_t
 // Base64 encoded, gzipped, json marshaled Swagger object
 var swaggerSpec = []string{
[regenerated base64-encoded, gzipped swagger blob omitted: the embedded OpenAPI document is rebuilt to carry the WorkerSlotConfig schema, Worker.slotConfig, and the Step isDurable/slotRequests fields; the several hundred lines of base64 churn contain nothing reviewable]
"WrHDImOEyVfs+hMKvPBJv+2bsCnXIPrRvA4pTTTrWAAP2i6Cf9NPwb+xZdC9RIHiEZahmcdw3IexCz1b", - "zw/lnqDsl1xvClWO0r6qdL0Hh2HGY9rjMP28xoFYHKN0JHJsSqwpqNSOBl0YkLFyny28EzHwTPTMvzo6", - "7z/VANHkhrqKRWINa8LWTAYCpZnNoHSBLvp3V/NIuhFd9W4tYCmOrhX/cIYwgTH05M1eEzxg2OfUdxh5", - "TpyOw6OzEGafYXyk8VkvIc/OVyTzVa6azCJCxejkOYL0r58nlmIEIx8sf6iwBb4kxUyFjSvLccfLrk9p", - "/vrkpGa9BbhNqzaZkZTu9kdYwe5nC5+ELqYyj4m+CrbS+w9rHX/pqAWLj2bAGcTkNjZonrejS+bXBQOP", - "OXqKSz92SLgdFwTTcZkE6N9UN/JgQNA9gnGqWwt1UMT2cX9UNSR2Cv0wmEmIa6XsFt1h7Qy9lS6uY3cO", - "vcSHCqWt69K+ZZf0bodw13t7PaGJF3s2+FcFPd7m7N4sNIv+MT7/MLi4pT/qlMF05u26Ce6pw1959ZnX", - "3y6c+xqT2Ob8AUdJcK4agRs/JnEAdn2WKgDYLHFspbh/KXV4ScfJjCgqfSbLtPs28R8uoA8JfMdCMFZ0", - "AUwjCFIPwAe4dNjl0okA4ulGeJCHM13mc008wOXpG9b0lLuqnfF/nTVJO9HtRCDOrqj6q1NDuuEjfqm7", - "kK1IjRsY7LnhFhuPz/t075tJvhL1sKifQiuNNr2+cjzkQzHdeIEC8c9TG6/bagyZlGSPffc2vJIiETfM", - "qaRfil2aJWVB3aqcS1Vz6HNC6aMBN0LupTxRDUC+ZaknKKWYLBrrbua6yl9ekjddmZG7ec6NdalKQV9T", - "FlQXKYFpvjoTZ26TZ9KEJVvley2KVuHMPTBtay4Hz6tJ5VUCIsqjmMzfqsZU/To8hgsQzcMYjv2QbNj2", - "nbMr6x0UuTkT+yF/AhM97B0qVrRDY1WR0kSuERg5cSIXVm9qUJ3Q6heKfF96Z9qvtHTRqLBQW4Ne4M0M", - "LV3V1l6wq1OqUT1zyr40cxAE0DeBKT47yNM//WE6uPPER9c/qvARrox2dDkFs6evOMlaFjCwMK2efltj", - "6bS7ed1s8HUWvRe2OzvrmkREiu48XXQVMtSeLwRGJnGndyWeI9+LYd4bslbn3Yr7L7+74WaQxKuloqkl", - "k7W80g0zmClAWUWOHKQXrdhA7hZUsfVb8ELvk0EU5lysFKvXhnzVGRF+MT1p1NJArjs+D5OA6ME1X3VW", - "eZvO+lRgqGi+zjnbW/hqi9CCtP3m2S5MiAnEFTmS+U7174V5wg6ZG/f9510qdmYNJcs27IW2NYkTC1nT", - "ZMVpl4oV8/f11S2cKQWmK6v07xeo68fuHD3Cg5RLzS3neyViQnqR0neq4PoYknhZIUW3xo/K7WU3LFFx", - "UVCQIPGov3Sa6H0f7vV5BtT6rYk2hlwCrpkKzA+2nr6DEiWgITnJgxbrEa4urAelG/gI5QOebe+x7GNF", - "d+9QjMkYciXZnvYuQdNeDSOx+C0jB2Bh5hSzCprU0Ai+vxXEvC9h8DkyrSXkTKRL09FowB/K766u775c", - "jz4ORp1u9uOoPxncXQ4/DSfZQ/rw6v3dZPhpcHF3fcvMV+Px8P0Vf2qf9EcT9lf//OPV9ZfLwcV7/kI/", - "vBqOP+Qf60eDyeif/DFffbenQ1/fTu5Gg3ejgegzGiiTqHOPL69py8tBf5yOORxc3L39593tmC1FJlS9", - "G91e3fH8rB8H/7xT3QcMTQSgWiuajmMUpCqxMmKBo+FkeN6/rBqtyu9B/HXH0fBpcFVAfAO/CPE3b10V", - "HDgB+EGfADSLxa9MOiL6J5iNko+1b9JRZ2GVbSozjdpM0qkaXUCgkf5pilT7lDqFtKqaC0Loe+LNw04q", - "sn3YfK7VkADfqrMWdWlCmmJRExiLpHADQ+q+L7I4Quiw1tLKtGC91NS3akmNAPhLglx8HZHrhFSMmpmt", - "5gA7YUSg5wjTRDqIfo51k8VtPSG7Kd3a2vnasoQBDTPs1aZ9Z3Blo381klIhIeRuM0FuKdWFOSGkds17", - "oGbo90KXOHMW9jjRdkbsvew5vyoUzETScrw7IcFzsA2+RYjuMov8ZsBUj8978Wmw88QqM7AgdgfE0AFR", - "FIfAnaNgxks0MARXzS8TWnIiYfEsK0LBlyxrYZThYQEwlbhQbIrvAPKTGFqAwryJVUBy+dVZuiD9nD7A", - "fKnm18EsVA4EYmfZC2ExQ291UAz4JonsHbO2iQNaG/3m3MsmDiAyoktQ1WZfiMySQAuwWS4M8geR1BP9", - "0AU+i596hH4Ysc8sLNdL3EIxNEW9U5LObi/b7HNa4KPyrVSWdxGlvXZZ8mS1lLZ1T2eCRU0Pf/KzGWu8", - "RdXTHxshlxneeIrXHEUyF2+2V2p+PSM1ctrZm8NJkHKzM4nvaRn+FyMo+1SOlPXqWt9iGPMeN8nUR24V", - "KbDxKrIyqzDvzaaL/Vtl00din6QUvf5yxSwG/YtPw6tOt/Np8OntYFQhO6tD9esvZ03uYlWYyMGhGMtW", - "vRoXxyuGLKUIkJRfLGCTGl4Go7vx5fWk0+0MPnObxaQ//ng3ur1iNpHrKyU8g6UYOb/+NLx6f/dl8PbD", - "9fXHCtzntCidIgniRUXwO/suXLq1ApqH6ZPQeQIxSydXUq94b30webO8APqUAJuJ8udjm5eoh3+9VGUp", - "TdSzb0pBdjH+dRvWPLR/AQmMZYC/PEf5WM4v6AgeOaeOB5Zd59R5gvCB/ncRBmT+64puLCl6tAH/ZrEr", - "EXUT+sjVpAvlGn/VJTitnMebapSGBmI3z351PqACOPPqhAXUVqAaBZJSDUDKo88nnW7n86lelHC3yR3E", - "5BnDPLk/cJOCNxV55Z/TAceauAVzIZE1Xb2rvbw5QD9jcQ915TVR9xupq2HU3FRARP+XB8TMagdtKW4N", - "TS9paNqiAWgrpd0aGPJXtsMbuPAL83kyJyzANyDButxgKptwxykHYSdirR0QeI4LgiAkDmClW1lNeJnX", - "unRg6aDDuvt4rT0KeF4MMVbtUjktWho6yuYp+uEDwHPdcTMHeK4O+b9wYTpxAHFFlJdUH/Pq5M75nBVM", - "1k/4GcboHtWhl1nXqAx6FM1FWf8cDHpOmAN8I4v/280BnEh0cDAkBv7axkuWh3Dkg2WOEeT+NTZk5bH7", - "1UBg53MQzKBEkJEJAvhkRiLjXfiUYU1q1HrYV9A75Mhs3VElICkQlfhbD4ZSBlbxpZvDkwnll+EMBatX", - 
"N1uNv9cqdrZ3GJdrjOpwLfNeHRS67U5Ig2DYw92S9c1tN01Vq/EcRfhQjawlo/MOT/NtnDJ8Mt22fT49", - "H1xewGky23St1a7QRzFaJD4gEGfJKNhrmRsmvudMIXsg5doHCEQVozB2QE5j1pWsqisQfj64VAqDs/vB", - "I/ATSv1ad2yfwPgGLP0QGDhQJMyIeJvy+oD8RLUPJwzoDzF8RGGCe8K9WIzRqcqvU56YfSrPR0oRlCJd", - "UbXpJldEW9hxaiijMtbbwAX0k0za5SBZrpZtAKtKzev3aHYic1/XhXzhxE9jsQo7nI3epROyojQY3ye+", - "VhG0ixEpY0GGi5QczI3BEsYxDKG89Ftuiem6WDE+bhVkXpKsnr85kfnn03MWCTEB+KGijDiBcQB8EVJv", - "NFeJZs7wAktSdEHgxPBeXL4RV8gBfqD8myNMtbNq59pwug67vCmfTyk+ZIaUZ/2GyQgS2hTrElRg02sF", - "RxdDQ7ps5GEu9J5gDLN6V1tDxTNfBJM5fKFVVeQrpajCX/J2UJRhqgZTIT6lcDQNo0SF1VWq1r6f8PGO", - "nFt6i6eT4GSKuaMWRbnHFB/RCjuAqNLILlVXZYrVdRNjGZI08kgshpDckWcQNGzLRZy7sudhAK/vO2/+", - "qhV2mv5vAUZuPyHzznN3lf79myEvxbdK5w+f+ued56/GxYnBmdHVX2eJkAFY0HzoomuliRiKQyLwxLpO", - "liYqpjvHbLEJmcOAIFdQYcgsMZJBRNi7IvT7N8O7j4N/aoR9Me+wnJ5DoqEWM0oZMvRpZj/C5aCx1qUu", - "iat3D3B55EyYtxR2mNGNhLxgCcy3cu7jcKHiQgqRozWyFKdYLXsaUzZbb4FsiPLihOrIakeEcQxdkokO", - "Ejri/Unv/sxsUNKNyooUx1kXoeggt1Kz5U1SCa2nw/KquJG72DutwV1O22DQSKVDdQZ2V0dv9iIvE1l7", - "IBhU+bklufC2Px6eb1cqMEG8B9ikcGwXmWylG8PlBZidK3k4inlnNBk66nXXtMRwWQX2wMw2T72Gl36i", - "stNWqmp4nwGgXnqm0AHB0vlzfH3VwzBGwEf/YU+PfGVHKym1FZMVjpEwdlxA4CyM0X/UiqjlswPCoCrH", - "EyZgEYmH0vTc5U7rMLB34dqvEt7iMGWZmE1FZJX7qJyMvctml7R0FGe6LMxoyamMmSYKMNo6kvw7CmZC", - "vl01UWKE33kKagYns4CAKPKRSwlzQ7XOxaLWqnaunfdrJn72wGYsBaHhXl3eWEMG13oKT/XCwjYyctTs", - "YW1iN4ukawrxp4yWTh0nwdGWbrLmIidmsvpBioq3pb8rUnPEaSm0f8sCadns6Z4oWV2EtDBlyG5g0aqy", - "I+m4C+EL6PogBkRkvTE7JQjORtjxsi7OLyRO4K/0AI/icBaDxYJdnX65Bz6Gv27aYcGo4yjKmlR1mMJW", - "xsdhmOk2oVBUbHsDK2Dd2JsUrNW56k2Gw4wsVDbai1M3y1pen6D286mxMi4gBC4ig9orPioSrFgYV5Ny", - "aieldn1Zt7YaScUasy9XobeYTkr3XEfipcMy0dhgunnJ3wI61ij6m420D5xQWZ43/VxVj7A/Pu90OxeD", - "8blhubwkVfs02PRpkONtOy+DsRh7yw+DFHSTqae57KQL0stN5gPwqSI3GLuqSgte/cYM0uYr5iKzzoxX", - "qU1DEiOI65dPv1xwnx1joRvaxspgx7N/MYNNs6Rj8hraLIUxb8KBU6dW9yzDtf5Sl27ZXojUjOjruEIS", - "5JYTjDXMKCbHymUSK2YP06ceK2YUGw+uJncTdTHpGu74CVlKf3Y+GvQnhapkH4c3N/zj9e0lxc7kbjy4", - "ulBG1p88ioy1NDXb57rBKOCRm02y8cOmdJSljC3VpwgI8lcpBVZdz6JZxQqOBDNT3oQoIDxKsbwDgha1", - "sjVLzaYP/kYLuGoEHm+kyf1mtQzNQcxdxZrurIoay3sIU6GSwIRPtzLbqpULmkpyerezqnSPBQibYiRb", - "mobcc7ApIjMVEllav/PrTzeXg0kpm19FksL8a9dqlT6Uy3/+oM6mWfd5i2l0wnBawv5GFSr1vdCsYcpW", - "bCBs/2BR87RYcwvO3pNSnDwBLNw6GuQD8PIak50btGYLlBGTrAStZjjxtThU10GBs0C+jzB0w8DDdjpu", - "nSdsYRbnlzRkHxCICf3t1/oK61bop8PLbvb4r/NDrkC5oHrhVS9/jGAAInR0FQZXie+DqQ//HLO8GWmr", - "HlpEYcwmFa745cYRoFeczgyReTI9csPF8RwQdw5Jz4OP8u9jEKHjx9NjDONHGB+HgJ3R33qBGKvzhhla", - "1wwDSxbjCDwF0DuvZEfFRs6blxmzKnd3eUD+rSEFHdCe8JIETA1PDQ/WT1i8cyo7axWoLdz3LKpHaTh0", - "SxWkiopqVq/AUD2qfFCua3lYbSM3OLvFg0Dl5X0YYBg3P/KQ6NbUgcL2/eJILdi60wL/tT5aMgOIsNHI", - "6815GNyjmTbfSHX91bWKI69AfIWYI2twchWGyzOJyHfNROvUllLt46rW1OXWGxkNpDmv0nOmm10gCuyq", - "Wn/yrJAvasUNQblHJ/0WfC0q9Nu1ClUbYDelFZcCilPgBSTmC9kELcTT/RYtsB6MyNyg99JPOWUCcS+w", - "J0BgfA98Xz/kzhTRtcuNbUeTaCg4uU9DQ2TRU4R3tEfXz6bQaKzrG7grtkrLD6S0rOYMp+oAa9V+5MK3", - "cMRe5A7qVQ7dr4Uj5CXPUUpNLBF6o+NUHH0bO013lgSv24liFMpaKRq/cfHVREr6um2qPlvj9ita10f8", - "58btKun6Pp/y5EltWOjK/mb6ZwCRk6oUenlwYXT7GQW3l0FsGjIw1phWg5XsAkplh+fuIZDN1kvEtIGb", - "L0/zm3G1NdfztvJwtQwXVeIDv6qsqQRql5k0Qh9NIWn9myHjCgXJ+dBCHRHMIfBgbHe687bFTRTT1uJK", - "makr1/G1SkT1FYGUDyTtpoHmXVM0pDJOLti2qIRaZbSiS53SURhCdWhMMFWRTUiUX2sHKqAsHbUmt1U+", - "+NSfUR1vvlDxNv7QP+106X/OXv/O/3h9etbpdj5dvK7GXhrPqskiq0xkHxub9mIJTN3Qs6hXlxthIDsx", - "d5pZAEgSww9r0zEd2knH0wpMNAtYcSU3hobLK2bfGBumsgzNAqsJigG8KaIUPOlXXAStlkYGCt7TsOLB", - "/2HlCscDFhTD/7gdXVaTx164zkmdxtIhJtWBTWmj3DnwfRhUOYU2CNGrdICXz+6FI9GJJXCW2n35gFa2", - 
"9v3gajBicvP9cPLh9i1z8xsNbwbMQ69//rHT7VwOrwZ95nz3efh/THue3WA3H4Rd6aXS3LdDmilb/47W", - "v+PH8u9oXTDKDydrGmL3+yHhYOzYDd/Iax6lNRZv8U69ltWbtc5M3tm1Lf9EnXsxTl+jVTulchpeQCLL", - "KBScfJPA3itBpGDAc1BvhVFj0Wn7d2GsgUc+GLEMHDZhP6xhpozkvQ3WD2Tg4ODNpZOpdeAox3J3cjiR", - "6JaQlbc2rw7kt9eriZ7ZQj1LdcoqYF/q1UXVjho8uxgwvqknmC86lw+JIvNidhT8V3BMUqNE++9FSTet", - "Si60fl5oYqMlFhuZPEWRCq3ym8SGNNSybxL7jQxtwiBCx9XtdQ4lPMmXufjAphaJ7UwCVK6KDOf0FHOG", - "904QEieKw0fkQa/rACcGgRcuZKcn5PvOFDozGMBYXmNU6jrbGsabo9nbTwJcbW92TcopnLXIplLLbLrY", - "qeUlL36srC+5LkbGFJf2O2DYN/YkCgIvq/AY86FWu/IvIJmHXqPVCtA/8Z6pbn8eegaq/TCZ3Mjc2W7o", - "pRQsDT32+QbuAE84wGbOTfzVEuHVJCRQWXPOZ4Yq3to68ZiWAlamnU/p1mXGrkmn27m5HrP/3E6YlmQ6", - "IXkQFq6K0MLiTYjXYXJB4EQwpnR1ZF8R77nbAY8AscusOetbLiFSeVr4DboJgY4bBqJap780eFUiHLGb", - "tTbDF6U6lOYOBBijWQA9J+vELE+3t8MLR7DP7m+UPphCH1eXKmVtGEvl/E/4MWBHilyg0nF0W+YDTD5A", - "EJMpBKTKNpDbKlZ5ltWMAM5c9s7fys9Ozs56p2e901eT09dvTn5/89sfR3/88cer13/0Tl6/OTmxT8kC", - "ODNT9WCACZj6zNi2h5AuwDcz4S/AN7RIFptjgO3rHWZ9I4YuTOutYlPeGdqGx7XwenthvAoBj/JzaWg4", - "FnWKsjqluDZHFHayXk4YqPvQALLivFrokoASzDC4D+14daR0oIeuH5rOKQwXIJqHMXRoIyEmVkTzWI41", - "ZvPpwvqtC2ZkU6eZcM4nw8+8OHf6503/dmwIOraJdOHISqNc+LlpzA4mTnIu7wtA1hvzeO/bOt34dnSp", - "Gb6pqszaa9UcRZSXTvnKLL4y7xPtumnHoYqC27zQds3k1UlLK/Dw8q+yxktBCuQoz/yFatsgmCXiScta", - "LIwvPmJ+LPLOSrHqcqodvdomJNLgG4mBtgH2HszDlhbHIFKV0+vLPss2cPPPyQf2QDL5581gfD4a3rDc", - "Kbdv/6k39GQMrXp1DC7ffbge85wFn/pXfZ4Lpaq2fyqNy7ZMlUT1ATnpLxb+vt0G9U35uSMrnOrrYv4d", - "Tg3ylX7RAWRFpn+GU50834kCYcScrIan0eHAbPW1pkZGoL2hVL8zCX+57OJQuQLxUNNMXChvQhKZlcZf", - "zfGQxiAYRKN4K+Dqo67K/wwS5Turoa5xYwhk9g6eTG4GCRYumGlXZ0b7pkeeYoDWIoyVeR6TGBA4q01z", - "rUB4mevXXNHOdOl8BehiotxXZ/X2CTl1cTVdLVartmh4ocvnlwI4vNDiUPb+iIKcReDd7dX5ZMik7cXt", - "qP/2kmpYF/33lQKSDiKP0UYUzGbXsJf8rj+b1wom3PGxrtfmnyv205gEiTHJR1gVF0hCAnwdxaY89gCX", - "BucYOTwlS7vQQ3kZAw6OoIvukZtN4vwSAYyh5zwiIByvf9VzhRERDTynsl9vlNYkTqBm/LqHSNUFKb3d", - "n56cnBhdirTD5J2AGvrzNFrQ3+FUijHbc9xQwWDtMF1+Iu7aAsbnFlf7lwEh5xWzSQ8X1XlB6+Zirpnx", - "dtlg8InSq+x30lAlMXqurJMEOxtI9UlRwP5aLUz25KKneK/YHwqjJFgjQXB5lHcI+rlzX80/kdFyToop", - "krFmkrH0ymlldyu7W9n9UrLbMMcPKNor3PpWEM1stCGBC7OjoOG+Ut/ZWDZuzHJ6VWeOXdN1KksbtvFs", - "YBsY0CDTi7lli0kWxKK6JUQqo9ZRTynl6c3g6oJnOs1ynmrS2eaTn6Z5Ut/2zz9ev3tXe0qyaVe6N+cF", - "ipkYJ3lxUnQcCYMbRfKXYKUNxu4ceolfEd1j6Lz2cfSlmPDDUsDUbDbm5cCN7jS5PCNbZMeqwlq4dhFG", - "IwFLHdyEjuRQ57xjnRZaaF6aP2MIbZbkqoTUkum0HwVzab9JHm2e5rpqsRMw06HX5yrj+ib/YMNZQoRZ", - "l0NYRT9CKJzH9CJzr5cLWpbmfHmHDNxYNyHzItfOyOTInXh53PS0WL/C5ppBAW8ayQvT2IFVBk7xs1nl", - "nqtbevRlGtideIVojmaeK8UoTzf5slUFhqLNFlk294RhsyHqqwdLvHcPEp/cVKYLEo2MaYOsHgnELfJP", - "zA/ehaGW05/j6yuHA12OP2EjaAND5bPgCz32hbHH3Qot0ICF2jFBCxgaqrxggtyHpcmPhH5zsHhWsXtJ", - "VORFA7ZlOtjjaeGlzArHSp8xz2KkQ/ljRtnmbKQ2C3xS3rNt3y0aZ321vgbKZUnCyA30tZ7TGVlt8m2o", - "CX3uxZ7sCuHcoSJ7FCqUx40h8/U6N9fGWIBvNS2emin7pgIZPIQhofKXyU8O4RSCGMYyMQfDKDtW2M/Z", - "pswJidi1JwwfEJTNEd1V/pN8O3/TEbG4WV+Ro4X2TjAJF5aTPTOJz316NG7wfBanfzNkdZsIs4nlf00J", - "sXN6dHJ0wuiYRyN33nReHZ0enYjAYoYJFjzsi3qnM12kx3v5PE9bBRBjJ7XH0E0HskpH51J8f8/QID3z", - "2SxnJyflgT9A4JM5Q9Fr/t0NAyKyQ4jCyLTp8d+Y8xVOD8AaPh7EcUil8DM7qtU5r0KSriNHHJ03f33t", - "drAsRkJXnTWUPiV/CZjdOXQfOl9pf4a/GAJvWY9A2gxVYXAkG+w7CtmCHRI6wHVhRBwSg/t75NZiNMVA", - "LUofT4+BT0VKMOvBBUB+jz0k4+Pv7Gf1t2eOFx8Sze3pgv2OHZCmrKLdHdadv02XdqFPWwxoA+ZqwUdg", - "PBODBSRMH/irwsmnNIMjEnZ33vCA/lRolJbSUYUafx/Idmy94rJfS/T0Wxlb48R1Icb3ie8vHY5SL5fv", - "q4S8527nt11RXt9ZAJ9iAXoOSwXlyfgZDsarjYOhg+JdGE+R50F++8jom9NJFZlJip+wJvSw+taLhcrB", - "PvC+na6GML6yay9xNem++XVrHRLnI/wYJM7o4W3I5fFGiIFjh29aAXFpAFaZTCqxRUInkTjPY+NZL/Y3", - 
"shDtEnSw58QAB7QVA5ZigFPL9sSAekBGqEfCBxjQU1H+zU7DKNTF5o/gY/gAHRCwrIOstfDWSmcsiIkI", - "TWgradCh3W2kRDq8QSZIWPfquIvZ8gSdM+h+bKLGTahakA7d2InYOUnG2W9VlJxueY6CXT9MvGP1hm7W", - "oEspz+S1hw3ioAATELiwRMTn9LN0LzEr1tvHLQPESYI0lnVvCKxGa+cIVt/rxdZ/Ul7YvvXkEL0w4s4u", - "4kRT9pubw4+/s/8+V+03lVKs1VFpQ5lVnG9krSTi2Y5NygnPRbhLIbS5zRY5gmoOb14M5FGINY4NtmOt", - "bMuRuIKZjLw5iiukGqefr2YKP64Ta2xbUqlWQ/MXqQD72en+gpFwS/v7RfsLuPIZbjy9d3dwi9RhTWgq", - "PRIP5CDfxBFOxzhmdnq+S9i445cI0wuQ7+RamzaYth7mG25tt+lcYseVKRtuvkzlklvdPhFCuvVsIwqb", - "UN7/3CaHASIhlebH3znHPx9HcTiF5sulfPt0QK6EArPr8hIMuUB+M8OnU9+EmIyS4IbNa2+bMh16qeTa", - "8alXQVAiJQenJ4bfo52eClchYan0wxj9h6dbF8l5eNIKHqVZMnMSgHzoOdxu77Dtcd4JeT7MtlV/cOTI", - "DPvAfTj+zv5jYcV3xrShUikkTznsq8hyZG+0z41pJB4G4l5a5/M42SfV5nQ3YNwGGQnziV/vZmKePIvl", - "IAS+Hz7R6XUvAkWqlaKX/V6lYnGiy3NMgI+/4wBbccvVWJX6ZX4JcAM2yQ9mZhRxcu8dmxSQ0TLKHjJK", - "iWBTVrkaVzJKgDVsIhUXxdqkV13ovPJKXGKRxm9jL6Z/dM2GAF5haCVLgALD2evXOSBON6EDRXFI/wG9", - "9gzbI9Y0XSJZIQIHRJGk9vKxxtsU+JGAqQ+PPTDDx2kOc+OlEbNbI2vnkDkgzhT6YTBTswqk+bLBrHyl", - "/Hx6AVjd1ImoBV5vLpOZqrMELTx3NGOZfycwXmY844HZHfKqj7ltRYhYyZ0CvC918bGm3o0Vc78As7QI", - "vjZ1VoUcolPK1z82689tJex2Xu9K+NFbKFpEPlzAgJR0A2a8kHSQPp0D/KCVMKzh8Xf6n5rnJV6yYbrk", - "fFMUIHQCS1M7L65vOvQpoDs+8gEhcBERkZfFIBREo44KSykWapt2/EJxikamN4bVn50/f+N3n+3POlEL", - "yVNN4T5MeJKmPRERGT+XRIT5zkBsRMixH87qdBU/nDk+CqDMfCTgKEqUy3B2iQJeWOQQpYrI8kRCh6Un", - "c6ZLg2RhnztaaFBAWHXEctClIfNrTETe5dCZQUJRzbBsmBkjbnnUzFyRusFwb0rT41tNnQQE+RuYuu9Q", - "edcj8BtxMASxO3fYTEqx4or1sw46kV69VkbB8BH6v+Bf6UQocP3Eg6b9pS1xR6vtVgt8yQJ0AFvl1pPJ", - "bShgLErFTHns8910eZd2ykFpBVwpp47VIWu1PXtw5KpCqIFCLKJY23fzvFaaSn7l2LkMZ+ufOvT/e1no", - "sPl1Vak4Zjx40oJiP8DRgx9QZGL++3sMN3LubPWk275Kne31Cg4y7bW3VatzMk4nYdZXsVkLxUTvQv/Y", - "g9NkZjbSDx6Bn7DCRc754NKB36IYYhZUC2YABTgrBCYK3XqAgCONPDyH/gWb6lBcCjYf0fL59HxwyZBQ", - "E8DCMImpKGSFb6mY0CN/p3EsKvgy7WKNqIOCejzNGlq9Rn2JmyazEospPH8+uDSzvBWvW+g1/AEgL3rS", - "8rxFfm6m2+zjG92PpN9obrTSmP8Al1i5KBmnpe2aXy8ZGYiI+7qL5XkYYESvkoLE2CNT6LLMG54D7gnL", - "QYOwI67t2zQ2VMMyhfdhDGuB2ZT54R3fGhLmoAExK3QWuohJ0CdE5upbXLHOsQa+LK2EYWe3/Exmv65c", - "Gn9nAYg7R+zp0YUxASjIQver1plm44MrGUoKFcytF5duiVjldEmPOxQ7/LlSB7FI2Pei2zJdOlmG3MxH", - "nNXOSu8lBptKOYGwdiGaYg5ymge47PFyQhFAMXZ+8SATfJT7lg5w/vXmX78WxValE4SdYQu7YQSt5CFv", - "absu1no9eLd7R7W/n7YWqDoLVMoblmEbDRS0Y3YMW2pp/Gy30tQ+wuWhKGtbD2OSuGjKCAzdLTPomMER", - "2uMWGOL742mvQeAq8y0gWO9f0CSGdY/9Ck0wSUwdKHOK/WkPqI2EFuImYYUp5VhxJtdxbI4p0bL2jOIq", - "aWtO2FdzQqnYroUCXXv7rJyidEVkl3E+59H6pSaa3RVwMsWQOC4IPMTyzEi63ujtoWrFzi2GHmMjDguh", - "1+MyPIBImyt7uzcUzdjpxUNh7QaCXYqYVrLntS2Jl0y2c/xW6Vpdw9vOOSs15AAngE9iYKNo5m1/7scb", - "hgKODpsHHPZ+k5Kywwo7cav+Lt9sBHnUsZ4oO6UA3D5J7+pJ+ip7hc4xfMqfKW/a87y9FscuWPxvm/BG", - "UCcpGifu3C81TnArYnHZnlyL/rKVYuIwb1uWokHGcrZi4SXFgi3rdxXCpEd/RShGqsCbDSZ8tkO2mKT8", - "/JNz8Swk7eFutJiscMYWGa0yTXD9sXngAc+5YzNNsvuSDLeNKwDfpJWvAC+QfNhaPsh8w618OLxT3kLZ", - "Z77ti6xYXYVaICSjDAR24iRwRM/qvMXcg+ISYcK9KGRtvEOVaeVIKAUNNf5JFoCuHRxVD82mHJSKtllm", - "/Q087q1jnj6taYZeyNOFws2r1jFS/l9YTTlgAFpUuaPt72TrO9Z6q8SWpUDgb3zMVSqtvJvF3xqSDfCG", - "KJjd8Rp+O4K8r3Egeug9Cp8ei0eCzJPoblHpSvSyRmwq2EZJICVa87hpVYq2OQ72J4CZ7c0iPajsYizs", - "T9woRAGxPHcXKEgIpNdx+VcMwYMXPgXpUdzgGH4PyQ2d/NAPYXbgSd9gJXRHGKw7XaVK/dnJ2WnvhP5v", - "cnLyhv3v/xrkjujev+c3kU0ckAzS1HNYBTWk8K0B7D0KEJ5D7y0bvDm425eNOVJbQToyPmnl457Kx/zu", - "bFxK4mOXlQI3R6HxUuFpPhqdvONNfu4HSoYCpqrUFEjiOb5Cx5VI22kUGZvUhx7PE1b7Mimbt0mi2mjZ", - "kowqSIaNS6YYRj5YVhV3ot8rJRNv8lNLJo6CJpIplkjbpWTiYNoKpli0buVSK5dKcqkgFzYol0TqTxvv", - "W5levc77VmRvb91v99n9lpOLQ4e1i19j7a9o81WCIQVNjNNRbO2tkuisARUdKiCtnuTFPVxV9mng4poy", - 
"cvsWn/dxTRGTyU2B4rW9XE1FLNJNbP1chZ+rwEeTV27JlC/k6SpppImr6z4mP/+5fV3Lmc0teL+B2sTc", - "XcU/7Pxda2XGgXu80snl26Nk4Xrf1wwrZmB3a4e25X/pz9ry/l64utSyd1cltxqXVkm/wqdVqIcGvj1k", - "t9aCAvyj8aj0Vm151OCuWnNMwoCegr0YENhjN1C6uWLvLbmszp+19lg8cI/W7XLY9rxTf1zFXbqotoJh", - "jxR3jTxY/WTX3+BvQszye6DADRcomKX0uoAYg1nFCT+CLkSPrQxqIoOCxPdLlB8snQgs/RB4DgocECwd", - "sdpuh8Bv5DjyASpQWnHKncgQi8ykOTzdAx/DVrkw1B3jjKdht1U53OaeLnyGe3ES1L1x5LMG1r5yZFkC", - "25eO/c9bikUmR6u3jp1lfWR++CD2EcQs1zW0Am+LQQE+IE1A2VjFlL1x/LbMVXMg0QoUiDSOzibDDoy3", - "7OL/ZQ7JnAsAUaHGuei/x/T0CgN/qf6e1g3UCaTAX97JBrWKyjQMfQgCi5iOXBFJC5y9UHiHptSlMc7D", - "IrPvi8V7OPc+mLGj9knQRRgzBwyVDNL7JQg8J0wI/VOoj5jqj7SB1AWPnAt4DxKf57v/F6WHfzno3kkC", - "DNkxrlu+mOlODtqpJKGd1dNr+gLcOg3tW92NnEapKrry9xH9fc2XKFXDPfYQjnyw7DF3iRp9V7Slwwr3", - "ivC+Qgmu1oEv+GDM7eKg9WFFtOL0HSuHFBEvKdAnUGdWBBRZ+iLlhrdsgteSQCu6WtHVVHRJPulRPqmW", - "XDkeZdqDPuF/lt6uQnINxGBD73AFV3vPbe+5P8k9d2fHWSYX2tPsRzrNcqfHTk42cb02h/1MeAPpVZq/", - "sFccXa176alAnYKUmqfqHCmQUPhv7vqNWtGaIQHIx838TFUKad+bim6fBQbaAIPn+Zn5fCq/1JSSyJMc", - "CDzmTJae/yRMr5KiWNL/dDxGFP/TcSLDg3RGP5ZuZzkYuG1zxnoaXoGV5R1sLsMVuKw9xff4FC+Gv1ky", - "dLdE0Cuw+LEoGVfF6YRn+SIJMxzl+f6olovHsibdirysTq+o6z8ma6vXz5al99TJ6zxMfI/H09KLpE5z", - "2aPcJDmuSgtEvoisYcmeLErssrBcHuTOLfX2V4e0yLy10evnqUiTiVWtAeTHlagrVXVshWqrJxVlF0EL", - "FMzqtSXRrrH0eg/JRExxsHcfrQzyYETmPGMJz2rmuHPkezE0uW6wDg2l3/YFCd+cVpIcvCSp4s9NixcY", - "CZki/3w+BrE7R4+wTgsSrQSYtLtWhIwJjIS7bl8ObCE+5HhG66mEt3XdXV0j26ZMEvsu9txKKuWTSrZ1", - "QXefjynlukJOprKQyrG/wvxSPtHtp7KpSjSlLFwvk2zuZaJ0v708Gsgaq600+kmkkf1dq5VFhyOLFMbf", - "viTyw1mdp5QfzhwfBSXdqGyOvgxnlyiAttagVgy9bDyTDx+hb+UyxFvmZq5iBkkHtNc7BH3PmEEO0oPX", - "YbMpcFQUM2EdmgIy5r20oSSABQqEsVe1fvb57ZKvpeHk12pfAx749B6KoSui3SuguFCarQJJ1n+7h5Qq", - "DdoC+uumoEulsHIWXIaz5seAcDSqSG3OPCCw8CQyOO5P2M/nquPLph1z+OB8orokvdw16WVccTiEjZxv", - "BFJ/bBpfwesmJbY0O63wpykSuY6iU9e5WpMxd40RL+yVBN40IVMa2CFmMD757MZb7mUpXqZMaql9t7cN", - "ToxeCPlFA37jJ3CpkIYts+UymlbnYAr4bCiYVfPV4WRi2pLXKUdAk8MtiikiCeJxGS9QuLM959Y/5wSf", - "rMB6FefdMfApYQSzHlwA5PdmcZhElQ+nVLmTt0BBXmwMhw3giAGKrNunTQa0xXva4FAinbZ/EuoQ07Dk", - "lHETWt7JvyZWUGujc8z66lOeq44xfvqQCvXmVsCN3VlXQnmjq93pdtl7hRNQQ0MtX2vvflpu2+wpeYwh", - "IXWuRZjtnuziyC7V2QwUckHBbCz6HEhS3x0dkwpi1jgj1T1pWUlzrdOgaWN8FKEeCR9gTTI8p38zdHi7", - "aq7pR2hCm7X6JD5mfkU3Q4YPbJE6Uscn0j+qtaEXlUdKkRy1CjOkP65TyiXIqN2O2FsdkSFA0rqiFm7T", - "hFGctOWvDYfNZszUkMGqDhwLbyleXS7nMmVKu5o5zbTpVvfaPeEBLq2cE2i75ulnGBl8hEubvCYZTKn7", - "8vAC2+bD5LKiMYDSJXp4sSKIWQzaGql8bCAcJQGPoxSGrxdx9WD7+TKOHmzqPXDzUOFQnTwqiCXLIASX", - "ziPwE6jPIwS/gUXkQyqyH+Dy9A1retrp0n+d8X+dUfFenW/o02bTDWXL4IlL04xD1XTOGg8PP9PQSpF2", - "rXdNYPa5VJQWhtz1TchsXIMO0l4BGAIYLmrMwiIx8Yu493BKaGLzhbzHz+5dffbfu5l1JPhTqKfwmwuh", - "Bw3lHPneNODz+ovJ8TTxH8zudG8TX9QxgjiTCbhSKNA+P7FgoMtvKBzwS0oH3Fw8tNEXeyYfGJuqQgJv", - "WEq4IHChX+F2y75zQ4aSODun4pqkBncr4SP8zAoFQ4C9QiEuDDGMfLDcuNjIHLbov56yy/KQJyfeVhEP", - "+UM4/Ru6FpoLQxrMcpS0QmpvhdSIUep25BMzo1naWLltzsLO+hEu22e9zNi40m2dIbu9setu7I6w/W6S", - "D8RpYDynOQ/iZkfzSB4xP+vRzBGwL0fzZsxqHLhWq/9JD8zv7L+9J0TmPfmJWbdrw48AAfzwDCoNhBeA", - "gPeQfEFkPpFsXys/JPvoxUcJ5F2/Xf7wpzzdtFXSMTCqaE/5vC+bghlr3u1qiLyan1HwiAhsGjAhe+md", - "QIfsa6v7St9PBR8reX1KbLe+nrpwiIwWtxQDwSeopPX2OUuJeuAosQt24Lh90QgHDu4qgQ2CMH722N6z", - "sx1pvYDYvXMV+VYnF2AApj7sxYDAHhuTsofgtVX0YiGF5A89/u9nLmJ8SGBZ2Fyw33FqRrIRNLzPwXrv", - "5bm+GrZeio5DP/lrZQunkH2WLTk240SYkatJF83vY20EfTNOOJwo+kPhhO0G+q+mFbxYqL8l53L4DoZz", - "RQh+Y86tOvkWcDFlzNfoBil76Vn8E/va3iAlNSr4WOkGKbHd3iB1N8iMFjcTJCjGO/7O/7BQAh0ggHDu", - "43BRF2TLqeHHUAXFsk2w8c875d3ftsK7q+iAPwfX7lGu2itDatqUSXMb00BedCUhW6SRKk1iFgE/hg68", - 
"FyJgu8ov3y475VegY09SXllKL40eLPatFV4vLLyMcmUF4VWl9URxuIBkDhPcW1Ad1K0vX5R1cUSX1Aev", - "LjPlTdr1k5jsh7goEPiNHEc+QAWqKI7U5A5QxnLLlC/NlJQDNPuyqRvIvxOYQGs2ZK0bc+A/aK8DYr7D", - "jmw+pGDV7dtDcrS3WgYL5xHGGIVBKxP3SSamu1OWiJJzVpWJ2VOfjat3nD421vl6jwCBl7Rhm1djn6vT", - "biIHQy0mt5lpIaWzPci2UIRlV2U18rzWIJhAYefWz7BgBVdxk4lb5m1xyX9dVeKKHr0o9JG7rE85KTs4", - "vINNwknpCn3DerTpJo91aFnt0aiwG+3j0c6ztmIfuA/ViSbHtInzBKfzMHwoP6eyz1/41/Y5leeYVHHS", - "5PZQQPU+scOOKh7fBiAh8zBG/4Een/j1bib+BMk89FhFD+D74ZO+2jLfIKYHchZQzzP2cS1GPMYExMTI", - "jmP6lZ9j1/2EzB12WSky5C2WzzYMoGuKUNbzEDnz1cmZBg8q9zCUiWMlh5U5BJ7wGvFDTjA1Fk+24dBN", - "YkSWDD9uGD4gSAdlRZG+qvTAUJqfURIC3YGV6aAu7+/4alwkwIJADnArh4UcvhoPVVQ1kMRFLLeyeO9k", - "cZkRUkl8NV4j3XBhYB2DtdEYDAF5/qrMMrw5ms1Pah1VUdzVlqH3iKGNnGfJ0ZUnqqjT2dvFk5UoHX5o", - "L1fbNxfoENPMZpDWs87tTPuosg+PKunebPqZWVdVvZJ1swLqznTJGapwenNCPBA7XndfK7tvU2KILVpR", - "PrQSYWelUFVafAK8HmqdiFAPdfoT3ehVq2xXy4nanIB9QuAiEsktWVtFfJgEx6ElA2wlSJVLPMLMV1qI", - "EE4E/v5dEF74Ea+OUXbF0DGkHStyh7Eki7Y8zJq3LLyP2cziJBBbVePRjoIoYf4Q/HFXt9znvdBU2lxm", - "FfKFbfhLCJRsTZW2AN5MOAvUCZf3kIz5sK1oeTntoFmWXoOlQQzXXij2+UIhd2krUoMA/NDDBJAagyHA", - "D6walLAU1lgJJwA/jNmg9iJiePEj2gZTRDTgUC2uWx7dAzOgiQ12kR5JeM30nsL4oSpZROaAbXRpar2Z", - "smASjoovDKkUIVVVPSky0oAX3tGR29E+t+3b+7lC/qsnMRSDmFjop38nz/EPx8aOivFqZvYapSCUW9ty", - "7v49lKuMt9Jhyaii+iGNnpBceFd7yWdnw09/WGaYaGtebyRDtdQe8jF6q3tXSkRzQ1DzWhRq9V9NSQql", - "ZG9bmEIpTKHgBdcYdHP1lV+uTIUObuty9oqtN0cw7SV1L8tX5PeoHA5cbUpqInC+q/+s82PJcULtCSzI", - "9JDdWgqsrwdNxeABqwliu1bNLNC6uZjj+vMvSPUx/d08Ta3Oz8fsMbL2MYk/WXKGVoE+quHrIRu9Ze6X", - "Z+4si8mNUoSSw7jOu1MeR2y7W7P2jszaX1TcBzb5Q7JNaqoybE7i4DmI4Jb0iDEbu5U3B6NM8A1rNYof", - "SKNIY1eEz1BlZKio1M5Y3PfT93Gs0TWqWJ8FTnJXloEs7NfKgI0DeAkwcYYXLGH9HDo+kDtoSlMEMBl6", - "xjxFr850eYp24GPbpKBnqSxfaxLZP9+aFWSJveONnSzEVi8TrKWdRvNTJk7z4D1IfNJ5c9LNiYpdpFBL", - "5369yuRjnkltunTYBPpJxSdzPoddqF3tY8/m9a1NpmRMx6wNBjqXcQ1TQNx56bGnSmM6nGCgbXk5KO8k", - "HBm2bvsimqT8VLLpx55IsdR8T5W+URIMPZxLPbsWgsv5dhsahEQEUvt6VJMejZPNLl5u8LEbh0G9RkJb", - "OX+H0wwoEqPZrNZ94jwOg59aTTmY/K7pxiKPTjuDJFWJj2rSeJsublu469KZm4J3VadKaadkFN9kOtqh", - "+VSHmaG8ImfudOnci7y8G0vdq0oRbJ++d7rcXgZfRSnYcQ7fHDLW0NDbY1ejpZfOuS2p6/TQPf5O/9OT", - "v9qVuSsfxNYPH5RwDrzoXbp6E1g5jO6+7J1lfTrtJrb5gYv14vRoavZWkSeIr8/dqsfENZnrkN2T9piz", - "tnR0tsfmIRj2Gx3WG5EPdeUl2azpjNbC4cBrTe6XfNhWtUlVQEy4gcPK1kepgJdwtLHt1akKajHIVlWo", - "lgOCLbchCuxUeXYc2D7oqa+M9W5KrcFsnw1m7BG5gbWMtd+hqWwf7XgRiCnSDK4rBbB44y/qY8aO4NOk", - "iNHCJpxEtgtXXxufxRIRJBha1VuUbVexbo1ZX2FnsgHuAQWeFVSsYWOQPqLAq4fm4I2pBC2gA+4poCXn", - "6SeAZSyzuoTO2cnZae+E/m9ycvKG/e//Go3VrHufTqAnXnqs9igUHdtq5BTiKbwPY7hNkN+yGTYJcwWW", - "71GA8Hx1mGX/neJ5U0BvFNPbexwoW+J/2qeBou7YWji24i69nTcB5iFtk78fOAI0etDl2V9N6G8ZCHHI", - "FahbNbxVw3evhre6ZatbvkgIFF6zYjsTQG1lkfrzfQvV07NznoLqJT49HmushmnLVeyHY9m5tSLusxVx", - "e/eilAAOynOqVaZaZepglKlsGZmo3ohtNgXJisFTK60G5q3GSJYkTGt12KxWYtAAtquXHE8T/6GXeSLq", - "I4reJv6DcGrbkKJCRzwc/8Qt+SGUeSpDi23Y0bR+a3ZbR6RyTebEcyqJxWm7VkJICfHWap+3Lim4u0qN", - "pOCNnF9iKHv/ukGxcTjOVTsVGzJNZwOxIfZpf8WGXFON2BDraMWGQWzU7vM2xcb39M9eKWdkbQSEHuSG", - "QuPA4yA0ODBWM9Kiem9DI/S72zo8FmMjDHhq5vFooI2aKImNMOBBVyg+KO7b5oHc3vUPPYZi23KkOpoi", - "dx3YkGQ58ECLvRcu24q9KEmXBvVRMzIq53182StLrYRUgz1+SuXnAKq/3VZdljYlK+0uUWkKzecsc0tV", - "GSsHOAF8MudvsU/fIuKhDqfoVX0mkeqcmZWg7Ug0cmyvGpYmKkcbN3+nsrFZ8K1aq8sMfysZdy8Z967Q", - "iRB0VVS+ndRZiizOOfXo5bHUDYREttdwdYpRK4V3KYXlDqygmVaodXuumKoSuFVMW/FrEr9CIanTiTcu", - "cnn1vJ4bJgGpiZdgbWQucln2ETwC5IOpD5n0VcSN3r7wHhJenQ+fsxkPXvTWpYw/8JIRuc1a0UzJSYWT", - "T/uCaHCYziFptUISefZPMIzxsZvEMazmbMxvB7yhQ7uVuPcWw/g9JOdisC3SHZ2pIZ0xiNsCxC9fgBi6", - 
"SYzIkolxNwwfEOwnVHb99ZWKqkLSoTy5SXJn268h4xki82R67ALfnwL3wUjO5+Ei8iGBnKav6fyO9jyi", - "E3F71Hs29DXF5bkcvkDgr07Oat5eXTGvV553DoHHDrfvHT/km5Hfh6JYfy4gM4c7ucD8HJbowwTEZlEw", - "pl9XQxzr2hxrDJ7t44xB1xBhYTjz4XbojQ39g9MbR9+G6S1D3A9Hbyh4RARW127CLJpJasO8A1O6rY5v", - "OsKE9R2KubZ4iqsTWTmz+wjLjckvsNUXrY9VVpOngL2M8iaaG2KO9o6B68KImC1vffYdpxY2MUmJ2tTN", - "530627En8cH5RIohyWAAqqA+vnId/bUeUyl5cWyX9t6evmLIqltUVNKn35vRF+/T2VZdejr4BuiLr7yl", - "r0r64thegb78cIYCM1ldhjPsoMAB7Gw8qlAwLtlAW3LOoEcwHb+ekHZ3j/bD2Qx6Dgra6/MLX5+7nd/O", - "zna17igOKQ0wo+0gIIgsnZ7zCHzkscnopogmKJg5UI5kVngZYeuv8t3Otx4M6FS9GBDYYzZwqkPztxod", - "M4cJqeHmMCF27BwmL2+sEkwW7lmh7tZIVaNNM+qxtU8t4GIKYzxHUYM7nNLJ7h7Hz8BPWTeRlGKrBK6f", - "tPmFTkVRe6lb5VKnYrCeJCOA8VMYV7hSpLnYaQdHtq8SqTdyzO0pSedzEMzSifZJW3IZZF6KqFact0pT", - "M6WpmtU55eeZcW19KoYzKonjqms3b4ErVarUU2pbfC/B2CeOl8hrHxpbpt/MTUlS+WYuS9gH7sNWHqnG", - "dOQ9fqOqkaQNH60eYYwFCEb3J7oG0U66QGEYP2q09GFwH76H5LMYdKM1iRVIswyNp0cnRye6HJCK59Ff", - "adevFuWGJxWLLXhbVhD7F+jEkCRxkENe4aZDxWwSBJR/0im+9eSQvTDiKafKLPAEp/MwfOgJR7Tj7+IH", - "i/B3etSJ1mVHNf67fWS7GMjsCJZOtGM/MMtQcQlfe7C9vHGiGJ6ukqnR+0u0+GrFHMcCzzZmCtlU+NXX", - "cIxQ3LBtosy95ZvN+E9y6Ln7pEANxUxVxhWKlbQOiMBOul0te+4RezKrTGmLmvJoypvsj+ca72veSutY", - "zZwzrXiOO5lW+SxrzvjD8Vhu7DsqVtzaI0tOyaWAL3lBMfsgM7W6vvJjJSHbpx3YC1reVhR/7twwnRUC", - "A4lE2e7ioCx5TQ3KbznNUHNxHWYrnCbF4B6rRGDNarA2uBftZYRMkyRaKYBtgN4LZ44QxKpQzIrxMd06", - "DcueExqoXD9DoNiKwWEtb700b6lRaOswlo3aZ89dzfTAvWCwzeuCeWTYxsqLnKQ5Ltu1cmglEYrqYSsP", - "jAriesxZoyZalcujm5Svi5cy3mP60mE8KRuUx9sHftaUqOAFJjZQP3j16sF6wGZxmESs7kcGgtwoIyis", - "00e47NSmAdmykFizFpd8VGrLce2hNrFS/a9GgkumJjI6t8isGk2TBa2UI2gvJddEwy5HzvCeWbdxQqkD", - "el3GVT4gEJOUpxB27iFx59AzVYfKBP+eK1KCDFZMPPRi6YYUeBvlGWqzC7XZhbaQXaiRaBayAVu8auVO", - "ciuxLHxrDsgE8yPI5S1LOekwtZ4q2Mq7vVIBM1JcVQUsOv5NIYhhnDr+dbWugMyTjMuDJPY7bzqd56/P", - "/y8AAP//5O+Irxo2AwA=", + "di5G1zd3V4Mvg/Gk0+38z+3gdpD98/3o+vbmbnR9e3VxN7p+O7xS9jiDUpl77IYRVOf8cj36+O7y+kun", + "25n0xx9r+0NC6K86ERNDjLWXUsrObjaGk7XtUiXQo9r0DAaQYsQB9Mh07uNw4RCAHxwURAnBXUcycteB", + "xD3SiSG/iNdKAjXtxzMjglESYP1CFuAbWiQLJ0gWU6qv32dLI85TGD/c++GTEydBXoCigLw6097nsdwS", + "S3D5FtKOBEYjCDyqLemUbgptLL6nhy10aDeK8ac5cuf8kFM3B/Md5vdifgrUSFiBreIGdFWakMvUiSB1", + "bQSQOtoq7fsDXHLdz/MQXTrwb3Ld1T0w2FRKMPEfvtvoZVzUycPXLK/4sTM08IeXxJnJRG4NFCcywg4T", + "9uXNsD8iwgUiAfK7ciK2GP3x2+eHL79xrnX6svG/WiANR2GAYRlrRCo0ZYzlwKoGg49ihuM8DoMvgnUn", + "MZrNYGzcx4zKPilqT2lgNw6DQTXd0iZXYgPKSjMVe9qRoxiFMSLLImkz8SKkU+fNK3Z48b9PyyRfUhDo", + "bF3d4hQ4S6v6mmKw+qzW46xAdGmbVNSnFMhOUmWbM2Tox2IMZTfAg+4SR/uzU8jQPdsmdTPKY8ivUvSm", + "4zQ5FsrDsk8MODagc498AilE9ZzAr6MMa9nmja/GinXBuIskjJDbj03suAD/CQNHKvgOpRjnl/7o6le5", + "+vHV2GFjrCPGUk13gYL/fdpdgG//++z172WVNwXWzPXc6Nj3YUwGC4D893GYRGb5TZtgnbD0ESZ0jbyF", + "NG3F9ES0tPussHwPPcIum7G8dgFq3cprLjl8cO1es09yW+laqT7BLxkb2Vu5rm4nDv1a3Yiv5hOk+tiI", + "ttfioyMGq8OKGR/BDAXwM4ylQK+HSTZ+7nZg8IjiMFhAbuau7ztQOlhflLktfBN7wJAYBtMQxB4KZhdC", + "zup1LG5+NsrzbBgulUnoYBLGkD3C6OHO9gb7ycwgBv1ktvmFd8WbEzvxng0mSgaUnpIyTQLbHoRVSNUq", + "FlqJotiAy/bbVJ1oNNcahp0FJPPQqzcTKOj6xLsoxF553K6s+3Q7nFqGnnYOeYer+WzU3GQDwfzaYcxG", + "qhQ03UCF2XOwCsrI6CDdg1o6vUQ6eReBGQrS94aqXbxJW6aKPBPdT03sRSrfWL2L6GhHMWxcDN71by8n", + "HWYX1Zs11AGuYw/Gb5fv5KuyHCaQii8sWV6zkZj2u0u1d02tdQ2+JulLbf0RVmS1MrjDi7wAL77Qi/d7", + "40Ik/Y+SYJwsFiCutfuwrfpS7lbBklxnThfyVW64PBPzm97kRuL88uf4+sqZLgnEv9Yr76nazqb/uB4N", + "yDH2gPnT5ZT5XgK6L1BWgCgkyAWKoStBklIEYLfDFSSz/DBJIAvRM4Ygdufa08hE7+XXQ2Zz1z4iMy0z", + "M3fKhlojp8HAdg+QxdC8VZNxIxh4wh5dNbBo1mTkfycwqYeYt2oybpwEgQXEolmTkXHiuhB69UCnDe1H", + "T6kcVz0NaW6K7NuRehVegcfWOLHMYl15b/oznGoEeZWfHZPniqedOMX+DqdHW3ohLY2JCYzspdeYwEiH", + 
"2EpVmKAFDBOiX774WLf0x3XV4EdF/ZXXL7Z0nV77ZzgdJUGFdONv4Hbv2mmn1OHT3GQEATZczO5RgPC8", + "2dR/c4qs2lFKtLylYffWILoY4sTXm58xATFpthhMAEmwxXro+cTbyuct8QxnTeJ085tTufsA42oWaLJc", + "RSmtA1k5mAs917828kEkgaS7YOaacbpNUvW4GVxdDK/ed7qd0e3VFf9rfHt+PhhcDC463c67/vCS/cFf", + "rvnfb/vnH6/fvdNqK1SN0/uz2XrBFrtqNltMwl6WsPlpaafKY+qbo9UfKcR5Izx+YXjz0NS6OiiwiYl0", + "ZMaW6QP34QuczsPw4cUXqcCyqSWGs0sUwEbOecw9gn6migSVLPJI9cOZ46MANvHE4h782jnocKJBrZJi", + "6s1baGwSBWypXmtZWEE6w9cMVZfwEfp5w83bWypohlfvrjvdzpf+6KrT7QxGo+uRXqYo46SXJ6v9z0Gg", + "EyTi+8vfPSVZ6aUH/7jG/TM/QsMbqOhccQfVIED11freEe4zdxGj3bNuJ4Df5L9edTtBsmD/wJ03pyfM", + "CpzjrFxnnUun9M6JOBWmE59ZXasUWLT+z/BbeeRXdiNn69J6ooYE+OolljZllh0fYcJfN7L4oRObW5xG", + "Yv0PvcF+giRGrkYeB8nixu6KzehYXrSPTOv9H6tbNR8LccdUdsU2Djiyu07zEcWl+qhT6xCRgZqbpasi", + "RCf/R4BA5k9WRqWVzZa5wDE/Kr2LG8BkBO+Rb3iYZQ7KwoNZHYx5L8esI2ReRFtw82YTfQZ+Am0d52L+", + "1IodFhkjTL5i159Q4IVP+m3fhE25BtGP5nVIaaJZxwJ40HYR/Jt+Cv6NLYPuJQoUj7AMzTyG4z6MXejZ", + "en4o9wRlv+R6U6hylPZVpes9OAwzHtMeh+nnNQ7E4hilI5FjU2JNQaV2NOjCgIyV+2zhnYiBZ6Jn/tXR", + "ef+pBogmN9RVLBJrWBO2ZjIQKM1sBqULdNG/u5pH0o3oqndrAUtxdK34hzOECYyhJ2/2muABwz6nvsPI", + "c+J0HB6dhTD7DOMjjc96CXl2viKZr3LVZBYRKkYnzxGkf/08sRQjGPlg+UOFLfAlKWYqbFxZjjtedn1K", + "89cnJzXrLcBtWrXJjKR0tz/CCnY/W/gkdDGVeUz0VbCV3n9Y6/hLRy1YfDQDziAmt7FB87wdXTK/Lhh4", + "zNFTXPqxQ8LtuCCYjsskQP+mupEHA4LuEYxT3VqogyK2j/ujqiGxU+iHwUxCXCtlt+gOa2forXRxHbtz", + "6CU+VChtXZf2LbukdzuEu97b6wlNvNizwb8q6PE2Z/dmoVn0j/H5h8HFLf1RpwymM2/XTXBPHf7Kq8+8", + "/nbh3NeYxDbnDzhKgnPVCNz4MYkDsOuzVAHAZoljK8X9S6nDSzpOZkRR6TNZpt23if9wAX1I4DsWgrGi", + "C2AaQZB6AD7ApcMul04EEE83woM8nOkyn2viAS5P37Cmp9xV7Yz/66xJ2oluJwJxdkXVX50a0g0f8Uvd", + "hWxFatzAYM8Nt9h4fN6ne99M8pWoh0X9FFpptOn1leMhH4rpxgsUiH+e2njdVmPIpCR77Lu34ZUUibhh", + "TiX9UuzSLCkL6lblXKqaQ58TSh8NuBFyL+WJagDyLUs9QSnFZNFYdzPXVf7ykrzpyozczXNurEtVCvqa", + "sqC6SAlM89WZOHObPJMmLNkq32tRtApn7oFpW3M5eF5NKq8SEFEexWT+VjWm6tfhMVyAaB7GcOyHZMO2", + "75xdWe+gyM2Z2A/5E5joYe9QsaIdGquKlCZyjcDIiRO5sHpTg+qEVr9Q5PvSO9N+paWLRoWF2hr0Am9m", + "aOmqtvaCXZ1SjeqZU/almYMggL4JTPHZQZ7+6Q/TwZ0nPrr+UYWPcGW0o8spmD19xUnWsoCBhWn19Nsa", + "S6fdzetmg6+z6L2w3dlZ1yQiUnTn6aKrkKH2fCEwMok7vSvxHPleDPPekLU6L8IXScyyl+pSe4ncf0Li", + "IMzSjEx91Z1CCRLdiisxvwfiZquKV0troz8l/FA+x1SmianJtNuhB1j21C8JkyH2FzrHHR3A6f0zOTl5", + "xUiZ5GK6lNwym3K5NyzZTN4KWnO0Ll2EBXVyn6cKut6Ci32fDKIw5z+mbMSGHPEZh30xvdfUEmWuOz4P", + "k4DowTXf41Z5eM/6VGCoaJvPRRJYOKKLuIm0/eblQJgQE4grigjmGNa/F7YXO2RuPLCBd6nYmTU0SNuY", + "HtrWJE4sZE2TFaddKlbMnQdWN9+mFJiurDJ4QaCuH7tz9AgPUi41fxbYKxET0luivlMF18eQxMsKKbo1", + "flSuZrthiYpbkIIEiUf9jdpE7/tgtMgzoNYpT7QxJEpwzVRgfo329B2UEAgNyUketFiP8ONhPSjdwEco", + "Xydte49lHyu6e4diTMaQ3wDsae8SNO3VMMyMX6FyABZmTjGroEmN++D7W0HM+xLjnyPTWkLORLq0i40G", + "3Avg7ur67sv16ONg1OlmP476k8Hd5fDTcJJ5CQyv3t9Nhp8GF3fXt8w2Nx4P319xP4JJfzRhf/XPP15d", + "f7kcXLzn7gfDq+H4Q94TYTSYjP7BPRVUpwQ69PXt5G40eDcaiD6jgTKJOvf48pq2vBz0x+mYw8HF3dt/", + "3N2O2VJktti70e3VHU8++3HwjzvVN8LQRACqNRHqOEZBqhIIJBY4Gk6G5/3LqtGqnDrEX3ccDZ8GVwXE", + "N3D6EH/z1lWRjxOAH/TZTbNEA5UZVUT/BLNR8okEmnTUmY9lm8r7sc0knarRBQQa6Z/mf7XPF1TIGau5", + "IIS+Jx507KQi24fNJ5INCfCtOmtRl2bbKVZsgbHIeDcw5CVMrT+hw1pLE9qC9cJ6CxAIgL8kyMXXEblO", + "SLVNSQw4B9gJIwI9R5gm0kH0c6ybCW/r2eZNueTWTkaXZUNomD6wNqc9gysb/auRlArZLneb5nJLeTzM", + "2S61a94DNUO/F7qsoLOwx4m2M2KPgc/5VaFgJjKy490JCZ5gbvAtQnSXWVg7A6Z6fN6LT4OdJ1Z2gkXo", + "OyCGDoiiOATuHAUzXn+CIbhqfpmtkxMJC9ZZEQq+ZFnoowwPi+6pxIViU3wHkJ/E0AIU5iqtApJLHs9y", + "Ienn9AHmSzU/fWZxgCAQO8ueP4vph6sjfsA3SWTvmLVNHNDa0D7nXjZxAJHhaoKqNvv8ZZYEWoDNcmGQ", + "P4iknuiHLvBZcNgj9MOIfWYxx17iFiq9KeqdklF3e6l0n9PqJZUPwbJ2jahbtst6Lqvl6617FxQsanrV", + 
"lJ/NWOMtqt412Qi5tPfGU7zmKJKJhrO9UpMHGqmR087eHE6ClJudSXxPy/C/GEHZ56mkrFfX+hbDmPe4", + "SaY+cqtIgY1XkXJahXlvNl3s3yqbPhL7JKXo9ZcrZjHoX3waXnW6nU+DT28HowrZWZ2HoP5y1uQuVoWJ", + "HByKsWzVq3FxvGI8VooASfnF6jyp4WUwuhtfXk863c7gM7dZTPrjj3ej2ytmE7m+UmJPWP6U8+tPw6v3", + "d18Gbz9cX3+swH1Oi9IpkiBeVET2s+/CX10roHkOAhI6TyBmufJK6hXvrY+Ub5b0QJ/vYDMpDPjY5iXq", + "4V8vD1tKE/Xsm1KQXQKDug1rnrdgAQmMZfYCeY7ysZxf0BE8ck4dDyy7zqnzBOED/e8iDMj81xV9dFL0", + "aLMZmMWuRNRN6CNXkwuVa/xVl+C0LCBvqlEaGojdPPvVObgK4MyrExZQW4FqFEhKqQMpjz6fdLqdz6d6", + "UcJ9QncQcGiMYeXOzk2q+VQkzX9OBxxrgjLMVVLW9GOvdmHnAP2MlUvUldekFNhI0RCj5qYCIvq/PCBm", + "VjtoS3FraHpJQ9MWDUBbqVvXwJC/sh3ewIVfmM+TORsDvgEJ1iU+U9mEO045CDsRa+2AwHNcEAQhcQCr", + "S8sK3suk3aUDSwcd1t3Ha+1RwPNiiLFql8pp0dLQUTZP0Q8fAJ7rjps5wHN1yP+FC9OJA4grorxe/JiX", + "XnfO56watH7CzzBG96gOvcy6RmXQo2hOf0VxHgY9J8wBvgEYP4Wx7RzAiUQHB0Ni4K9tvGR5CEc+WOYY", + "Qe5fY0NWHrtfDQR2PgfBDEoEGZkggE9mJDLehU8Z1qRGrYd9Bb1DjszWHVUCkgJRib/1YCillxVfujk8", + "mVB+Gc5QsHrpttX4e61KbnuHcbnGqA7XMqnXQaHb7oQ0CIY93C1ZvN1201S1Gs9RhA/VyFoyOu/wNN/G", + "KcMn023b59PzweUFnCazTReS7Qp9FKNF4gMCcZZpg72WuWHie84UsgdSrn2AQJRoCmMH5DRmXUhPXfXz", + "88GlUvWc3Q8egZ9Q6te6Y/sExjdg6YfAwIEiG0jE25TXB+Qnqn04YUB/iOEjChPcE+7FYoxOVfKg8sTs", + "U3k+UgoPFbmYqk03uQrhwo5TQxmVgewGLqCfZEYyB8lavGwDWMltXpxIsxOZ+7ouBg0nfhqLVdjhbPQu", + "nZBV3MH4PvG1iqBdjEgZCzJcpORgbgyWMI5hiFOm33JLTNfFKg1yqyDzkhyPK7O0fz49Z5EQE4AfKmqk", + "ExgHwBf5AozmKtHMGV5gSYouCJwY3ovLN+IKOcAPlH9zhKl2Vu1cG85FYpcU5vMpxYdM//Ks3zAZQUKb", + "Yl32DWx6reDoYmhIl408zIXeE4xhVsxra6h45otgMocvtKpEfqUUVfhL3g6KMkzVYCrEpxSOpmGUqLC6", + "Mtza9xM+3pFzS2/xdBKcTDF31KIo95jiI1phBxBVGtnlIavMH7tu1i9DBkoeicUQkjvyDIKGbbkI4lf2", + "PAzg9X3nzV+1wk7T/y3AyO0nZN557q7Sv38z5HUGV+n84VP/vPP81bg4MTgzuvrrLBEyAAuaD110rTQR", + "Q3FIBJ5Y18nSRMUsajm8d2grGBDkCioMmSVGMoiI6VeEfv9mePdx8A+NsC8mVZbTc0g01GJGKUOGPofu", + "R7gcNNa61CVx9e4BLo+cCfOWwg4zupGQV2OB+VbOfRwuVFxIIXK0RgrmFKtlT2PKZustkA1RXpxQHVlh", + "jDCOoUsy0UFCR7w/6d2fmQ1KulFZkeI46yIUHeRWara8SSqh9XRYXhU3chd7pwXGyzkpDBqpdKjOwO7q", + "6M1e5GUiaw8Egyo/tyQX3vbHw/PtSgUmiPcAmxSO7SKTrXRjuLwAs3MlyUgxqY4m/Ui97prWTy6rwB6Y", + "2Sbh1/DST1RT20pVDe8zANRLzxQ6IFg6f46vr3oYxgj46D/s6ZGv7GglpbZissIxEsaOCwichTH6j1ru", + "tXx2QBhUJbDCBCwi8VCanrvcaR0G9i5c+1WfXBymLM20qUKuch+Vk7F32eySlo7iTJeFGS05lTHTRAFG", + "WySTf0fBTMi3qyZKjPA7T0HN4GQWEBBFPnILuYfWKeQuFrVWKXftvF8z8bMHNmMpCA336vLGGtLT1lN4", + "qhcWtpGRo2YPa7PWWWSUU4g/ZbR06jgJjrZ0kzVXcDGT1Q9SMb2ta16RmiNO67z9W1Z/y2ZP90TJ6iKk", + "hSn9dwOLVpUdScddCF9A1wcxICLrjdkpQXA2wo6XdXF+IXECf6UHeBSHsxgsFuzq9Ms98DH8ddMOC0Yd", + "R1HWpKrDFLYyPg7DTLcJhaJi2xtYAevG3qRgrU7EbzIcZmShstFenLpZSvb67LufT41lfwEhcBEZ1F7x", + "UZFgxaq/mpRTO6kj7MuivNVIKhbQfbnyw8V0UrrnOhIvHZaJxgbTzesZF9CxRkXjbKR94ITK2sPp56pi", + "i/3xeafbuRiMzw3L5fW22qfBpk+DHG/beRmMxdhbfhikoJtMPc1lJ12QXm4yH4BPFbnB2FVVWvDqN2aQ", + "Nl8xF5l1ZrxKbRqSGEFcv3z65YL77Bir+NA2VgY7nv2LGWyaJR2T19Bm+Zl5Ew6cOrW6Zxmu9Ze6dMv2", + "QqRmRF/HFZIgt5xgrGFGMTlWLpNYMXuYPvVYMaPYeHA1uZuoi0nXcMdPyFL6s/PRoD8plFz7OLy54R+v", + "by8pdiZ348HVhTKy/uRRZKylqdk+1w1GAY/cbFJqADaloyxlbKn4RkCQv0qds+piHc3KcXAkmJnyJkQB", + "4VGK5R0QtKiVrVlqNn3wN1rAVSPweCNN7jerZWgOYu4q1nRnVdRY3kOYCpUEJny6ldlWrVzQVJLTu51V", + "pXssQNgUI9nSNOSeg00RmamQyNL6nV9/urkcTErZ/CqSFOZfu1YrY6Jc/vMHdTbNus9bTKMThtMS9jeq", + "UKnvhWYNU7ZiA2H7B4uap8WaW3D2npTi5Alg4dbRIB+Al9eY7NygNVugjJhk9XU1w4mvxaG6DgqcBfJ9", + "hKEbBh6203HrPGELszi/pCH7gEBM6G+/1pePt0I/HV52s8d/nR9yBcoF1QuvevljBAMQoaOrMLhKfB9M", + "ffjnmOXNSFv10CIKYzapcMUvN44AveJ0ZojMk+mRGy6O54C4c0h6HnyUfx+DCB0/nh5jGD/C+DgE7Iz+", + "1gvEWJ03zNC6ZhhYshhH4CmA3nklOyo2ct68zJhVubvLA/JvDSnogPaElyRganhqeLB+wuKdU9lZq0Bt", + 
"4b5nURpLw6FbKo9VVFSzegWG0ljlg3Jdy8NqG7nB2S0eBCov78MAw7j5kYdEt6YOFLbvF0dqNdodVSQm", + "VkaaNAOIsNHI6815GNyjmTbfSHVx2bUqP69AfIWYI2twcuWTyzOJyHfNROsUzlLt46rW1OXWGxkNpDmv", + "0nOmm10gCuyqWn/yrJCv2MUNQblHJ/0WfC0q9Nu1ClUbYDelFZcCilPgBSTmC9kELcTT/RYtsB6MyNyg", + "99JPOWUCcS+wJ0BgfA98Xz/kzhTRteufbUeTaCg4uU9DQ2TRU4R3tEfXz6bQaKzrG7grtkrLD6S0rOYM", + "p+oAaxW25MK3cMRe5A7qVQ7dr4Uj5CXPUUpNLBF6o+NUHH0bO013lgSv24liFMpaKRq/cfHVREr6um2q", + "Plvj9ita10f858btKun6Pp/y5EltWOjK/mb6ZwCRk6oUenlwYXT7GQW3l0FsGjIwFtBWg5XsAkplh+fu", + "IZDN1kvEtIGbL0/zm3G1NRcrt/JwtQwXVeIDv6qsqQRql5k0Qh9NIWn9myHjCgXJ+dBCHRHMIfBgbHe6", + "87bFTRTT1uJKmakr1/G1SkT1FYGUDyTtpoHmXVM0pDJOLti2qIRaZbSiS53SURhCdWhMMFWRTUiUX2sH", + "KqAsHbUmt1U++NSfUR1vvlDxNv7QP+106X/OXv/O/3h9etbpdj5dvK7GXhrPqskiq0xkHxub9mIJTN3Q", + "s6hXlxthIDsxd5pZAEgSww9r0zEd2knH0wpMNAtYcSU3hobLK2bfGBumsgzNAqsJigG8KaIUPOlXXASt", + "lkYGCt7TsOLB/2HlCscDFhTD/7gdXVaTx164zkmdxtIhJtWBTWmj3DnwfRhUOYU2CNGrdICXz+6FI9GJ", + "JXCW2n35gFa29v3gajBicvP9cPLh9i1z8xsNbwbMQ69//rHT7VwOrwZ95nz3efh/THue3WA3H4Rd6aXS", + "3LdDmilb/47Wv+PH8u9oXTDKDydrGmL3+yHhYOzYDd/Iax6lNRZv8U69ltWbtc5M3tm1Lf9EnXsxTl+j", + "VTulchpeQCLLKBScfJPA3itBpGDAc1BvhVFj0Wn7d2GsgUc+GLEMHDZhP6xhpozkvQ3WD2Tg4ODNpZOp", + "deAox3J3cjiR6JaQlbc2rw7kt9eriZ7ZQj1LdcoqYF/q1UXVjho8uxgwvqknmC86lw+JIvNidhT8V3BM", + "UqNE++9FSTetSi60fl5oYqMlFhuZPEWRCq3ym8SGNNSybxL7jQxtwiBCx9XtdQ4lPMmXufjAphaJ7UwC", + "VK6KDOf0FHOG904QEieKw0fkQa/rACcGgRcuZKcn5PvOFDozGMBYXmNU6jrbGsabo9nbTwJcbW92Tcop", + "nLXIplLLbLrYqeUlL36srC+5LkbGFJf2O2DYN/YkCgIvq/AY86FWu/IvIJmHXqPVCtA/8Z6pbn8eegaq", + "/TCZ3Mjc2W7opRQsDT32+QbuAE84wGbOTfzVEuHVJCRQWXPOZ4Yq3to68ZiWAlamnU/p1mXGrkmn27m5", + "HrP/3E6YlmQ6IXkQFq6K0MLiTYjXYXJB4EQwpnR1ZF8RT5iV2G1Xm3WLUgJK8/kBjNEsgJ6TdWLWoNvb", + "4YUjSHr3tzwfTKGPq8uHsjaMzHM+IVw025EHF3J0HB0afYDJBwhiMoWAVN3Xc7vGqsGyOg7Amcve+Zvy", + "2cnZWe/0rHf6anL6+s3J729+++Pojz/+ePX6j97J6zcnJ/ZpUgBnMHpkDzABU58ZwPYQ0u2fzuZTOYYu", + "TKuSYlN2FtqGR3/wqnRhvApJjfJzaagqFtV8smqeuDaTEnayXk4YqLvYALLivFrokoBu4TC4D+24Z6R0", + "oEeTH5LsgrxKvWo+7Dgbh7luq8ih3xzwCJAPpshHZMmO51wh3IzIf6EQ3bH8t71/Jicnr6DzXXb2YVeU", + "VH7+VZ+n1A9NZxOGCxDNwxg6tJEQQysSzViONWbz6UL5rYtkZFOn2W/OJ8PPvCB3+udN/3ZsCDS2iW7h", + "e5RGtvCz0pgRTJze/DwpAFlvwOO9b+v04dvRpWb4puoxa69VbZSjonSyV2bulbmeaNdNOwtVFNnmxbVr", + "Jq9OVFqBh5d/iTVeBFIgR3lRVqiwDYJZIp6xrIXc+OIj5scu76wUqC6n19GrakK+Dr6RGGgbYO/BPGxp", + "cQwiVSG9vuyzDAM3/5h8YI8ik3/cDMbno+ENy5dy+/YfeuNOUeiWaKpW6AIuCOnQlNIKuq8UuHXxGGlD", + "Jwly4jw3uKZGPytdbqoNixbJQpmkydC64ucVnFE0qo0Hl+8+XI95qodP/as+TyHzZfD2w/X1R+NesOO5", + "bAJW16aPY0p/sXCT7jYoC8sVEVkYVl9O9O9wajii6BcdQFac/mc41R2JO9EojZiTRQQ1ajaYrb7W1DYL", + "tBe76uc54WaY3e0qVyDet5pJXOUpTSKz0mauOWHT0A0DD4knFn7LczWpTmaQKN9Z6XmN90cgk57wHHwz", + "SLDwXE27OjPaN9UaFLu9FmGM9cckBgTOarODKxBe5vrxstbmi0hZWqUQk3zh7GJ+4Vdn9eJLTl1cTVeL", + "1aotGl7o0iCmAA4vtDiUvT+iIGdIeXd7dT4ZsgPr4nbUf3tJldSL/vtKAUkHkZpIIwpms2vYS37Xqzdr", + "xWDuWDPSX++eK/bTmDuKMclHWBVOSUICfB3Fpjz2AJcGnyI5PCVLu4hNeTsHDo6gi+6Rm03i/BIBjKHn", + "PCIg/NV/1XOFERENHM7011sSJ1Azft37req5lRpgTk9OToyeWNph8r5TDd2gGi3o73AqxZjtOW4o/LB2", + "dDM/EXdtpORzC1vPy4CQcybapGOQ6vOh9Q4ylxp5u2ww+ETpVXbXaaiSGB1+1skdng2kuvIoYH+tFiZ7", + "cldWnH7sD4VREqyRV7k8yjsE/dy5r6btyGg5J8UUyVgzyVg6M7Wyu5Xdrex+KdltmOMHFO0V3pAriGY2", + "2pDAhdm/0nBfqe9srLY3ZqnQqhPurulxlmVb23gStQ0MaJDpxZS8xdwUYlHdEiKVUeuop5Qp9mZwdcET", + "xGapYjVZgPM5Y9P0sm/75x+v372rPSXZtCvdm/MCxUyMk7w4KfrbhMGNIvlLsNIGY3cOvcSvCIoydF77", + "OPpSzJNiKWBqNhvzKupGL6RcepYtsmNVPTJcuwijkYBlXG5CR3Koc96xTgstNC/NnzGENrl0VR5vyXTa", + "j4K5tN8kjzbPDl612AmY6dDrc5VxfZN/sOHkKsKsyyGsoh8hFM5jepG518sFLUtzvrxDBm6sm5A532tn", + 
"ZHLkTjzebnparF9hc82ggDeN5IVpyMUqA6f42axyz9UtPfoyDexOvEI0RzNPMWOUp5t82aoCQ9Fmiyyb", + "e8Kw2RD11YM5vdyDxCc3lVmWRCNjtiWrRwJxi/wT84N3YSiB9ef4+srhQJfDdtgIWica+Sz4Qo99Yexx", + "b0wLNGChdkzQAoaG4jiYIPdhaXLFod8cLJ5V7F4SFXnRgG2ZDvZ4Wngps8Kx0mfMkz/pUP6YUbY5iavN", + "Ap+U92zbd4vGyXKtr4FyWZIwcgN9red0RlabfBtqQp97sSe7Qjh3qMgehQpVhWMIub+KsaTIAnyrafHU", + "TNk31RXhkR8Jlb9MfnIIpxDEMJb5TBhG2bHCfs42ZU5IxK49YfiAoGyO6K7yn+Tb+ZuOCGHO+orUNrR3", + "gkm4sJzsmUl87haliR7gszj9myErd0WYTSz/a0qIndOjk6MTRsc8iLvzpvPq6PToRMRjM0ywmGtflImd", + "6QJk3svnedoqgBg7qT2GbjqQxU06l+L7e4YGGdDAZjk7OSkP/AECn8wZil7z724YEJFUQ9STpk2P/8ac", + "r3B6ANbw8SCOQyqFn0v+qVchSdeRI47Om7++djtY1nChq84aSp+SvwTM7hy6D52vtD/DXwyBt6xHIG2G", + "qjA4kg32HYVswQ4JHeC6MCIOicH9PXJrMZpioBalj6fHwKciJZj14AIgv8cekvHxd/az+tszx4sPieb2", + "dMF+xw5IM33R7g7rzt+mS7vQpy0GtAFzteAjMJ6JwQISpg/8VeHkU5rBEXnOO294HoRUaJSW0lGFGn8f", + "yHZsvZq8X0v09JvGkzBxXYjxfeL7S4ej1MulSSsh77nb+W1XlNd3FsCnWICewzJoeTLsiIPxauNg6KB4", + "F8ZT5HmQ3z4y+uZ0UkVmkuInrAk9rL71YqFysA+8b6erIYyv7NpLXE2WdH7dWofE+Qg/Bokzengbcnm8", + "EWLg2OGbVkBcGrdWJpNKbJHQSSTO89h41ov9jSxEuwQd7DkxwAFtxYClGODUsj0xoB6QEeqR8AEG9FSU", + "f7PTMAp1KQ1G8DF8gA4IWLJG1lp4a6UzFsREhCa0lTTo0O42UiId3iATJKx7ddzFbHmCzhl0PzZR4yZU", + "LUiHbuxE7Jwk4+y3KkpOtzxHwa4fJt6xekM3a9ClTHHy2sMGcVCACQhcWCLic/pZupeYFevt45YB4iRB", + "FnCxLwRWo7VzBKvv9WLrPykvbN96coheGHFnF3GiKfvNzeHH39l/n6v2m0op1uqotKHMKs43slYS8STR", + "JuWEp3DcpRDa3GaL1Eo1hzevofIoxBrHBtuxVrblSFzBTEbeHMUVUo3Tz1czhR/XiTW2LalUq6H5i1SA", + "/ex0f8FIuKX9/aL9BVz5DDee3rs7uEXGtSY0lR6JB3KQb+IIp2McMzs93yVs3PFLhOkFyHdyrU0bTFsP", + "8w23ttt0LrHjypQNN19mwMmtbp8IId16thGFTSjvf26TwwCRkErz4++c45+PozicQvPlUr59OiBXeYLZ", + "dXnlilwuBDPDp1PfhJiMkuCGzWtvmzIdeqnk2vGpV0FQ8Bt0E2lbYfg92umpcBUSVoEgjNF/eJZ6kdOI", + "B1/zKM2SmZMA5EPP4XZ7h22P807I82G2rfqDI0dm2Afuw/F39h8LK74zpg2VAit5ymFfRXIoe6N9bkwj", + "8TAQ99I6n8fJPqk2p7sB4zbISJhP/Ho3E/OcYyx1I/D98IlOr3sRKFKtFL3s9yoVixNdnmMCfPwdB9iK", + "W67GqtQv80uAG7BJfjAzo4iTe+/YpICMllH2kFFKBJuyytW4klECrGETqbgo1ia96kLnlVfiEos0fht7", + "Mf2jazYE8MJMK1kCFBjOXr/OAXG6CR0oikP6D+i1Z9gesabpEsnqNzggiiS1l4813qbAjwRMfXjsgRk+", + "TlO/Gy+NmN0aWTuHzAFxptAPg5maVSBNMw5m5Svl59MLwMrNTkQJ9XpzmUzwnSVo4Sm3Gcv8O4HxMuMZ", + "D8zukFd9zG0rQsRK7hTgfamLjzX1bqwG/gWYnYuYL332sQo5RKeUr39s1p/bStjtvN6V8KO3ULSIfLiA", + "ASnpBsx4IekgfToH+EErYVjD4+/0PzXPS7zSxXTJ+aYoQOgElqZ2No7x0KeA7vjIB4TARUREXhaDUBCN", + "OiospViobdrxCzU9GpneGFZ/dv78jd99tj/rRK2/TzWF+zDhSZr2RERk/FwSEeY7A7ERIcd+OKvTVfxw", + "5vgogDLzkYCjKFEuw9klCng9lkOUKiLLEwlFWt7p0iBZeBpGLTQoIKyoZDno0pA8NyYiNXbozCChqGZY", + "NsyMEbc8amauSN1guDelVQWspk4CgvwNTN13qLzrEfiNOBiC2J07bCalxnPF+lkHnUivXiujYPgI/V/w", + "r3QiFLh+4kHT/tKWuKPVdqsFvmQBOoCtcuvJ5DYUMBalYqY89vluurxLO+WgtAKulFPH6pC12p49OHJV", + "IdRAIRZRrO27eV4rTSW/cuxchrP1Tx36/70sdNj8uqoUajMePGkdth/g6MEPKDIx//09hhs5d7Z60m1f", + "pc72egUHmfba26rVORmnkzDrq9ishWKid6F/7MFpMjMb6QePwE9YvSfnfHDpwG9RDDELqgUzgAKc1U8T", + "9YE9QMCRRh6eQ/+CTXUoLgWbj2j5fHo+uGRIqAlgYZjEVBSyesFUTOiRv9M4FhV8mXaxRtRBQT2eZg2t", + "XqO+xE2TWYnFFJ4/H1yaWd6K1y30Gv4AkBc9aVXjIj8302328Y3uR9JvNDdaacx/gEusXJSM09J2za+X", + "jAxExH3dxfI8DDCiV0lBYuyRKXRZ5g3PAfcEivIT4tq+TWNDNSxTeB/GsBaYTZkf3vGtIWEOGhCzWnSh", + "i5gEfUJkrr7FFctDa+DL0koYdnbLz2T268ql8XcWgLhzxJ4eXRgTgIIsdL9qnWk2PriSoaRQ+N16cemW", + "iFVOl/S4Q7HDnyt1EIuEfS+6LdOlk2XIzXzEWTG19F5isKmUEwhrF6Ip5iCneYDLHq/IFAEUY+cXDzLB", + "R7lv6QDnX2/+9WtRbFU6QdgZtrAbRtBKHvKWtutirdeDd7t3VPv7aWuBqrNApbxhGbbRQEE7ZsewpZbG", + "z3YrTe0jXB6Ksrb1MCaJi6aMwNDdMoOOGRyhPW6BIb4/nvYaBK4y3wKC9f4FTWJY99iv0ASTxNSBMqfY", + "n/aA2khoIW4SVphSjhVnch3H5pgSLWvPKK6StuaEfTUnlKovWyjQtbfPyilKV0R2GedzHq1faqLZXQEn", + 
"UwyJ44LAQyzPjKTrjd4eqlbs3GLoMTbisBB6PS7DA4i0ubK3e0PRjJ1ePBTWbiDYpYhpJXte25J4yWQ7", + "x2+VrtU1vO2cs1JDDnAC+CQGNopm3vbnfrxhKODosHnAYe83KSk7rLATt+rv8s1GkEcd64myUwrA7ZP0", + "rp6kr7JX6BzDp/yZ8qY9z9trceyCxf+2CW8EdZKiceLO/VLjBLciFpftybXoL1spJg7ztmUpGmQsZysW", + "XlIs2LJ+VyFMevRXhGKkCrzZYMJnO2SLScrPPzkXz0LSHu5Gi8kKZ2yR0SrTBNcfmwce8Jw7NtMkuy/J", + "cNu4AvBNWvkK8ALJh63lg8w33MqHwzvlLZR95tu+yIrVVagFQjLKQGAnTgJH9KzOW8w9KC4RJtyLQtbG", + "O1SZVo6EUtBQ459kAejawVH10GzKQalom2XW38Dj3jrm6dOaZuiFPF0o3LxqHSPl/4XVlAMGoEWVO9r+", + "Tra+Y623SmxZCgT+xsdcpdLKu1n8rSHZAG+Igtkdr+G3I8j7Ggeih96j8OmxeCTIPInuFpWuRC9rxKaC", + "bZQEUqI1j5tWpWib42B/ApjZ3izSg8ouxsL+xI1CFBDLc3eBgoRAeh2Xf8UQPHjhU5AexQ2O4feQ3NDJ", + "D/0QZgee9A1WQneEwbrTVarUn52cnfZO6P8mJydv2P/+r0HuiO79e34T2cQBySBNPYdVUEMK3xrA3qMA", + "4Tn03rLBm4O7fdmYI7UVpCPjk1Y+7ql8zO/OxqUkPnZZKXBzFBovFZ7mo9HJO97k536gZChgqkpNgSSe", + "4yt0XIm0nUaRsUl96PE8YbUvk7J5mySqjZYtyaiCZNi4ZIph5INlVXEn+r1SMvEmP7Vk4ihoIpliibRd", + "SiYOpq1gikXrVi61cqkklwpyYYNySaT+tPG+lenV67xvRfb21v12n91vObk4dFi7+DXW/oo2XyUYUtDE", + "OB3F1t4qic4aUNGhAtLqSV7cw1VlnwYurikjt2/xeR/XFDGZ3BQoXtvL1VTEIt3E1s9V+LkKfDR55ZZM", + "+UKerpJGmri67mPy85/b17Wc2dyC9xuoTczdVfzDzt+1VmYcuMcrnVy+PUoWrvd9zbBiBna3dmhb/pf+", + "rC3v74WrSy17d1Vyq3FplfQrfFqFemjg20N2ay0owD8aj0pv1ZZHDe6qNcckDOgp2IsBgT12A6WbK/be", + "ksvq/Flrj8UD92jdLodtzzv1x1XcpYtqKxj2SHHXyIPVT3b9Df4mxCy/BwrccIGCWUqvC4gxmFWc8CPo", + "QvTYyqAmMihIfL9E+cHSicDSD4HnoMABwdIRq+12CPxGjiMfoAKlFafciQyxyEyaw9M98DFslQtD3THO", + "eBp2W5XDbe7pwme4FydB3RtHPmtg7StHliWwfenY/7ylWGRytHrr2FnWR+aHD2IfQcxyXUMr8LYYFOAD", + "0gSUjVVM2RvHb8tcNQcSrUCBSOPobDLswHjLLv5f5pDMuQAQFWqci/57TE+vMPCX6u9p3UCdQAr85Z1s", + "UKuoTMPQhyCwiOnIFZG0wNkLhXdoSl0a4zwsMvu+WLyHc++DGTtqnwRdhDFzwFDJIL1fgsBzwoTQP4X6", + "iKn+SBtIXfDIuYD3IPF5vvt/UXr4l4PunSTAkB3juuWLme7koJ1KEtpZPb2mL8Ct09C+1d3IaZSqoit/", + "H9Hf13yJUjXcYw/hyAfLHnOXqNF3RVs6rHCvCO8rlOBqHfiCD8bcLg5aH1ZEK07fsXJIEfGSAn0CdWZF", + "QJGlL1JueMsmeC0JtKKrFV1NRZfkkx7lk2rJleNRpj3oE/5n6e0qJNdADDb0Dldwtffc9p77k9xzd3ac", + "ZXKhPc1+pNMsd3rs5GQT12tz2M+EN5BepfkLe8XR1bqXngrUKUipearOkQIJhf/mrt+oFa0ZEoB83MzP", + "VKWQ9r2p6PZZYKANMHien5nPp/JLTSmJPMmBwGPOZOn5T8L0KimKJf2z4zGi+GfHiQwP0hn9WLqd5WDg", + "ts0Z62l4BVaWd7C5DFfgsvYU3+NTvBj+ZsnQ3RJBr8Dix6JkXBWnE57liyTMcJTn+6NaLh7LmnQr8rI6", + "vaKu/5isrV4/W5beUyev8zDxPR5PSy+SOs1lj3KT5LgqLRD5IrKGJXuyKLHLwnJ5kDu31NtfHdIi89ZG", + "r5+nIk0mVrUGkB9Xoq5U1bEVqq2eVJRdBC1QMKvXlkS7xtLrPSQTMcXB3n20MsiDEZnzjCU8q5njzpHv", + "xdDkusE6NJR+2xckfHNaSXLwkqSKPzctXmAkZIr88/kYxO4cPcI6LUi0EmDS7loRMiYwEu66fTmwhfiQ", + "4xmtpxLe1nV3dY1smzJJ7LvYcyuplE8q2dYF3X0+ppTrCjmZykIqx/4K80v5RLefyqYq0ZSycL1MsrmX", + "idL99vJoIGusttLoJ5FG9netVhYdjixSGH/7ksgPZ3WeUn44c3wUlHSjsjn6MpxdogDaWoNaMfSy8Uw+", + "fIS+lcsQb5mbuYoZJB3QXu8Q9D1jBjlID16HzabAUVHMhHVoCsiY99KGkgAWKBDGXtX62ee3S76WhpNf", + "q30NeODTeyiGroh2r4DiQmm2CiRZ/+0eUqo0aAvor5uCLpXCyllwGc6aHwPC0agitTnzgMDCk8jguD9h", + "P5+rji+bdszhg/OJ6pL0ctekl3HF4RA2cr4RSP2xaXwFr5uU2NLstMKfpkjkOopOXedqTcbcNUa8sFcS", + "eNOETGlgh5jB+OSzG2+5l6V4mTKppfbd3jY4MXoh5BcN+I2fwKVCGrbMlstoWp2DKeCzoWBWzVeHk4lp", + "S16nHAFNDrcopogkiMdlvEDhzvacW/+cE3yyAutVnHfHwKeEEcx6cAGQ35vFYRJVPpxS5U7eAgV5sTEc", + "NoAjBiiybp82GdAW72mDQ4l02v5JqENMw5JTxk1oeSf/mlhBrY3OMeurT3muOsb46UMq1JtbATd2Z10J", + "5Y2udqfbZe8VTkANDbV8rb37ablts6fkMYaE1LkWYbZ7sosju1RnM1DIBQWzsehzIEl9d3RMKohZ44xU", + "96RlJc21ToOmjfFRhHokfIA1yfCc/s3Q4e2quaYfoQlt1uqT+Jj5Fd0MGT6wRepIHZ9I/6jWhl5UHilF", + "ctQqzJD+uE4plyCjdjtib3VEhgBJ64pauE0TRnHSlr82HDabMVNDBqs6cCy8pXh1uZzLlCntauY006Zb", + "3Wv3hAe4tHJOoO2ap59hZPARLm3ymmQwpe7Lwwtsmw+Ty4rGAEqX6OHFiiBmMWhrpPKxgXCUBDyOUhi+", + 
"XsTVg+3nyzh6sKn3wM1DhUN18qggliyDEFw6j8BPoD6PEPwGFpEPqch+gMvTN6zpaadL/3XG/3VGxXt1", + "vqFPm003lC2DJy5NMw5V0zlrPDz8TEMrRdq13jWB2edSUVoYctc3IbNxDTpIewVgCGC4qDELi8TEL+Le", + "wymhic0X8h4/u3f12X/vZtaR4E+hnsJvLoQeNJRz5HvTgM/rLybH08R/MLvTvU18UccI4kwm4EqhQPv8", + "xIKBLr+hcMAvKR1wc/HQRl/smXxgbKoKCbxhKeGCwIV+hdst+84NGUri7JyKa5Ia3K2Ej/AzKxQMAfYK", + "hbgwxDDywXLjYiNz2KL/esouy0OenHhbRTzkD+H0b+haaC4MaTDLUdIKqb0VUiNGqduRT8yMZmlj5bY5", + "CzvrR7hsn/UyY+NKt3WG7PbGrruxO8L2u0k+EKeB8ZzmPIibHc0jecT8rEczR8C+HM2bMatx4Fqt/ic9", + "ML+z//aeEJn35Cdm3a4NPwIE8MMzqDQQXgAC3kPyBZH5RLJ9rfyQ7KMXHyWQd/12+cOf8nTTVknHwKii", + "PeXzvmwKZqx5t6sh8mp+RsEjIrBpwITspXcCHbKvre4rfT8VfKzk9Smx3fp66sIhMlrcUgwEn6CS1tvn", + "LCXqgaPELtiB4/ZFIxw4uKsENgjC+Nlje8/OdqT1AmL3zlXkW51cgAGY+rAXAwJ7bEzKHoLXVtGLhRSS", + "P/T4v5+5iPEhgWVhc8F+x6kZyUbQ8D4H672X5/pq2HopOg795K+VLZxC9lm25NiME2FGriZdNL+PtRH0", + "zTjhcKLoD4UTthvov5pW8GKh/pacy+E7GM4VIfiNObfq5FvAxZQxX6MbpOylZ/FP7Gt7g5TUqOBjpRuk", + "xHZ7g9TdIDNa3EyQoBjv+Dv/w0IJdIAAwrmPw0VdkC2nhh9DFRTLNsHGP++Ud3/bCu+uogP+HFy7R7lq", + "rwypaVMmzW1MA3nRlYRskUaqNIlZBPwYOvBeiIDtKr98u+yUX4GOPUl5ZSm9NHqw2LdWeL2w8DLKlRWE", + "V5XWE8XhApI5THBvQXVQt758UdbFEV1SH7y6zJQ3addPYrIf4qJA4DdyHPkAFaiiOFKTO0AZyy1TvjRT", + "Ug7Q7MumbiD/TmACrdmQtW7Mgf9Dex0Q8x12ZPMhBatu3x6So73VMlg4jzDGKAxambhPMjHdnbJElJyz", + "qkzMnvpsXL3j9LGxztd7BAi8pA3bvBr7XJ12EzkYajG5zUwLKZ3tQbaFIiy7KquR57UGwQQKO7d+hgUr", + "uIqbTNwyb4tL/uuqElf06EWhj9xlfcpJ2cHhHWwSTkpX6BvWo003eaxDy2qPRoXdaB+Pdp61FfvAfahO", + "NDmmTZwnOJ2H4UP5OZV9/sK/ts+pPMekipMmt4cCqveJHXZU8fg2AAmZhzH6D/T4xK93M/EnSOahxyp6", + "AN8Pn/TVlvkGMT2Qs4B6nrGPazHiMSYgJkZ2HNOv/By77idk7rDLSpEhb7F8tmEAXVOEsp6HyJmvTs40", + "eFC5h6FMHCs5rMwh8ITXiB9ygqmxeLINh24SI7Jk+HHD8AFBOigrivRVpQeG0vyMkhDoDqxMB3V5f8dX", + "4yIBFgRygFs5LOTw1XiooqqBJC5iuZXFeyeLy4yQSuKr8RrphgsD6xisjcZgCMjzV2WW4c3RbH5S66iK", + "4q62DL1HDG3kPEuOrjxRRZ3O3i6erETp8EN7udq+uUCHmGY2g7SedW5n2keVfXhUSfdm08/Muqrqlayb", + "FVB3pkvOUIXTmxPigdjxuvta2X2bEkNs0YryoZUIOyuFqtLiE+D1UOtEhHqo05/oRq9aZbtaTtTmBOwT", + "AheRSG7J2iriwyQ4Di0ZYCtBqlziEWa+0kKEcCLw9++C8MKPeHWMsiuGjiHtWJE7jCVZtOVh1rxl4X3M", + "ZhYngdiqGo92FEQJ84fgj7u65T7vhabS5jKrkC9sw19CoGRrqrQF8GbCWaBOuLyHZMyHbUXLy2kHzbL0", + "GiwNYrj2QrHPFwq5S1uRGgTghx4mgNQYDAF+YNWghKWwxko4AfhhzAa1FxHDix/RNpgiogGHanHd8uge", + "mAFNbLCL9EjCa6b3FMYPVckiMgdso0tT682UBZNwVHxhSKUIqarqSZGRBrzwjo7cjva5bd/ezxXyXz2J", + "oRjExEI//Tt5jn84NnZUjFczs9coBaHc2pZz9++hXGW8lQ5LRhXVD2n0hOTCu9pLPjsbfvrDMsNEW/N6", + "IxmqpfaQj9Fb3btSIpobgprXolCr/2pKUigle9vCFEphCgUvuMagm6uv/HJlKnRwW5ezV2y9OYJpL6l7", + "Wb4iv0flcOBqU1ITgfNd/WedH0uOE2pPYEGmh+zWUmB9PWgqBg9YTRDbtWpmgdbNxRzXn39Bqo/p7+Zp", + "anV+PmaPkbWPSfzJkjO0CvRRDV8P2egtc788c2dZTG6UIpQcxnXenfI4YtvdmrV3ZNb+ouI+sMkfkm1S", + "U5VhcxIHz0EEt6RHjNnYrbw5GGWCb1irUfxAGkUauyJ8hiojQ0Wldsbivp++j2ONrlHF+ixwkruyDGRh", + "v1YGbBzAS4CJM7xgCevn0PGB3EFTmiKAydAz5il6dabLU7QDH9smBT1LZflak8j++dasIEvsHW/sZCG2", + "eplgLe00mp8ycZoH70Hik86bk25OVOwihVo69+tVJh/zTGrTpcMm0E8qPpnzOexC7Wofezavb20yJWM6", + "Zm0w0LmMa5gC4s5Ljz1VGtPhBANty8tBeSfhyLB12xfRJOWnkk0/9kSKpeZ7qvSNkmDo4Vzq2bUQXM63", + "29AgJCKQ2tejmvRonGx28XKDj904DOo1EtrK+TucZkCRGM1mte4T53EY/NRqysHkd003Fnl02hkkqUp8", + "VJPG23Rx28Jdl87cFLyrOlVKOyWj+CbT0Q7NpzrMDOUVOXOnS+de5OXdWOpeVYpg+/S90+X2MvgqSsGO", + "c/jmkLGGht4euxotvXTObUldp4fu8Xf6n5781a7MXfkgtn74oIRz4EXv0tWbwMphdPdl7yzr02k3sc0P", + "XKwXp0dTs7eKPEF8fe5WPSauyVyH7J60x5y1paOzPTYPwbDf6LDeiHyoKy/JZk1ntBYOB15rcr/kw7aq", + "TaoCYsINHFa2PkoFvISjjW2vTlVQi0G2qkK1HBBsuQ1RYKfKs+PA9kFPfWWsd1NqDWb7bDBjj8gNrGWs", + "/Q5NZftox4tATJFmcF0pgMUbf1EfM3YEnyZFjBY24SSyXbj62vgsloggwdCq3qJsu4p1a8z6CjuTDXAP", + 
"KPCsoGING4P0EQVePTQHb0wlaAEdcE8BLTlPPwEsY5nVJXTOTs5Oeyf0f5OTkzfsf//XaKxm3ft0Aj3x", + "0mO1R6Ho2FYjpxBP4X0Yw22C/JbNsEmYK7B8jwKE56vDLPvvFM+bAnqjmN7e40DZEv/TPg0UdcfWwrEV", + "d+ntvAkwD2mb/P3AEaDRgy7P/mpCf8tAiEOuQN2q4a0avns1vNUtW93yRUKg8JoV25kAaiuL1J/vW6ie", + "np3zFFQv8enxWGM1TFuuYj8cy86tFXGfrYjbuxelBHBQnlOtMtUqUwejTGXLyET1RmyzKUhWDJ5aaTUw", + "bzVGsiRhWqvDZrUSgwawXb3keJr4D73ME1EfUfQ28R+EU9uGFBU64uH4J27JD6HMUxlabMOOpvVbs9s6", + "IpVrMieeU0ksTtu1EkJKiLdW+7x1ScHdVWokBW/k/BJD2fvXDYqNw3Gu2qnYkGk6G4gNsU/7KzbkmmrE", + "hlhHKzYMYqN2n7cpNr6nf/ZKOSNrIyD0IDcUGgceB6HBgbGakRbVexsaod/d1uGxGBthwFMzj0cDbdRE", + "SWyEAQ+6QvFBcd82D+T2rn/oMRTbliPV0RS568CGJMuBB1rsvXDZVuxFSbo0qI+akVE57+PLXllqJaQa", + "7PFTKj8HUP3ttuqytClZaXeJSlNoPmeZW6rKWDnACeCTOX+LffoWEQ91OEWv6jOJVOfMrARtR6KRY3vV", + "sDRROdq4+TuVjc2Cb9VaXWb4W8m4e8m4d4VOhKCrovLtpM5SZHHOqUcvj6VuICSyvYarU4xaKbxLKSx3", + "YAXNtEKt23PFVJXArWLail+T+BUKSZ1OvHGRy6vn9dwwCUhNvARrI3ORy7KP4BEgH0x9yKSvIm709oX3", + "kPDqfPiczXjworcuZfyBl4zIbdaKZkpOKpx82hdEg8N0DkmrFZLIs3+CYYyP3SSOYTVnY3474A0d2q3E", + "vbcYxu8hOReDbZHu6EwN6YxB3BYgfvkCxNBNYkSWTIy7YfiAYD+hsuuvr1RUFZIO5clNkjvbfg0ZzxCZ", + "J9NjF/j+FLgPRnI+DxeRDwnkNH1N53e05xGdiNuj3rOhrykuz+XwBQJ/dXJW8/bqinm98rxzCDx2uH3v", + "+CHfjPw+FMX6cwGZOdzJBebnsEQfJiA2i4Ix/boa4ljX5lhj8GwfZwy6hggLw5kPt0NvbOgfnN44+jZM", + "bxnifjh6Q8EjIrC6dhNm0UxSG+YdmNJtdXzTESas71DMtcVTXJ3IypndR1huTH6Brb5ofayymjwF7GWU", + "N9HcEHO0dwxcF0bEbHnrs+84tbCJSUrUpm4+79PZjj2JD84nUgxJBgNQBfXxlevor/WYSsmLY7u09/b0", + "FUNW3aKikj793oy+eJ/OturS08E3QF985S19VdIXx/YK9OWHMxSYyeoynGEHBQ5gZ+NRhYJxyQbaknMG", + "PYLp+PWEtLt7tB/OZtBzUNBen1/4+tzt/HZ2tqt1R3FIaYAZbQcBQWTp9JxH4COPTUY3RTRBwcyBciSz", + "wssIW3+V73a+9WBAp+rFgMAes4FTHZq/1eiYOUxIDTeHCbFj5zB5eWOVYLJwzwp1t0aqGm2aUY+tfWoB", + "F1MY4zmKGtzhlE529zh+Bn7KuomkFFslcP2kzS90KoraS90qlzoVg/UkGQGMn8K4wpUizcVOOziyfZVI", + "vZFjbk9JOp+DYJZOtE/akssg81JEteK8VZqaKU3VrM4pP8+Ma+tTMZxRSRxXXbt5C1ypUqWeUtviewnG", + "PnG8RF770Ngy/WZuSpLKN3NZwj5wH7bySDWmI+/xG1WNJG34aPUIYyxAMLo/0TWIdtIFCsP4UaOlD4P7", + "8D0kn8WgG61JrECaZWg8PTo5OtHlgFQ8j/5Ku361KDc8qVhswduygti/QCeGJImDHPIKNx0qZpMgoPyT", + "TvGtJ4fshRFPOVVmgSc4nYfhQ084oh1/Fz9YhL/To060Ljuq8d/tI9vFQGZHsHSiHfuBWYaKS/jag+3l", + "jRPF8HSVTI3eX6LFVyvmOBZ4tjFTyKbCr76GY4Tihm0TZe4t32zGf5JDz90nBWooZqoyrlCspHVABHbS", + "7WrZc4/Yk1llSlvUlEdT3mR/PNd4X/NWWsdq5pxpxXPcybTKZ1lzxh+Ox3Jj31Gx4tYeWXJKLgV8yQuK", + "2QeZqdX1lR8rCdk+7cBe0PK2ovhz54bprBAYSCTKdhcHZclralB+y2mGmovrMFvhNCkG91glAmtWg7XB", + "vWgvI2SaJNFKAWwD9F44c4QgVoViVoyP6dZpWPac0EDl+hkCxVYMDmt566V5S41CW4exbNQ+e+5qpgfu", + "BYNtXhfMI8M2Vl7kJM1x2a6VQyuJUFQPW3lgVBDXY84aNdGqXB7dpHxdvJTxHtOXDuNJ2aA83j7ws6ZE", + "BS8wsYH6watXD9YDNovDJGJ1PzIQ5EYZQWGdPsJlpzYNyJaFxJq1uOSjUluOaw+1iZXqfzUSXDI1kdG5", + "RWbVaJosaKUcQXspuSYadjlyhvfMuo0TSh3Q6zKu8gGBmKQ8hbBzD4k7h56pOlQm+PdckRJksGLioRdL", + "N6TA2yjPUJtdqM0utIXsQo1Es5AN2OJVK3eSW4ll4VtzQCaYH0Eub1nKSYep9VTBVt7tlQqYkeKqKmDR", + "8W8KQQzj1PGvq3UFZJ5kXB4ksd950+k8f33+fwEAAP//y98GQS44AwA=", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/api/v1/server/oas/transformers/metadata.go b/api/v1/server/oas/transformers/metadata.go index f7ea8d5eaa..d242a83e6d 100644 --- a/api/v1/server/oas/transformers/metadata.go +++ b/api/v1/server/oas/transformers/metadata.go @@ -4,6 +4,7 @@ import ( "time" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" ) diff --git a/api/v1/server/oas/transformers/v1/worker.go b/api/v1/server/oas/transformers/v1/worker.go index 2e2cf1678c..ef26410302 100644 --- a/api/v1/server/oas/transformers/v1/worker.go +++ 
b/api/v1/server/oas/transformers/v1/worker.go @@ -59,12 +59,10 @@ func ToWorkerRuntimeInfo(worker *sqlcv1.Worker) *gen.WorkerRuntimeInfo { return runtime } -func ToWorkerSqlc(worker *sqlcv1.Worker, remainingSlots *int, webhookUrl *string, actions []string, workflows *[]*sqlcv1.Workflow) *gen.Worker { +func ToWorkerSqlc(worker *sqlcv1.Worker, slotConfig map[string]gen.WorkerSlotConfig, actions []string, workflows *[]*sqlcv1.Workflow) *gen.Worker { dispatcherId := worker.DispatcherId - maxRuns := int(worker.MaxRuns) - status := gen.ACTIVE if worker.IsPaused { @@ -75,10 +73,13 @@ func ToWorkerSqlc(worker *sqlcv1.Worker, remainingSlots *int, webhookUrl *string status = gen.INACTIVE } - var availableRuns int - - if remainingSlots != nil { - availableRuns = *remainingSlots + var slotConfigInt *map[string]gen.WorkerSlotConfig + if len(slotConfig) > 0 { + tmp := make(map[string]gen.WorkerSlotConfig, len(slotConfig)) + for k, v := range slotConfig { + tmp[k] = v + } + slotConfigInt = &tmp } res := &gen.Worker{ @@ -87,15 +88,13 @@ func ToWorkerSqlc(worker *sqlcv1.Worker, remainingSlots *int, webhookUrl *string CreatedAt: worker.CreatedAt.Time, UpdatedAt: worker.UpdatedAt.Time, }, - Name: worker.Name, - Type: gen.WorkerType(worker.Type), - Status: &status, - DispatcherId: dispatcherId, - MaxRuns: &maxRuns, - AvailableRuns: &availableRuns, - WebhookUrl: webhookUrl, - RuntimeInfo: ToWorkerRuntimeInfo(worker), - WebhookId: worker.WebhookId, + Name: worker.Name, + Type: gen.WorkerType(worker.Type), + Status: &status, + DispatcherId: dispatcherId, + SlotConfig: slotConfigInt, + RuntimeInfo: ToWorkerRuntimeInfo(worker), + WebhookId: worker.WebhookId, } if !worker.LastHeartbeatAt.Time.IsZero() { diff --git a/api/v1/server/oas/transformers/worker.go b/api/v1/server/oas/transformers/worker.go index 4f35f335f2..1db63ebd2b 100644 --- a/api/v1/server/oas/transformers/worker.go +++ b/api/v1/server/oas/transformers/worker.go @@ -55,12 +55,10 @@ func ToWorkerRuntimeInfo(worker *sqlcv1.Worker) *gen.WorkerRuntimeInfo { return runtime } -func ToWorkerSqlc(worker *sqlcv1.Worker, remainingSlots *int, webhookUrl *string, actions []string) *gen.Worker { +func ToWorkerSqlc(worker *sqlcv1.Worker, slotConfig map[string]gen.WorkerSlotConfig, actions []string) *gen.Worker { dispatcherId := worker.DispatcherId - maxRuns := int(worker.MaxRuns) - status := gen.ACTIVE if worker.IsPaused { @@ -71,23 +69,24 @@ func ToWorkerSqlc(worker *sqlcv1.Worker, remainingSlots *int, webhookUrl *string status = gen.INACTIVE } - var availableRuns int - - if remainingSlots != nil { - availableRuns = *remainingSlots + var slotConfigInt *map[string]gen.WorkerSlotConfig + if len(slotConfig) > 0 { + tmp := make(map[string]gen.WorkerSlotConfig, len(slotConfig)) + for k, v := range slotConfig { + tmp[k] = v + } + slotConfigInt = &tmp } res := &gen.Worker{ - Metadata: *toAPIMetadata(worker.ID, worker.CreatedAt.Time, worker.UpdatedAt.Time), - Name: worker.Name, - Type: gen.WorkerType(worker.Type), - Status: &status, - DispatcherId: dispatcherId, - MaxRuns: &maxRuns, - AvailableRuns: &availableRuns, - WebhookUrl: webhookUrl, - RuntimeInfo: ToWorkerRuntimeInfo(worker), - WebhookId: worker.WebhookId, + Metadata: *toAPIMetadata(worker.ID, worker.CreatedAt.Time, worker.UpdatedAt.Time), + Name: worker.Name, + Type: gen.WorkerType(worker.Type), + Status: &status, + DispatcherId: dispatcherId, + SlotConfig: slotConfigInt, + RuntimeInfo: ToWorkerRuntimeInfo(worker), + WebhookId: worker.WebhookId, } if !worker.LastHeartbeatAt.Time.IsZero() { diff --git 
a/api/v1/server/oas/transformers/workflow.go b/api/v1/server/oas/transformers/workflow.go index 16f00c4d17..a827b4aec8 100644 --- a/api/v1/server/oas/transformers/workflow.go +++ b/api/v1/server/oas/transformers/workflow.go @@ -213,6 +213,7 @@ func ToJob(job *sqlcv1.Job, steps []*sqlcv1.GetStepsForJobsRow) *gen.Job { } func ToStep(step *sqlcv1.Step, parents []uuid.UUID) *gen.Step { + isDurable := step.IsDurable res := &gen.Step{ Metadata: *toAPIMetadata( step.ID, @@ -224,6 +225,7 @@ func ToStep(step *sqlcv1.Step, parents []uuid.UUID) *gen.Step { TenantId: step.TenantId.String(), ReadableId: step.ReadableId.String, Timeout: &step.Timeout.String, + IsDurable: &isDurable, } parentStr := make([]string, 0) diff --git a/api/v1/server/run/run.go b/api/v1/server/run/run.go index 2101d2da9a..80665748f9 100644 --- a/api/v1/server/run/run.go +++ b/api/v1/server/run/run.go @@ -510,13 +510,16 @@ func (t *APIServer) registerSpec(g *echo.Group, spec *openapi3.T) (*populator.Po }) populatorMW.RegisterGetter("worker", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + idUuid, err := uuid.Parse(id) if err != nil { return nil, "", echo.NewHTTPError(http.StatusBadRequest, "invalid worker id") } - worker, err := config.V1.Workers().GetWorkerById(idUuid) + worker, err := config.V1.Workers().GetWorkerById(ctx, idUuid) if err != nil { return nil, "", err diff --git a/cmd/hatchet-cli/cli/tui/worker_details.go b/cmd/hatchet-cli/cli/tui/worker_details.go index 709d54bc69..b0cf8ad47d 100644 --- a/cmd/hatchet-cli/cli/tui/worker_details.go +++ b/cmd/hatchet-cli/cli/tui/worker_details.go @@ -245,10 +245,18 @@ func (v *WorkerDetailsView) renderWorkerInfo() string { b.WriteString(sectionStyle.Render(labelStyle.Render("Last Heartbeat: ") + lastHeartbeat)) b.WriteString("\n\n") - // Available Run Slots + // Available Run Slots - aggregate across all slot types slotsStr := "N/A" - if v.worker.AvailableRuns != nil && v.worker.MaxRuns != nil { - slotsStr = fmt.Sprintf("%d / %d", *v.worker.AvailableRuns, *v.worker.MaxRuns) + if v.worker.SlotConfig != nil && len(*v.worker.SlotConfig) > 0 { + totalAvailable := 0 + totalLimit := 0 + for _, slotConfig := range *v.worker.SlotConfig { + if slotConfig.Available != nil { + totalAvailable += *slotConfig.Available + } + totalLimit += slotConfig.Limit + } + slotsStr = fmt.Sprintf("%d / %d", totalAvailable, totalLimit) } b.WriteString(sectionStyle.Render(labelStyle.Render("Available Run Slots: ") + slotsStr)) b.WriteString("\n\n") diff --git a/cmd/hatchet-cli/cli/tui/workers.go b/cmd/hatchet-cli/cli/tui/workers.go index 12a2b72711..6a45594deb 100644 --- a/cmd/hatchet-cli/cli/tui/workers.go +++ b/cmd/hatchet-cli/cli/tui/workers.go @@ -512,10 +512,18 @@ func (v *WorkersView) updateTableRows() { // Started At startedAt := formatRelativeTime(worker.Metadata.CreatedAt) - // Slots + // Slots - aggregate across all slot types slots := "N/A" - if worker.AvailableRuns != nil && worker.MaxRuns != nil { - slots = fmt.Sprintf("%d / %d", *worker.AvailableRuns, *worker.MaxRuns) + if worker.SlotConfig != nil && len(*worker.SlotConfig) > 0 { + totalAvailable := 0 + totalLimit := 0 + for _, slotConfig := range *worker.SlotConfig { + if slotConfig.Available != nil { + totalAvailable += *slotConfig.Available + } + totalLimit += slotConfig.Limit + } + slots = fmt.Sprintf("%d / %d", totalAvailable, totalLimit) } // Last Seen diff --git 
a/cmd/hatchet-engine/engine/run.go b/cmd/hatchet-engine/engine/run.go index a71f3e3045..54a9ef017b 100644 --- a/cmd/hatchet-engine/engine/run.go +++ b/cmd/hatchet-engine/engine/run.go @@ -362,6 +362,7 @@ func runV0Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro dispatcher.WithPayloadSizeThreshold(sc.Runtime.GRPCMaxMsgSize), dispatcher.WithDefaultMaxWorkerBacklogSize(int64(sc.Runtime.GRPCWorkerStreamMaxBacklogSize)), dispatcher.WithWorkflowRunBufferSize(sc.Runtime.WorkflowRunBufferSize), + dispatcher.WithVersion(sc.Version), ) if err != nil { @@ -799,6 +800,7 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro dispatcher.WithPayloadSizeThreshold(sc.Runtime.GRPCMaxMsgSize), dispatcher.WithDefaultMaxWorkerBacklogSize(int64(sc.Runtime.GRPCWorkerStreamMaxBacklogSize)), dispatcher.WithWorkflowRunBufferSize(sc.Runtime.WorkflowRunBufferSize), + dispatcher.WithVersion(sc.Version), ) if err != nil { diff --git a/cmd/hatchet-loadtest/do.go b/cmd/hatchet-loadtest/do.go index 43bf8e3348..33ebfe4479 100644 --- a/cmd/hatchet-loadtest/do.go +++ b/cmd/hatchet-loadtest/do.go @@ -15,15 +15,14 @@ type avgResult struct { func do(config LoadTestConfig) error { l.Info().Msgf("testing with duration=%s, eventsPerSecond=%d, delay=%s, wait=%s, concurrency=%d, averageDurationThreshold=%s", config.Duration, config.Events, config.Delay, config.Wait, config.Concurrency, config.AverageDurationThreshold) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - after := 10 * time.Second - go func() { - time.Sleep(config.Duration + after + config.Wait + 5*time.Second) - cancel() - }() + // The worker may intentionally be delayed (WorkerDelay) before it starts consuming tasks. + // The test timeout must include this delay, otherwise we can cancel while work is still expected to complete. + timeout := config.WorkerDelay + after + config.Duration + config.Wait + 30*time.Second + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() ch := make(chan int64, 2) durations := make(chan time.Duration, config.Events) @@ -98,7 +97,20 @@ func do(config LoadTestConfig) error { finalDurationResult := <-durationsResult finalScheduledResult := <-scheduledResult - log.Printf("ℹ️ emitted %d, executed %d, uniques %d, using %d events/s", emitted, executed, uniques, config.Events) + expected := int64(config.EventFanout) * emitted * int64(config.DagSteps) + + // NOTE: `emit()` returns successfully pushed events (not merely generated IDs), + // so `emitted` here is effectively "pushed". 
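To make the timing and accounting above concrete, here is a minimal, self-contained sketch of the same arithmetic. The struct below is a stand-in for `LoadTestConfig` with only the fields used here, and the concrete values are hypothetical:

```go
package main

import (
	"fmt"
	"time"
)

// loadTestConfig is a stand-in for the fields of LoadTestConfig that the
// timeout and invariant calculations in do.go depend on (assumed shape).
type loadTestConfig struct {
	WorkerDelay time.Duration // the worker may start consuming late
	Duration    time.Duration // how long events are emitted
	Wait        time.Duration // grace period after emission stops
	EventFanout int           // workflow runs triggered per event
	DagSteps    int           // steps executed per workflow run
}

func main() {
	cfg := loadTestConfig{
		WorkerDelay: 10 * time.Second,
		Duration:    60 * time.Second,
		Wait:        30 * time.Second,
		EventFanout: 2,
		DagSteps:    3,
	}

	after := 10 * time.Second

	// The test context must outlive the delayed worker start, the emission
	// window, and the post-emission wait, plus a safety margin.
	timeout := cfg.WorkerDelay + after + cfg.Duration + cfg.Wait + 30*time.Second
	fmt.Println("context timeout:", timeout) // 2m20s

	// Every successfully pushed event should fan out into EventFanout
	// workflow runs, each executing DagSteps steps exactly once.
	var pushed int64 = 1000
	expected := int64(cfg.EventFanout) * pushed * int64(cfg.DagSteps)
	fmt.Println("expected executions:", expected) // 6000
}
```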
+ log.Printf( + "ℹ️ pushed %d, executed %d, uniques %d, using %d events/s (fanout=%d dagSteps=%d expected=%d)", + emitted, + executed, + uniques, + config.Events, + config.EventFanout, + config.DagSteps, + expected, + ) if executed == 0 { return fmt.Errorf("❌ no events executed") @@ -107,12 +119,12 @@ func do(config LoadTestConfig) error { log.Printf("ℹ️ final average duration per executed event: %s", finalDurationResult.avg) log.Printf("ℹ️ final average scheduling time per event: %s", finalScheduledResult.avg) - if int64(config.EventFanout)*emitted*int64(config.DagSteps) != executed { - log.Printf("⚠️ warning: emitted and executed counts do not match: %d != %d", int64(config.EventFanout)*emitted*int64(config.DagSteps), executed) + if expected != executed { + log.Printf("⚠️ warning: pushed and executed counts do not match: expected=%d got=%d", expected, executed) } - if int64(config.EventFanout)*emitted*int64(config.DagSteps) != uniques { - return fmt.Errorf("❌ emitted and unique executed counts do not match: %d != %d", int64(config.EventFanout)*emitted, uniques) + if expected != uniques { + return fmt.Errorf("❌ pushed and unique executed counts do not match: expected=%d got=%d (fanout=%d pushed=%d dagSteps=%d)", expected, uniques, config.EventFanout, emitted, config.DagSteps) } // Add a small tolerance (1% or 1ms, whichever is smaller) diff --git a/cmd/hatchet-loadtest/emit.go b/cmd/hatchet-loadtest/emit.go index 571a89a247..6ab93f7eaf 100644 --- a/cmd/hatchet-loadtest/emit.go +++ b/cmd/hatchet-loadtest/emit.go @@ -53,6 +53,7 @@ func emit(ctx context.Context, namespace string, amountPerSecond int, duration t } var id int64 + var pushed int64 // Precompute payload data. payloadSize := parseSize(payloadArg) @@ -68,18 +69,34 @@ func emit(ctx context.Context, namespace string, amountPerSecond int, duration t wg.Add(1) go func() { defer wg.Done() - for ev := range jobCh { - l.Info().Msgf("pushing event %d", ev.ID) - - err := c.Events().Push(context.Background(), "load-test:event", ev, client.WithEventMetadata(map[string]string{ - "event_id": fmt.Sprintf("%d", ev.ID), - })) - if err != nil { - panic(fmt.Errorf("error pushing event: %w", err)) + for { + select { + case <-ctx.Done(): + // Stop promptly on cancellation. Remaining buffered events (if any) are intentionally dropped. + return + case ev, ok := <-jobCh: + if !ok { + return + } + + l.Info().Msgf("pushing event %d", ev.ID) + + err := c.Events().Push(ctx, "load-test:event", ev, client.WithEventMetadata(map[string]string{ + "event_id": fmt.Sprintf("%d", ev.ID), + })) + if err != nil { + // If the test is shutting down, treat this as a clean stop rather than a correctness failure. 
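The rewritten goroutine follows the standard Go shutdown pattern: select on `ctx.Done()` alongside the channel receive, and treat a push error that occurs after cancellation as a clean stop rather than a correctness failure. A stripped-down sketch of that pattern; `push` here is a hypothetical stand-in for the `c.Events().Push` call, not the real client API:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// push is a hypothetical stand-in for the real client call in emit.go;
// it fails once the context has been cancelled.
func push(ctx context.Context, id int64) error {
	return ctx.Err()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	jobs := make(chan int64, 8)
	var pushed int64
	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-ctx.Done():
				// Stop promptly; buffered jobs are intentionally dropped.
				return
			case id, ok := <-jobs:
				if !ok {
					return // producer closed the channel: normal completion
				}
				if err := push(ctx, id); err != nil {
					if ctx.Err() != nil {
						return // shutting down: a clean stop, not a failure
					}
					panic(err) // a real failure while the test is still running
				}
				atomic.AddInt64(&pushed, 1)
			}
		}
	}()

	for i := int64(0); i < 5; i++ {
		jobs <- i
	}
	close(jobs)
	wg.Wait()

	// Report successfully pushed events, mirroring emit()'s return value.
	fmt.Println("pushed:", atomic.LoadInt64(&pushed))
}
```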
+ if ctx.Err() != nil { + return + } + panic(fmt.Errorf("error pushing event: %w", err)) + } + + atomic.AddInt64(&pushed, 1) + took := time.Since(ev.CreatedAt) + l.Info().Msgf("pushed event %d took %s", ev.ID, took) + scheduled <- took } - took := time.Since(ev.CreatedAt) - l.Info().Msgf("pushed event %d took %s", ev.ID, took) - scheduled <- took } }() } @@ -115,5 +132,5 @@ loop: close(jobCh) wg.Wait() - return id + return atomic.LoadInt64(&pushed) } diff --git a/cmd/hatchet-loadtest/load_e2e_test.go b/cmd/hatchet-loadtest/load_e2e_test.go index dd6bb846e6..7c2ffbfff3 100644 --- a/cmd/hatchet-loadtest/load_e2e_test.go +++ b/cmd/hatchet-loadtest/load_e2e_test.go @@ -4,6 +4,7 @@ package main import ( "log" + "os" "testing" "time" @@ -28,6 +29,24 @@ func TestLoadCLI(t *testing.T) { "loadtest", ) + avgThreshold := 300 * time.Millisecond + if v := os.Getenv("HATCHET_LOADTEST_AVERAGE_DURATION_THRESHOLD"); v != "" { + if parsed, err := time.ParseDuration(v); err == nil { + avgThreshold = parsed + } else { + t.Fatalf("invalid HATCHET_LOADTEST_AVERAGE_DURATION_THRESHOLD=%q: %v", v, err) + } + } + + startupSleep := 15 * time.Second + if v := os.Getenv("HATCHET_LOADTEST_STARTUP_SLEEP"); v != "" { + if parsed, err := time.ParseDuration(v); err == nil { + startupSleep = parsed + } else { + t.Fatalf("invalid HATCHET_LOADTEST_STARTUP_SLEEP=%q: %v", v, err) + } + } + tests := []struct { name string config LoadTestConfig @@ -49,7 +68,7 @@ func TestLoadCLI(t *testing.T) { RlKeys: 0, RlLimit: 0, RlDurationUnit: "", - AverageDurationThreshold: 300 * time.Millisecond, + AverageDurationThreshold: avgThreshold, }, }, { @@ -68,7 +87,7 @@ func TestLoadCLI(t *testing.T) { RlKeys: 0, RlLimit: 0, RlDurationUnit: "", - AverageDurationThreshold: 300 * time.Millisecond, + AverageDurationThreshold: avgThreshold, }, }, { @@ -87,7 +106,7 @@ func TestLoadCLI(t *testing.T) { RlKeys: 0, RlLimit: 0, RlDurationUnit: "", - AverageDurationThreshold: 300 * time.Millisecond, + AverageDurationThreshold: avgThreshold, }, }, { @@ -106,7 +125,7 @@ func TestLoadCLI(t *testing.T) { RlKeys: 0, RlLimit: 0, RlDurationUnit: "", - AverageDurationThreshold: 300 * time.Millisecond, + AverageDurationThreshold: avgThreshold, }, }, { @@ -126,7 +145,7 @@ func TestLoadCLI(t *testing.T) { RlKeys: 0, RlLimit: 0, RlDurationUnit: "", - AverageDurationThreshold: 300 * time.Millisecond, + AverageDurationThreshold: avgThreshold, }, }, { @@ -145,13 +164,13 @@ func TestLoadCLI(t *testing.T) { RlKeys: 10, RlLimit: 100, RlDurationUnit: "second", - AverageDurationThreshold: 300 * time.Millisecond, + AverageDurationThreshold: avgThreshold, }, }, } // TODO instead of waiting, figure out when the engine setup is complete - time.Sleep(15 * time.Second) + time.Sleep(startupSleep) for _, tt := range tests { tt := tt // pin the loop variable diff --git a/cmd/hatchet-migrate/migrate/migrations/20260215120000_v1_0_77.sql b/cmd/hatchet-migrate/migrate/migrations/20260215120000_v1_0_78.sql similarity index 100% rename from cmd/hatchet-migrate/migrate/migrations/20260215120000_v1_0_77.sql rename to cmd/hatchet-migrate/migrate/migrations/20260215120000_v1_0_78.sql diff --git a/cmd/hatchet-migrate/migrate/migrations/20260216000001_v1_0_79_a_multi_slot_schema_and_triggers.sql b/cmd/hatchet-migrate/migrate/migrations/20260216000001_v1_0_79_a_multi_slot_schema_and_triggers.sql new file mode 100644 index 0000000000..7e47c46142 --- /dev/null +++ b/cmd/hatchet-migrate/migrate/migrations/20260216000001_v1_0_79_a_multi_slot_schema_and_triggers.sql @@ -0,0 +1,177 @@ +-- +goose Up +-- 
+goose StatementBegin +ALTER TABLE "Step" + ADD COLUMN IF NOT EXISTS "isDurable" BOOLEAN NOT NULL DEFAULT false; + +CREATE TABLE IF NOT EXISTS v1_worker_slot_config ( + tenant_id UUID NOT NULL, + worker_id UUID NOT NULL, + slot_type TEXT NOT NULL, + max_units INTEGER NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (tenant_id, worker_id, slot_type) +); + +CREATE TABLE IF NOT EXISTS v1_step_slot_request ( + tenant_id UUID NOT NULL, + step_id UUID NOT NULL, + slot_type TEXT NOT NULL, + units INTEGER NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (tenant_id, step_id, slot_type) +); + +CREATE TABLE IF NOT EXISTS v1_task_runtime_slot ( + tenant_id UUID NOT NULL, + task_id BIGINT NOT NULL, + task_inserted_at TIMESTAMPTZ NOT NULL, + retry_count INTEGER NOT NULL, + worker_id UUID NOT NULL, + -- slot_type is user defined, we use default and durable internally as defaults + slot_type TEXT NOT NULL, + units INTEGER NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (task_id, task_inserted_at, retry_count, slot_type) +); + +-- Compatibility triggers for blue/green: keep new slot tables updated from the old write paths. + +CREATE OR REPLACE FUNCTION v1_worker_slot_config_insert_function() +RETURNS TRIGGER AS +$$ +BEGIN + INSERT INTO v1_worker_slot_config (tenant_id, worker_id, slot_type, max_units) + SELECT + "tenantId", + "id", + 'default'::text, + "maxRuns" + FROM new_rows + WHERE "maxRuns" IS NOT NULL + ON CONFLICT (tenant_id, worker_id, slot_type) DO NOTHING; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS v1_worker_slot_config_insert_trigger ON "Worker"; + +CREATE TRIGGER v1_worker_slot_config_insert_trigger +AFTER INSERT ON "Worker" +REFERENCING NEW TABLE AS new_rows +FOR EACH STATEMENT +EXECUTE FUNCTION v1_worker_slot_config_insert_function(); + +CREATE OR REPLACE FUNCTION v1_step_slot_request_insert_function() +RETURNS TRIGGER AS +$$ +BEGIN + INSERT INTO v1_step_slot_request (tenant_id, step_id, slot_type, units) + SELECT + "tenantId", + "id", + CASE WHEN "isDurable" THEN 'durable'::text ELSE 'default'::text END, + 1 + FROM new_rows + ON CONFLICT (tenant_id, step_id, slot_type) DO NOTHING; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS v1_step_slot_request_insert_trigger ON "Step"; + +CREATE TRIGGER v1_step_slot_request_insert_trigger +AFTER INSERT ON "Step" +REFERENCING NEW TABLE AS new_rows +FOR EACH STATEMENT +EXECUTE FUNCTION v1_step_slot_request_insert_function(); + +CREATE OR REPLACE FUNCTION v1_task_runtime_slot_insert_function() +RETURNS TRIGGER AS +$$ +BEGIN + INSERT INTO v1_task_runtime_slot ( + tenant_id, + task_id, + task_inserted_at, + retry_count, + worker_id, + slot_type, + units + ) + SELECT + tenant_id, + task_id, + task_inserted_at, + retry_count, + worker_id, + 'default'::text, + 1 + FROM new_rows + WHERE worker_id IS NOT NULL + ON CONFLICT (task_id, task_inserted_at, retry_count, slot_type) DO NOTHING; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS v1_task_runtime_slot_insert_trigger ON v1_task_runtime; + +CREATE TRIGGER v1_task_runtime_slot_insert_trigger +AFTER INSERT ON v1_task_runtime +REFERENCING NEW TABLE AS new_rows +FOR EACH STATEMENT +EXECUTE FUNCTION v1_task_runtime_slot_insert_function(); + +CREATE OR REPLACE FUNCTION v1_task_runtime_slot_delete_function() +RETURNS TRIGGER AS 
+$$ +BEGIN + DELETE FROM v1_task_runtime_slot s + USING deleted_rows d + WHERE s.task_id = d.task_id + AND s.task_inserted_at = d.task_inserted_at + AND s.retry_count = d.retry_count; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS v1_task_runtime_slot_delete_trigger ON v1_task_runtime; + +CREATE TRIGGER v1_task_runtime_slot_delete_trigger +AFTER DELETE ON v1_task_runtime +REFERENCING OLD TABLE AS deleted_rows +FOR EACH STATEMENT +EXECUTE FUNCTION v1_task_runtime_slot_delete_function(); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TRIGGER IF EXISTS v1_worker_slot_config_insert_trigger ON "Worker"; +DROP FUNCTION IF EXISTS v1_worker_slot_config_insert_function(); + +DROP TRIGGER IF EXISTS v1_step_slot_request_insert_trigger ON "Step"; +DROP FUNCTION IF EXISTS v1_step_slot_request_insert_function(); + +DROP TRIGGER IF EXISTS v1_task_runtime_slot_insert_trigger ON v1_task_runtime; +DROP FUNCTION IF EXISTS v1_task_runtime_slot_insert_function(); + +DROP TRIGGER IF EXISTS v1_task_runtime_slot_delete_trigger ON v1_task_runtime; +DROP FUNCTION IF EXISTS v1_task_runtime_slot_delete_function(); + +DROP TABLE IF EXISTS v1_task_runtime_slot; +DROP TABLE IF EXISTS v1_step_slot_request; +DROP TABLE IF EXISTS v1_worker_slot_config; + +ALTER TABLE "Step" + DROP COLUMN IF EXISTS "isDurable"; +-- +goose StatementEnd diff --git a/cmd/hatchet-migrate/migrate/migrations/20260216000002_v1_0_79_b_add_slot_indexes.sql b/cmd/hatchet-migrate/migrate/migrations/20260216000002_v1_0_79_b_add_slot_indexes.sql new file mode 100644 index 0000000000..d424f50d9f --- /dev/null +++ b/cmd/hatchet-migrate/migrate/migrations/20260216000002_v1_0_79_b_add_slot_indexes.sql @@ -0,0 +1,12 @@ +-- +goose Up +-- +goose NO TRANSACTION + +CREATE INDEX CONCURRENTLY IF NOT EXISTS v1_task_runtime_slot_tenant_worker_type_idx + ON v1_task_runtime_slot (tenant_id ASC, worker_id ASC, slot_type ASC); + +CREATE INDEX CONCURRENTLY IF NOT EXISTS v1_step_slot_request_step_idx + ON v1_step_slot_request (step_id ASC); + +-- +goose Down +DROP INDEX IF EXISTS v1_task_runtime_slot_tenant_worker_type_idx; +DROP INDEX IF EXISTS v1_step_slot_request_step_idx; diff --git a/cmd/hatchet-migrate/migrate/migrations/20260216000003_v1_0_79_c_backfill_slots.go b/cmd/hatchet-migrate/migrate/migrations/20260216000003_v1_0_79_c_backfill_slots.go new file mode 100644 index 0000000000..efec15789c --- /dev/null +++ b/cmd/hatchet-migrate/migrate/migrations/20260216000003_v1_0_79_c_backfill_slots.go @@ -0,0 +1,218 @@ +package migrations + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/google/uuid" + "github.com/pressly/goose/v3" +) + +func init() { + goose.AddMigrationNoTxContext(up20260216000003, down20260216000003) +} + +const backfillSlotsBatchSize = 10_000 + +var zeroUUID = uuid.Nil + +func up20260216000003(ctx context.Context, db *sql.DB) error { + if err := backfillWorkerSlotConfigs(ctx, db); err != nil { + return err + } + + if err := backfillStepSlotRequests(ctx, db); err != nil { + return err + } + + if err := backfillTaskRuntimeSlots(ctx, db); err != nil { + return err + } + + return nil +} + +// down20260216000003 is intentionally a no-op. +// +// By the time this migration runs, new services may already be writing to these +// tables. Deleting rows here would risk removing valid post-cutover data. 
+func down20260216000003(ctx context.Context, db *sql.DB) error { + return nil +} + +func backfillWorkerSlotConfigs(ctx context.Context, db *sql.DB) error { + lastWorkerID := zeroUUID + + for { + var ( + n int + nextWorkerID uuid.NullUUID + ) + + err := db.QueryRowContext(ctx, ` +WITH batch AS ( + SELECT + "tenantId" AS tenant_id, + "id" AS worker_id, + "maxRuns" AS max_units + FROM "Worker" + WHERE "maxRuns" IS NOT NULL + AND "id" > $1::uuid + ORDER BY "id" + LIMIT $2 +), +ins AS ( + INSERT INTO v1_worker_slot_config (tenant_id, worker_id, slot_type, max_units) + SELECT + tenant_id, + worker_id, + 'default'::text, + max_units + FROM batch + ON CONFLICT (tenant_id, worker_id, slot_type) DO NOTHING +) +SELECT + (SELECT COUNT(*) FROM batch) AS n, + (SELECT worker_id FROM batch ORDER BY worker_id DESC LIMIT 1) AS last_worker_id; +`, lastWorkerID, backfillSlotsBatchSize).Scan(&n, &nextWorkerID) + if err != nil { + return fmt.Errorf("backfill v1_worker_slot_config: %w", err) + } + + if n == 0 { + return nil + } + + if !nextWorkerID.Valid { + return fmt.Errorf("backfill v1_worker_slot_config: expected last keys for non-empty batch") + } + + lastWorkerID = nextWorkerID.UUID + } +} + +func backfillStepSlotRequests(ctx context.Context, db *sql.DB) error { + lastStepID := zeroUUID + + for { + var ( + n int + nextStep uuid.NullUUID + ) + + err := db.QueryRowContext(ctx, ` +WITH batch AS ( + SELECT + "tenantId" AS tenant_id, + "id" AS step_id, + "isDurable" AS is_durable + FROM "Step" + WHERE "id" > $1::uuid + ORDER BY "id" + LIMIT $2 +), +ins AS ( + INSERT INTO v1_step_slot_request (tenant_id, step_id, slot_type, units) + SELECT + tenant_id, + step_id, + CASE WHEN is_durable THEN 'durable'::text ELSE 'default'::text END, + 1 + FROM batch + ON CONFLICT (tenant_id, step_id, slot_type) DO NOTHING +) +SELECT + (SELECT COUNT(*) FROM batch) AS n, + (SELECT step_id FROM batch ORDER BY step_id DESC LIMIT 1) AS last_step_id; +`, lastStepID, backfillSlotsBatchSize).Scan(&n, &nextStep) + if err != nil { + return fmt.Errorf("backfill v1_step_slot_request: %w", err) + } + + if n == 0 { + return nil + } + + if !nextStep.Valid { + return fmt.Errorf("backfill v1_step_slot_request: expected last keys for non-empty batch") + } + + lastStepID = nextStep.UUID + } +} + +func backfillTaskRuntimeSlots(ctx context.Context, db *sql.DB) error { + var ( + lastTaskID int64 + lastTaskInsertedAt = time.Unix(0, 0).UTC() + lastRetryCount int32 + ) + + for { + var ( + n int + nextTaskID sql.NullInt64 + nextInsertedAt sql.NullTime + nextRetry sql.NullInt32 + ) + + err := db.QueryRowContext(ctx, ` +WITH batch AS ( + SELECT + tenant_id, + task_id, + task_inserted_at, + retry_count, + worker_id + FROM v1_task_runtime + WHERE worker_id IS NOT NULL + AND (task_id, task_inserted_at, retry_count) > ($1::bigint, $2::timestamptz, $3::int) + ORDER BY task_id, task_inserted_at, retry_count + LIMIT $4 +), +ins AS ( + INSERT INTO v1_task_runtime_slot ( + tenant_id, + task_id, + task_inserted_at, + retry_count, + worker_id, + slot_type, + units + ) + SELECT + tenant_id, + task_id, + task_inserted_at, + retry_count, + worker_id, + 'default'::text, + 1 + FROM batch + ON CONFLICT (task_id, task_inserted_at, retry_count, slot_type) DO NOTHING +) +SELECT + (SELECT COUNT(*) FROM batch) AS n, + (SELECT task_id FROM batch ORDER BY task_id DESC, task_inserted_at DESC, retry_count DESC LIMIT 1) AS last_task_id, + (SELECT task_inserted_at FROM batch ORDER BY task_id DESC, task_inserted_at DESC, retry_count DESC LIMIT 1) AS last_task_inserted_at, + (SELECT 
retry_count FROM batch ORDER BY task_id DESC, task_inserted_at DESC, retry_count DESC LIMIT 1) AS last_retry_count; `, lastTaskID, lastTaskInsertedAt, lastRetryCount, backfillSlotsBatchSize).Scan(&n, &nextTaskID, &nextInsertedAt, &nextRetry) + if err != nil { + return fmt.Errorf("backfill v1_task_runtime_slot: %w", err) + } + + if n == 0 { + return nil + } + + if !nextTaskID.Valid || !nextInsertedAt.Valid || !nextRetry.Valid { + return fmt.Errorf("backfill v1_task_runtime_slot: expected last keys for non-empty batch") + } + + lastTaskID = nextTaskID.Int64 + lastTaskInsertedAt = nextInsertedAt.Time + lastRetryCount = nextRetry.Int32 + } +} diff --git a/examples/python/durable/test_durable.py index 1287c3e788..f85115ee40 100644 --- a/examples/python/durable/test_durable.py +++ b/examples/python/durable/test_durable.py @@ -27,15 +27,11 @@ async def test_durable(hatchet: Hatchet) -> None: active_workers = [w for w in workers.rows if w.status == "ACTIVE"] - assert len(active_workers) == 2 + assert len(active_workers) == 1 assert any( w.name == hatchet.config.apply_namespace("e2e-test-worker") for w in active_workers ) - assert any( - w.name == hatchet.config.apply_namespace("e2e-test-worker_durable") - for w in active_workers - ) assert result["durable_task"]["status"] == "success" diff --git a/examples/python/simple/chaos_test.py new file mode 100644 index 0000000000..b2a41c28af --- /dev/null +++ b/examples/python/simple/chaos_test.py @@ -0,0 +1,155 @@ +# Chaos test harness: repeatedly connect a worker, trigger tasks, and disconnect. +import argparse +import asyncio +import signal +import threading +import time +import traceback +from typing import Any + +from datetime import datetime, timezone +from pathlib import Path + +from hatchet_sdk import Context, EmptyModel, Hatchet + +hatchet = Hatchet(debug=True) + +FAILURE_LOG = Path(__file__).parent / "failures.log" + +# Track the current worker so we can clean up on Ctrl+C +_current_worker = None +_current_thread = None +# poetry run python ./simple/chaos_test.py --suffix new + + +def log_failure(phase: str, error: Exception) -> None: + """Log a failure loudly to stderr and append to the failures log file.""" + timestamp = datetime.now(timezone.utc).isoformat() + tb = traceback.format_exception(type(error), error, error.__traceback__) + tb_str = "".join(tb) + + msg = f"[{timestamp}] FAILURE during {phase}: {error}\n{tb_str}" + + # Loud stderr output + print(f"\n{'!' * 60}", flush=True) + print(f"!!! FAILURE: {phase} !!!", flush=True) + print(msg, flush=True) + print(f"{'!' 
* 60}\n", flush=True) + + # Append to log file + with open(FAILURE_LOG, "a") as f: + f.write(msg) + f.write("-" * 60 + "\n") + + +@hatchet.task() +def simple(input: EmptyModel, ctx: Context) -> dict[str, str]: + print("Executing simple task!") + return {"result": "Hello, world!"} + + +@hatchet.durable_task() +def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]: + print("Executing durable task!") + return {"result": "Hello from durable!"} + + +def _force_stop_worker(worker: Any, thread: threading.Thread) -> None: + """Forcefully terminate the worker and its child processes.""" + worker.killing = True + worker._terminate_processes() + worker._close_queues() + if worker.loop and worker.loop.is_running(): + worker.loop.call_soon_threadsafe(worker.loop.stop) + thread.join(timeout=5) + + +def start_worker(suffix: str = "") -> tuple[Any, threading.Thread]: + """Create and start a worker in a background thread.""" + name = f"test-worker-{suffix}" if suffix else "test-worker" + worker = hatchet.worker( + name, + workflows=[simple, simple_durable], + slots=10, + ) + worker.handle_kill = False # Prevent sys.exit on shutdown + + # Restore default signal handlers so Ctrl+C raises KeyboardInterrupt + signal.signal(signal.SIGINT, signal.default_int_handler) + signal.signal(signal.SIGTERM, signal.SIG_DFL) + + thread = threading.Thread(target=worker.start, daemon=True) + thread.start() + + # Give the worker a moment to initialize + time.sleep(2) + print("Worker connected.") + return worker, thread + + +def stop_worker(worker: Any, thread: threading.Thread) -> None: + """Stop the worker gracefully.""" + try: + if worker.loop and worker.loop.is_running(): + asyncio.run_coroutine_threadsafe(worker.exit_gracefully(), worker.loop) + thread.join(timeout=10) + if thread.is_alive(): + _force_stop_worker(worker, thread) + print("Worker disconnected.") + except Exception as e: + log_failure("worker disconnect", e) + + +def main() -> None: + global _current_worker, _current_thread + + parser = argparse.ArgumentParser() + parser.add_argument( + "--suffix", + default="", + help="Suffix to append to the worker name (e.g. 
'old' or 'new')", + ) + args = parser.parse_args() + + try: + while True: + # --- Connect the worker --- + print("\n=== Connecting worker ===") + try: + worker, thread = start_worker(args.suffix) + _current_worker, _current_thread = worker, thread + except Exception as e: + log_failure("worker connect", e) + time.sleep(5) + continue + + # --- Trigger tasks every 1 second for 5 seconds --- + for tick in range(5): + time.sleep(1) + print(f"\n--- Triggering tasks (tick {tick + 1}/5) ---") + try: + ref = simple.run_no_wait() + print(f"Task triggered: {ref}") + except Exception as e: + log_failure(f"task trigger (tick {tick + 1}/5)", e) + try: + ref = simple_durable.run_no_wait() + print(f"Durable task triggered: {ref}") + except Exception as e: + log_failure(f"durable task trigger (tick {tick + 1}/5)", e) + + # --- Disconnect the worker --- + print("\n=== Disconnecting worker ===") + stop_worker(worker, thread) + _current_worker, _current_thread = None, None + + except KeyboardInterrupt: + print("\n\nCtrl+C received, shutting down...") + if _current_worker and _current_thread: + _force_stop_worker(_current_worker, _current_thread) + print("Bye!") + + + +if __name__ == "__main__": + main() diff --git a/examples/python/simple/chaos_worker.py b/examples/python/simple/chaos_worker.py new file mode 100644 index 0000000000..3abe078a46 --- /dev/null +++ b/examples/python/simple/chaos_worker.py @@ -0,0 +1,154 @@ +# This is a worker script that will introduce chaos to test +# complex deployments and migrations. +import argparse +import asyncio +import signal +import threading +import time +import traceback +from datetime import datetime, timezone +from pathlib import Path + +from hatchet_sdk import Context, EmptyModel, Hatchet + +hatchet = Hatchet(debug=True) + +FAILURE_LOG = Path(__file__).parent / "failures.log" + +# Track the current worker so we can clean up on Ctrl+C +_current_worker = None +_current_thread = None +# poetry run python ./simple/worker_test.py --suffix new + + +def log_failure(phase: str, error: Exception) -> None: + """Log a failure loudly to stderr and append to the failures log file.""" + timestamp = datetime.now(timezone.utc).isoformat() + tb = traceback.format_exception(type(error), error, error.__traceback__) + tb_str = "".join(tb) + + msg = f"[{timestamp}] FAILURE during {phase}: {error}\n{tb_str}" + + # Loud stderr output + print(f"\n{'!' * 60}", flush=True) + print(f"!!! FAILURE: {phase} !!!", flush=True) + print(msg, flush=True) + print(f"{'!' 
* 60}\n", flush=True) + + # Append to log file + with open(FAILURE_LOG, "a") as f: + f.write(msg) + f.write("-" * 60 + "\n") + + +@hatchet.task() +def simple(input: EmptyModel, ctx: Context) -> dict[str, str]: + print("Executing simple task!") + return {"result": "Hello, world!"} + + +@hatchet.durable_task() +def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]: + print("Executing durable task!") + return {"result": "Hello from durable!"} + + +def _force_stop_worker(worker, thread) -> None: + """Forcefully terminate the worker and its child processes.""" + worker.killing = True + worker._terminate_processes() + worker._close_queues() + if worker.loop and worker.loop.is_running(): + worker.loop.call_soon_threadsafe(worker.loop.stop) + thread.join(timeout=5) + + +def start_worker(suffix: str = "") -> tuple: + """Create and start a worker in a background thread.""" + name = f"test-worker-{suffix}" if suffix else "test-worker" + worker = hatchet.worker( + name, + workflows=[simple, simple_durable], + slots=10, + ) + worker.handle_kill = False # Prevent sys.exit on shutdown + + # Restore default signal handlers so Ctrl+C raises KeyboardInterrupt + signal.signal(signal.SIGINT, signal.default_int_handler) + signal.signal(signal.SIGTERM, signal.SIG_DFL) + + thread = threading.Thread(target=worker.start, daemon=True) + thread.start() + + # Give the worker a moment to initialize + time.sleep(2) + print("Worker connected.") + return worker, thread + + +def stop_worker(worker, thread) -> None: + """Stop the worker gracefully.""" + try: + if worker.loop and worker.loop.is_running(): + asyncio.run_coroutine_threadsafe(worker.exit_gracefully(), worker.loop) + thread.join(timeout=10) + if thread.is_alive(): + _force_stop_worker(worker, thread) + print("Worker disconnected.") + except Exception as e: + log_failure("worker disconnect", e) + + +def main() -> None: + global _current_worker, _current_thread + + parser = argparse.ArgumentParser() + parser.add_argument( + "--suffix", + default="", + help="Suffix to append to the worker name (e.g. 
'old' or 'new')", + ) + args = parser.parse_args() + + try: + while True: + # --- Connect the worker --- + print("\n=== Connecting worker ===") + try: + worker, thread = start_worker(args.suffix) + _current_worker, _current_thread = worker, thread + except Exception as e: + log_failure("worker connect", e) + time.sleep(5) + continue + + # --- Trigger tasks every 1 second for 5 seconds --- + for tick in range(5): + time.sleep(1) + print(f"\n--- Triggering tasks (tick {tick + 1}/5) ---") + try: + ref = simple.run_no_wait() + print(f"Task triggered: {ref}") + except Exception as e: + log_failure(f"task trigger (tick {tick + 1}/5)", e) + try: + ref = simple_durable.run_no_wait() + print(f"Durable task triggered: {ref}") + except Exception as e: + log_failure(f"durable task trigger (tick {tick + 1}/5)", e) + + # --- Disconnect the worker --- + print("\n=== Disconnecting worker ===") + stop_worker(worker, thread) + _current_worker, _current_thread = None, None + + except KeyboardInterrupt: + print("\n\nCtrl+C received, shutting down...") + if _current_worker and _current_thread: + _force_stop_worker(_current_worker, _current_thread) + print("Bye!") + + + +if __name__ == "__main__": + main() diff --git a/examples/python/simple/worker.py b/examples/python/simple/worker.py index 2bd0361661..85bb98a8ce 100644 --- a/examples/python/simple/worker.py +++ b/examples/python/simple/worker.py @@ -1,5 +1,4 @@ # > Simple - from hatchet_sdk import Context, EmptyModel, Hatchet hatchet = Hatchet(debug=True) @@ -16,7 +15,10 @@ def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]: def main() -> None: - worker = hatchet.worker("test-worker", workflows=[simple, simple_durable]) + worker = hatchet.worker( + "test-worker", + workflows=[simple, simple_durable], + ) worker.start() diff --git a/frontend/app/src/lib/api/generated/data-contracts.ts b/frontend/app/src/lib/api/generated/data-contracts.ts index e9875b6377..ef9bff9026 100644 --- a/frontend/app/src/lib/api/generated/data-contracts.ts +++ b/frontend/app/src/lib/api/generated/data-contracts.ts @@ -1577,6 +1577,10 @@ export interface Step { action: string; /** The timeout of the step. */ timeout?: string; + /** Whether the step is durable. */ + isDurable?: boolean; + /** Slot requests for the step (slot_type -> units). */ + slotRequests?: Record; children?: string[]; parents?: string[]; } @@ -2155,6 +2159,14 @@ export interface RecentStepRuns { workflowRunId: string; } +/** Slot availability and limits for a slot type. */ +export interface WorkerSlotConfig { + /** The number of available units for this slot type. */ + available?: number; + /** The maximum number of units for this slot type. */ + limit: number; +} + export interface WorkerLabel { metadata: APIResourceMeta; /** The key of the label. */ @@ -2198,10 +2210,8 @@ export interface Worker { recentStepRuns?: RecentStepRuns[]; /** The status of the worker. */ status?: "ACTIVE" | "INACTIVE" | "PAUSED"; - /** The maximum number of runs this worker can execute concurrently. */ - maxRuns?: number; - /** The number of runs this worker can execute concurrently. */ - availableRuns?: number; + /** Slot availability and limits for this worker (slot_type -> { available, limit }). 
diff --git a/frontend/app/src/pages/main/v1/workers/$worker/index.tsx b/frontend/app/src/pages/main/v1/workers/$worker/index.tsx
index 053b7c138b..5608d28e46 100644
--- a/frontend/app/src/pages/main/v1/workers/$worker/index.tsx
+++ b/frontend/app/src/pages/main/v1/workers/$worker/index.tsx
@@ -177,11 +177,9 @@ export default function WorkerDetail() {
     return ;
   }
 
-  const availableSlots = worker.availableRuns ?? 0;
-  const maxSlots = worker.maxRuns ?? 0;
-  const usedSlots = maxSlots - availableSlots;
-  const usedPercentage =
-    maxSlots > 0 ? Math.round((usedSlots / maxSlots) * 100) : 0;
+  const slotCapacityEntries = Object.entries(worker.slotConfig || {}).sort(
+    ([a], [b]) => a.localeCompare(b),
+  );
 
   // dynamically set the max columns in the grid based on the presence of runtime info and labels
   const maxCols =
@@ -276,30 +274,54 @@ export default function WorkerDetail() {
             className="h-52 overflow-y-auto bg-background border-none"
           >
-              Available Run Slots
+              Slots
-
- - {maxSlots > 0 ? availableSlots : '∞'} - - {maxSlots > 0 && ( - - / {maxSlots} total - - )} -
- {maxSlots > 0 && ( -
-
-
-
-
- {usedSlots} used, {availableSlots} available -
+ + {slotCapacityEntries.length === 0 ? ( +
+ No slots +
+ ) : ( +
+ {slotCapacityEntries.map(([slotType, capacity]) => { + const available = capacity?.available; + const limit = capacity?.limit ?? 0; + const showAvailability = available !== undefined; + const used = showAvailability ? limit - available : 0; + const usedPercentage = + showAvailability && limit > 0 + ? Math.round((used / limit) * 100) + : 0; + const label = showAvailability + ? `${available} / ${limit}` + : `${limit}`; + + return ( +
+
+ + {slotType} + + + {label} + +
+ {showAvailability && limit > 0 && ( +
+
+
+
+
+ {used} used, {available} available +
+
+ )} +
+ ); + })}
)}

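The per-slot-type capacity rows that this page now renders correspond to the worker options documented later in this diff (`slots` and `durable_slots` in the Python SDK's client docs). A minimal sketch, assuming those two options each back one slot type; the worker name and counts are illustrative:

```python
from hatchet_sdk import Hatchet

hatchet = Hatchet()

# `slots` caps concurrent regular runs and `durable_slots` caps concurrent
# durable tasks (defaults 100 and 1000 per the client.mdx table in this PR).
# Each cap should surface as its own slot-type row in the workers UI above.
worker = hatchet.worker(
    "capacity-demo-worker",  # hypothetical name
    slots=10,
    durable_slots=50,
)

if __name__ == "__main__":
    worker.start()
```

With a registration like this, the detail card above would show one row per slot type rather than a single aggregate count.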
diff --git a/frontend/app/src/pages/main/v1/workers/components/worker-columns.tsx b/frontend/app/src/pages/main/v1/workers/components/worker-columns.tsx
index 0efa7d50b9..397db6dd64 100644
--- a/frontend/app/src/pages/main/v1/workers/components/worker-columns.tsx
+++ b/frontend/app/src/pages/main/v1/workers/components/worker-columns.tsx
@@ -13,7 +13,7 @@ export const WorkerColumn = {
   name: 'Name',
   type: 'Type',
   startedAt: 'Started at',
-  slots: 'Available Slots',
+  slots: 'Slots',
   lastHeartbeatAt: 'Last seen',
   runtime: 'SDK Version',
 } as const;
@@ -181,11 +181,34 @@ export const columns: (tenantId: string) => ColumnDef<Worker>[] = (
     header: ({ column }) => (
     ),
-    cell: ({ row }) => (

- {row.original.availableRuns} / {row.original.maxRuns} -
- ), + cell: ({ row }) => { + const slotConfig = row.original.slotConfig || {}; + const entries = Object.entries(slotConfig).sort(([a], [b]) => + a.localeCompare(b), + ); + + if (entries.length === 0) { + return
No slots
; + } + + return ( +
+ {entries.map(([slotType, capacity]) => { + const available = capacity?.available; + const limit = capacity?.limit; + const label = + available !== undefined ? `${available} / ${limit}` : `${limit}`; + + return ( +
+ {slotType}:{' '} + {label} +
+ ); + })} +
+          );
+        })}
+      },
 enableSorting: false,
 enableHiding: true,
 },
diff --git a/frontend/docs/pages/home/durable-execution.mdx b/frontend/docs/pages/home/durable-execution.mdx
index efda8b42dc..7c1985935c 100644
--- a/frontend/docs/pages/home/durable-execution.mdx
+++ b/frontend/docs/pages/home/durable-execution.mdx
@@ -22,13 +22,13 @@ This is especially useful in cases such as:
 
 ## How Hatchet Runs Durable Tasks
 
-When you register a durable task, Hatchet will start a second worker in the background for running durable tasks. If you don't register any durable workflows, the durable worker will not be started. Similarly, if you start a worker with _only_ durable workflows, the "main" worker will not start, and _only_ the durable worker will run. The durable worker will show up as a second worker in the Hatchet Dashboard.
+Durable tasks run on the same worker process as regular tasks, but they consume a separate slot type so they do not compete with regular tasks for slots. This pattern prevents deadlock scenarios in which durable tasks would starve child tasks of the slots they need for the parent durable task to complete.
 
 Tasks that are declared as being durable (using `durable_task` instead of `task`) will receive a `DurableContext` object instead of a normal `Context`, which extends the `Context` by providing some additional tools for working with durable execution features.
 
 ## Example Task
 
-Now that we know a bit about how Hatchet handles durable execution, let's build a task. We'll start by declaring a task that will run durably, on the "durable worker".
+Now that we know a bit about how Hatchet handles durable execution, let's build a task. We'll start by declaring a task that will run durably.
 
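To make the deadlock rationale above concrete: a durable parent that blocks on a regular child would previously compete with that child for the same slot pool. A minimal sketch, assuming the v1 Python SDK API used elsewhere in this diff (`task`, `durable_task`, `DurableContext`); the task names are illustrative:

```python
from hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet

hatchet = Hatchet()


@hatchet.task()
def child(input: EmptyModel, ctx: Context) -> dict[str, str]:
    return {"result": "done"}


@hatchet.durable_task()
def parent(input: EmptyModel, ctx: DurableContext) -> dict[str, str]:
    # The parent blocks until the child finishes. Because the parent occupies
    # a durable slot and the child a regular slot, waiting parents cannot
    # exhaust the slots their children need, which is the deadlock described above.
    child_result = child.run()
    return {"child": str(child_result)}
```

Because the parent holds a durable slot while it waits, it can never occupy the last regular slot that `child` needs in order to run.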
diff --git a/frontend/docs/pages/sdks/python/client.mdx b/frontend/docs/pages/sdks/python/client.mdx
index f500de1330..7039cb85fc 100644
--- a/frontend/docs/pages/sdks/python/client.mdx
+++ b/frontend/docs/pages/sdks/python/client.mdx
@@ -73,14 +73,14 @@ Create a Hatchet worker on which to run workflows.
 
 Parameters:
 
-| Name | Type | Description | Default |
-| --------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------- |
-| `name` | `str` | The name of the worker. | _required_ |
-| `slots` | `int` | The number of workflow slots on the worker. In other words, the number of concurrent tasks the worker can run at any point in time | `100` |
-| `durable_slots` | `int` | The number of durable workflow slots on the worker. In other words, the number of concurrent tasks the worker can run at any point in time that are durable. | `1000` |
-| `labels` | `dict[str, str \| int] \| None` | A dictionary of labels to assign to the worker. For more details, view examples on affinity and worker labels. | `None` |
-| `workflows` | `list[BaseWorkflow[Any]] \| None` | A list of workflows to register on the worker, as a shorthand for calling `register_workflow` on each or `register_workflows` on all of them. | `None` |
-| `lifespan` | `LifespanFn \| None` | A lifespan function to run on the worker. This function will be called when the worker is started, and can be used to perform any setup or teardown tasks. | `None` |
+| Name | Type | Description | Default |
+| --------------- | --------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
+| `name` | `str` | The name of the worker. | _required_ |
+| `slots` | `int` | Maximum number of concurrent runs. | `100` |
+| `durable_slots` | `int` | Maximum number of concurrent durable tasks. | `1000` |
+| `labels` | `dict[str, str \| int] \| None` | A dictionary of labels to assign to the worker. For more details, view examples on affinity and worker labels. | `None` |
+| `workflows` | `list[BaseWorkflow[Any]] \| None` | A list of workflows to register on the worker, as a shorthand for calling `register_workflow` on each or `register_workflows` on all of them. | `None` |
+| `lifespan` | `LifespanFn \| None` | A lifespan function to run on the worker. This function will be called when the worker is started, and can be used to perform any setup or teardown tasks. | `None` |
 
 Returns:
diff --git a/internal/msgqueue/msg.go b/internal/msgqueue/msg.go
index 5b458a90c0..181964a290 100644
--- a/internal/msgqueue/msg.go
+++ b/internal/msgqueue/msg.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	"github.com/google/uuid"
+	"github.com/hatchet-dev/hatchet/internal/datautils"
 )
diff --git a/internal/msgqueue/msgqueue.go b/internal/msgqueue/msgqueue.go
index eb0e63af81..ad94e4204a 100644
--- a/internal/msgqueue/msgqueue.go
+++ b/internal/msgqueue/msgqueue.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	"github.com/google/uuid"
+	"github.com/hatchet-dev/hatchet/pkg/random"
 )
diff --git a/internal/msgqueue/postgres/exchange.go b/internal/msgqueue/postgres/exchange.go
index eda155c8c0..27eced7c2d 100644
--- a/internal/msgqueue/postgres/exchange.go
+++ b/internal/msgqueue/postgres/exchange.go
@@ -7,6 +7,7 @@ import (
 	"golang.org/x/sync/errgroup"
 
 	"github.com/google/uuid"
+	"github.com/hatchet-dev/hatchet/internal/msgqueue"
 )
diff --git a/internal/operation/interval_test.go b/internal/operation/interval_test.go
index 3af3501e7e..248b186d6a 100644
--- a/internal/operation/interval_test.go
+++ b/internal/operation/interval_test.go
@@ -72,7 +72,7 @@ func TestInterval_RunInterval_WithJitter(t *testing.T) {
 			assert.GreaterOrEqual(t, len(timings), 2, "Should have at least 2 timing measurements")
 			for _, timing := range timings {
 				assert.GreaterOrEqual(t, timing, 50*time.Millisecond, "Timing should be at least the base interval")
-				assert.LessOrEqual(t, timing, 75*time.Millisecond, "Timing should include jitter but not exceed base + max jitter + buffer")
+				assert.LessOrEqual(t, timing, 85*time.Millisecond, "Timing should include jitter but not exceed base + max jitter + buffer")
 			}
 			return
 		case <-ch:
diff --git a/internal/services/admin/v1/server.go b/internal/services/admin/v1/server.go
index ab190bd473..6461576f08 100644
--- a/internal/services/admin/v1/server.go
+++ b/internal/services/admin/v1/server.go
@@ -911,6 +911,12 @@ func getCreateTaskOpts(tasks []*contracts.CreateTaskOpts, kind string) ([]v1.Cre
 			TriggerConditions: make([]v1.CreateStepMatchConditionOpt, 0),
 			RateLimits:        make([]v1.CreateWorkflowStepRateLimitOpts, 0), // Initialize to avoid nil
 			ScheduleTimeout:   stepCp.ScheduleTimeout,
+			IsDurable:         stepCp.IsDurable,
+			SlotRequests:      nil,
+		}
+
+		if stepCp.SlotRequests != nil {
+			steps[j].SlotRequests = stepCp.SlotRequests
 		}
 
 		// Safely set Parents
diff --git a/internal/services/controllers/metrics/collector.go b/internal/services/controllers/metrics/collector.go
index 51d89ba0ac..b9fceba9c8 100644 --- a/internal/services/controllers/metrics/collector.go +++ b/internal/services/controllers/metrics/collector.go @@ -457,23 +457,42 @@ func (mc *MetricsCollectorImpl) collectWorkerMetrics(ctx context.Context) func() mc.l.Debug().Msg("collecting worker metrics") - // Count active slots per tenant - activeSlots, err := mc.repo.Workers().CountActiveSlotsPerTenant() + // Count active slots per tenant (total) + activeSlotsTotal, err := mc.repo.Workers().ListTotalActiveSlotsPerTenant(ctx) switch { case err != nil: - mc.l.Error().Err(err).Msg("failed to count active slots per tenant") - case len(activeSlots) == 0: + mc.l.Error().Err(err).Msg("failed to list total active slots per tenant") + case len(activeSlotsTotal) == 0: mc.l.Debug().Msg("no active worker slots found") default: - mc.l.Info().Int("tenant_count", len(activeSlots)).Msg("recording active slots metrics") - for tenantId, count := range activeSlots { + mc.l.Info().Int("tenant_count", len(activeSlotsTotal)).Msg("recording active slots metrics") + for tenantId, count := range activeSlotsTotal { mc.recorder.RecordActiveSlots(ctx, tenantId, count) mc.l.Debug().Str("tenant_id", tenantId.String()).Int64("count", count).Msg("recorded active slots metric") } } + // Count active slots per tenant and slot key + activeSlotsByKey, err := mc.repo.Workers().ListActiveSlotsPerTenantAndSlotType(ctx) + switch { + case err != nil: + mc.l.Error().Err(err).Msg("failed to list active slots per tenant and slot key") + case len(activeSlotsByKey) == 0: + mc.l.Debug().Msg("no active worker slots by key found") + default: + mc.l.Info().Int("slot_count", len(activeSlotsByKey)).Msg("recording active slots by key metrics") + for tuple, count := range activeSlotsByKey { + mc.recorder.RecordActiveSlotsByKey(ctx, tuple.TenantId, tuple.SlotType, count) + mc.l.Debug(). + Str("tenant_id", tuple.TenantId.String()). + Str("slot_key", tuple.SlotType). + Int64("count", count). 
+ Msg("recorded active slots by key metric") + } + } + // Count active workers per tenant - activeWorkers, err := mc.repo.Workers().CountActiveWorkersPerTenant() + activeWorkers, err := mc.repo.Workers().CountActiveWorkersPerTenant(ctx) switch { case err != nil: mc.l.Error().Err(err).Msg("failed to count active workers per tenant") @@ -488,7 +507,7 @@ func (mc *MetricsCollectorImpl) collectWorkerMetrics(ctx context.Context) func() } // Count active SDKs per tenant - activeSDKs, err := mc.repo.Workers().ListActiveSDKsPerTenant() + activeSDKs, err := mc.repo.Workers().ListActiveSDKsPerTenant(ctx) switch { case err != nil: diff --git a/internal/services/controllers/olap/process_alerts.go b/internal/services/controllers/olap/process_alerts.go index c8b1a964e0..35080d1304 100644 --- a/internal/services/controllers/olap/process_alerts.go +++ b/internal/services/controllers/olap/process_alerts.go @@ -6,6 +6,7 @@ import ( "time" "github.com/google/uuid" + v1 "github.com/hatchet-dev/hatchet/pkg/repository" "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" "github.com/hatchet-dev/hatchet/pkg/telemetry" diff --git a/internal/services/controllers/olap/process_dag_status_updates.go b/internal/services/controllers/olap/process_dag_status_updates.go index 5ed405dbd6..497b5bbe0f 100644 --- a/internal/services/controllers/olap/process_dag_status_updates.go +++ b/internal/services/controllers/olap/process_dag_status_updates.go @@ -5,6 +5,7 @@ import ( "time" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/internal/msgqueue" tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1" v1 "github.com/hatchet-dev/hatchet/pkg/repository" diff --git a/internal/services/controllers/olap/process_task_status_updates.go b/internal/services/controllers/olap/process_task_status_updates.go index 71b5b7cc59..3520fa19e4 100644 --- a/internal/services/controllers/olap/process_task_status_updates.go +++ b/internal/services/controllers/olap/process_task_status_updates.go @@ -5,6 +5,7 @@ import ( "time" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/internal/msgqueue" tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1" v1 "github.com/hatchet-dev/hatchet/pkg/repository" diff --git a/internal/services/controllers/task/evict_idempotency_keys.go b/internal/services/controllers/task/evict_idempotency_keys.go index db6d69e69e..c429436ec1 100644 --- a/internal/services/controllers/task/evict_idempotency_keys.go +++ b/internal/services/controllers/task/evict_idempotency_keys.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/telemetry" ) diff --git a/internal/services/controllers/task/process_reassignments.go b/internal/services/controllers/task/process_reassignments.go index 0e5b885779..7a8b0bb9e4 100644 --- a/internal/services/controllers/task/process_reassignments.go +++ b/internal/services/controllers/task/process_reassignments.go @@ -6,6 +6,7 @@ import ( "time" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/internal/msgqueue" tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1" "github.com/hatchet-dev/hatchet/pkg/integrations/metrics/prometheus" diff --git a/internal/services/controllers/task/process_sleeps.go b/internal/services/controllers/task/process_sleeps.go index 9c03d16504..de7c0d6fd3 100644 --- a/internal/services/controllers/task/process_sleeps.go +++ b/internal/services/controllers/task/process_sleeps.go @@ -5,6 +5,7 @@ import ( "fmt" 
"github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/telemetry" ) diff --git a/internal/services/controllers/task/process_timeouts.go b/internal/services/controllers/task/process_timeouts.go index b2e2384818..7cf491c3c8 100644 --- a/internal/services/controllers/task/process_timeouts.go +++ b/internal/services/controllers/task/process_timeouts.go @@ -6,6 +6,7 @@ import ( "time" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/internal/msgqueue" tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1" "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" diff --git a/internal/services/dispatcher/contracts/dispatcher.pb.go b/internal/services/dispatcher/contracts/dispatcher.pb.go index b34f12f879..21722b3b14 100644 --- a/internal/services/dispatcher/contracts/dispatcher.pb.go +++ b/internal/services/dispatcher/contracts/dispatcher.pb.go @@ -531,7 +531,8 @@ type WorkerRegisterRequest struct { Actions []string `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"` // (optional) the services for this worker Services []string `protobuf:"bytes,3,rep,name=services,proto3" json:"services,omitempty"` - // (optional) the number of slots this worker can handle + // (optional) the number of default slots this worker can handle + // deprecated: use slot_config instead Slots *int32 `protobuf:"varint,4,opt,name=slots,proto3,oneof" json:"slots,omitempty"` // (optional) worker labels (i.e. state or other metadata) Labels map[string]*WorkerLabels `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -539,6 +540,8 @@ type WorkerRegisterRequest struct { WebhookId *string `protobuf:"bytes,6,opt,name=webhook_id,json=webhookId,proto3,oneof" json:"webhook_id,omitempty"` // (optional) information regarding the runtime environment of the worker RuntimeInfo *RuntimeInfo `protobuf:"bytes,7,opt,name=runtime_info,json=runtimeInfo,proto3,oneof" json:"runtime_info,omitempty"` + // (optional) slot config for this worker (slot_type -> units) + SlotConfig map[string]int32 `protobuf:"bytes,9,rep,name=slot_config,json=slotConfig,proto3" json:"slot_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } func (x *WorkerRegisterRequest) Reset() { @@ -622,6 +625,13 @@ func (x *WorkerRegisterRequest) GetRuntimeInfo() *RuntimeInfo { return nil } +func (x *WorkerRegisterRequest) GetSlotConfig() map[string]int32 { + if x != nil { + return x.SlotConfig + } + return nil +} + type WorkerRegisterResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2255,6 +2265,91 @@ func (*ReleaseSlotResponse) Descriptor() ([]byte, []int) { return file_dispatcher_proto_rawDescGZIP(), []int{25} } +type GetVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetVersionRequest) Reset() { + *x = GetVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_dispatcher_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVersionRequest) ProtoMessage() {} + +func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_dispatcher_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetVersionRequest.ProtoReflect.Descriptor instead. +func (*GetVersionRequest) Descriptor() ([]byte, []int) { + return file_dispatcher_proto_rawDescGZIP(), []int{26} +} + +type GetVersionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *GetVersionResponse) Reset() { + *x = GetVersionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_dispatcher_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetVersionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVersionResponse) ProtoMessage() {} + +func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_dispatcher_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetVersionResponse.ProtoReflect.Descriptor instead. +func (*GetVersionResponse) Descriptor() ([]byte, []int) { + return file_dispatcher_proto_rawDescGZIP(), []int{27} +} + +func (x *GetVersionResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + var File_dispatcher_proto protoreflect.FileDescriptor var file_dispatcher_proto_rawDesc = []byte{ @@ -2284,7 +2379,7 @@ var file_dispatcher_proto_rawDesc = []byte{ 0x6e, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x05, 0x0a, 0x03, 0x5f, 0x6f, 0x73, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65, - 0x78, 0x74, 0x72, 0x61, 0x22, 0x93, 0x03, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, + 0x78, 0x74, 0x72, 0x61, 0x22, 0x9b, 0x04, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, @@ -2302,401 +2397,417 @@ var file_dispatcher_proto_rawDesc = []byte{ 0x12, 0x34, 0x0a, 0x0c, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x02, 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, - 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x1a, 0x48, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x77, - 0x65, 0x62, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, 0x69, 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 
0x6e, 0x66, 0x6f, 0x22, 0x73, 0x0a, 0x16, 0x57, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1f, - 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, - 0xc2, 0x01, 0x0a, 0x19, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, - 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3e, 0x0a, 0x06, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x55, 0x70, 0x73, - 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x48, 0x0a, 0x0b, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x56, 0x0a, 0x1a, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, - 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, 0xab, 0x07, 0x0a, - 0x0e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, + 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x0b, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x73, 0x6c, 0x6f, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, + 0x48, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x23, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0d, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x53, 0x6c, 0x6f, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x73, 0x6c, 0x6f, + 0x74, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x77, 0x65, 0x62, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, 0x69, + 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x22, 0x73, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc2, 0x01, 0x0a, 0x19, 0x55, 0x70, 0x73, 0x65, + 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x3e, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x1a, 0x48, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x56, 0x0a, 0x1a, + 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, + 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x49, 0x64, 0x22, 0xab, 0x07, 0x0a, 0x0e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, + 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 
0x61, + 0x6e, 0x74, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x14, + 0x67, 0x65, 0x74, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x75, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x67, 0x65, 0x74, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x15, 0x0a, 0x06, + 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, + 0x62, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, + 0x0a, 0x0a, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, + 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, + 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, + 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, + 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, + 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x12, 0x63, 0x68, + 0x69, 0x6c, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x02, 0x52, 0x10, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x4b, 0x65, 0x79, 
0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x16, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x24, 0x0a, 0x0b, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x13, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x04, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x88, + 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x05, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x61, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, + 0x17, 0x0a, 0x15, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x63, 0x68, 0x69, + 0x6c, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x42, + 0x19, 0x0a, 0x17, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x22, 0x32, 0x0a, 0x13, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, 0x37, 0x0a, 0x18, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, + 0x55, 0x0a, 0x19, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, 0xca, 0x02, 0x0a, 0x13, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 
0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, + 0x6e, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x14, 0x67, 0x65, 0x74, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x67, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x52, 0x75, + 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x37, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, + 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x22, 0xe3, 0x03, 0x0a, 0x0f, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x49, 0x64, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x0a, 0x6a, + 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, + 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, + 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x33, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x53, 0x74, 0x65, 0x70, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 
0x54, 0x79, 0x70, 0x65, 0x52, + 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, + 0x24, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x88, 0x01, 0x01, 0x12, 0x2d, 0x0a, 0x10, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x01, 0x52, 0x0e, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x4e, 0x6f, 0x74, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x22, 0x4f, 0x0a, 0x13, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, + 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, 0x83, 0x02, 0x0a, 0x20, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2b, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x11, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x88, 0x01, + 0x01, 0x12, 0x37, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x02, 0x52, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, + 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, 0x01, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x42, 0x16, + 0x0a, 0x14, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0x48, 0x0a, 0x1e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, + 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 
0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x22, 0xe6, 0x03, 0x0a, 0x0d, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x75, 0x6e, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x14, 0x67, 0x65, 0x74, 0x5f, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x10, 0x67, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x52, - 0x75, 0x6e, 0x49, 0x64, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6a, - 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, - 0x6f, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x0a, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x6f, 0x62, 0x52, - 0x75, 0x6e, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x2f, 0x0a, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x75, 0x6e, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x31, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x61, 0x6e, 0x67, 0x75, 0x70, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x68, 0x61, 0x6e, 0x67, 0x75, 0x70, 0x12, 0x26, + 0x0a, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x0a, 0x72, + 0x65, 0x74, 0x72, 
0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x02, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, + 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x65, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x22, 0xdf, 0x01, 0x0a, 0x10, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, + 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x28, 0x0a, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x53, + 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x65, 0x70, 0x52, 0x75, + 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, + 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, + 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x0a, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x6f, 0x62, 0x52, 0x75, + 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x1b, + 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, + 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x22, 0x93, 0x01, 0x0a, 0x0d, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 
0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, + 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x46, 0x69, + 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x17, 0x0a, 0x15, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x6e, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x3d, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x61, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x41, 0x74, 0x22, + 0x13, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7a, 0x0a, 0x15, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, - 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x1b, - 0x0a, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x0b, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x0b, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, - 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, - 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x61, - 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x6e, 0x64, 0x65, 
0x78, 0x18, 0x0f, 0x20, 0x01, - 0x28, 0x05, 0x48, 0x01, 0x52, 0x12, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x63, - 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x10, 0x63, 0x68, 0x69, 0x6c, 0x64, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x12, 0x38, - 0x0a, 0x16, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, - 0x52, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x52, 0x75, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, - 0x72, 0x69, 0x74, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, - 0x72, 0x69, 0x74, 0x79, 0x12, 0x24, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x5f, 0x69, 0x64, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x48, 0x04, 0x52, 0x0a, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x48, 0x05, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x42, - 0x16, 0x0a, 0x14, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x63, 0x68, 0x69, 0x6c, - 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, - 0x69, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, - 0x69, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x22, 0x32, 0x0a, 0x13, 0x57, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, 0x37, - 0x0a, 0x18, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, 0x55, 0x0a, 0x19, 0x57, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 
0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, 0xca, - 0x02, 0x0a, 0x13, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, - 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x14, 0x67, - 0x65, 0x74, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x75, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x67, 0x65, 0x74, 0x47, 0x72, - 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x37, 0x0a, - 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x18, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xe3, 0x03, 0x0a, 0x0f, - 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, - 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x15, 0x0a, 0x06, - 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, - 0x62, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x0a, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x49, - 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, - 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, - 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x33, 0x0a, - 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x14, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x24, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0a, - 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x12, 0x2d, 0x0a, - 0x10, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x0e, 0x73, 0x68, 0x6f, 0x75, 0x6c, - 0x64, 0x4e, 0x6f, 0x74, 0x52, 0x65, 0x74, 0x72, 0x79, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, - 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x13, 0x0a, 0x11, - 0x5f, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x22, 0x4f, 0x0a, 0x13, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, - 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x49, 0x64, 0x22, 0x83, 0x02, 0x0a, 0x20, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, - 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, - 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x48, 0x01, 0x52, 0x11, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, - 0x65, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x12, 0x37, 0x0a, 0x15, 0x61, 0x64, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x13, 0x61, 0x64, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, - 0x01, 0x01, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, - 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x42, 0x18, - 0x0a, 0x16, 0x5f, 0x61, 0x64, 0x64, 0x69, 
0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x48, 0x0a, 0x1e, 0x53, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x75, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, - 0x49, 0x64, 0x22, 0xe6, 0x03, 0x0a, 0x0d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x0d, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x31, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x16, - 0x0a, 0x06, 0x68, 0x61, 0x6e, 0x67, 0x75, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x68, 0x61, 0x6e, 0x67, 0x75, 0x70, 0x12, 0x26, 0x0a, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, - 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0b, - 0x74, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x88, 0x01, 0x01, 0x12, 0x24, - 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x0a, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, - 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 
0x22, 0xdf, 0x01, 0x0a, 0x10, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x43, - 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x12, 0x28, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xc8, 0x01, - 0x0a, 0x0d, 0x53, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, - 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, - 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, - 0x0a, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, - 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, - 0x07, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x93, 0x01, 0x0a, 0x0d, 0x4f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, - 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, - 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 
0x17, - 0x0a, 0x15, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6e, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x72, 0x74, - 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x72, - 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, + 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x30, + 0x0a, 0x14, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, + 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x79, + 0x22, 0x53, 0x0a, 0x16, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x72, - 0x74, 0x62, 0x65, 0x61, 0x74, 0x41, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x72, 0x74, - 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7a, 0x0a, 0x15, - 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, - 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x22, 0x53, 0x0a, 0x16, 0x52, 0x65, 0x66, 0x72, - 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x61, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x41, 0x74, 0x22, 0x45, 0x0a, - 0x12, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, - 0x6c, 0x6f, 0x74, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x41, 0x0a, 0x04, 0x53, - 0x44, 0x4b, 0x53, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x06, 0x0a, 0x02, 0x47, 0x4f, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x59, 0x54, 0x48, - 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x53, 0x43, 0x52, 0x49, - 0x50, 0x54, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x52, 0x55, 0x42, 0x59, 0x10, 0x04, 0x2a, 0x4e, - 0x0a, 0x0a, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, - 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x00, - 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x5f, - 0x52, 0x55, 0x4e, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x47, - 0x45, 0x54, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02, 0x2a, 0xa2, - 0x01, 0x0a, 0x17, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x1c, 0x47, 0x52, - 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1c, - 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x22, - 0x0a, 0x1e, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, - 0x10, 0x02, 0x12, 0x1f, 0x0a, 0x1b, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, - 0x44, 0x10, 0x03, 0x2a, 0xac, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x53, - 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, 0x45, 0x50, - 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, - 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, - 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, - 0x45, 0x44, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, - 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, - 0x12, 0x20, 0x0a, 0x1c, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x41, 0x43, 0x4b, 0x4e, 0x4f, 0x57, 0x4c, 0x45, 0x44, 0x47, 0x45, 0x44, - 0x10, 0x04, 0x2a, 0x65, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1a, 0x0a, - 0x16, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x54, 0x45, 0x50, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, - 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 
0x59, 0x50, 0x45, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, - 0x4c, 0x4f, 0x57, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x02, 0x2a, 0xfe, 0x01, 0x0a, 0x11, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, - 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, - 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, - 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, - 0x45, 0x44, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, - 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, - 0x45, 0x44, 0x10, 0x03, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, - 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, - 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, - 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, - 0x49, 0x4d, 0x45, 0x44, 0x5f, 0x4f, 0x55, 0x54, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, - 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x10, 0x06, 0x2a, 0x3c, 0x0a, 0x14, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x24, 0x0a, 0x20, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x52, - 0x55, 0x4e, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, - 0x4e, 0x49, 0x53, 0x48, 0x45, 0x44, 0x10, 0x00, 0x32, 0xf8, 0x06, 0x0a, 0x0a, 0x44, 0x69, 0x73, - 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x57, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x06, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x12, 0x14, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, - 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x30, 0x01, 0x12, 0x35, 0x0a, 0x08, 0x4c, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x56, 0x32, 0x12, 0x14, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, - 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, - 0x30, 0x01, 0x12, 0x34, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, - 0x11, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 
0x19, 0x53, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x21, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, - 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, 0x01, 0x12, 0x53, 0x0a, 0x17, - 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x28, 0x01, 0x30, - 0x01, 0x12, 0x3f, 0x0a, 0x13, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x10, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x14, 0x2e, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x47, 0x0a, 0x17, 0x53, 0x65, 0x6e, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, - 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x41, 0x74, 0x22, 0x45, 0x0a, 0x12, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x74, + 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, + 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, + 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x13, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2a, 0x41, 0x0a, 0x04, 0x53, 0x44, 0x4b, 0x53, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x06, 0x0a, + 0x02, 0x47, 0x4f, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x59, 0x54, 0x48, 0x4f, 0x4e, 0x10, + 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x53, 0x43, 0x52, 0x49, 0x50, 0x54, 0x10, + 0x03, 0x12, 0x08, 0x0a, 0x04, 0x52, 0x55, 0x42, 0x59, 0x10, 0x04, 0x2a, 0x4e, 0x0a, 0x0a, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, + 0x52, 0x54, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x52, 0x55, 0x4e, + 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x47, 0x45, 0x54, 
0x5f, + 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02, 0x2a, 0xa2, 0x01, 0x0a, 0x17, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x1a, 0x14, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x10, 0x50, - 0x75, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, - 0x0e, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x1a, - 0x16, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0b, 0x55, 0x6e, 0x73, - 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x19, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x55, 0x6e, 0x73, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x12, 0x16, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x0b, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, - 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x13, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, - 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x52, 0x65, 0x6c, - 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x12, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, - 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, - 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x1c, 0x47, 0x52, 0x4f, 0x55, 0x50, + 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1c, 0x47, 0x52, 0x4f, + 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x22, 0x0a, 0x1e, 0x47, + 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 
0x54, + 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, + 0x1f, 0x0a, 0x1b, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, + 0x2a, 0xac, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, 0x45, 0x50, + 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, + 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, + 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x20, 0x0a, + 0x1c, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x41, 0x43, 0x4b, 0x4e, 0x4f, 0x57, 0x4c, 0x45, 0x44, 0x47, 0x45, 0x44, 0x10, 0x04, 0x2a, + 0x65, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x15, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x52, 0x45, + 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x45, 0x50, + 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, + 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x02, 0x2a, 0xfe, 0x01, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1f, 0x0a, + 0x1b, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x21, + 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, + 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, + 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, + 0x03, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, + 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, + 0x45, 0x44, 0x10, 0x04, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, + 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x49, 0x4d, 0x45, + 0x44, 0x5f, 0x4f, 0x55, 0x54, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, 0x4f, 0x55, + 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x54, 0x52, 0x45, 
0x41, 0x4d, 0x10, 0x06, 0x2a, 0x3c, 0x0a, 0x14, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x24, 0x0a, 0x20, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x52, 0x55, 0x4e, 0x5f, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4e, 0x49, 0x53, + 0x48, 0x45, 0x44, 0x10, 0x00, 0x32, 0xb1, 0x07, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, + 0x12, 0x16, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x06, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x12, 0x14, 0x2e, + 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x30, 0x01, 0x12, 0x35, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x56, 0x32, 0x12, 0x14, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x41, 0x73, 0x73, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x34, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x11, 0x2e, 0x48, + 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x12, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x19, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x21, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, 0x01, 0x12, 0x53, 0x0a, 0x17, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x52, 0x75, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x3f, + 0x0a, 0x13, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x10, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x14, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x47, 0x0a, 0x17, 0x53, 0x65, 0x6e, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 
0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x1a, 0x14, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x10, 0x50, 0x75, 0x74, 0x4f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x0e, 0x2e, 0x4f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, 0x2e, 0x4f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0b, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x19, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x55, 0x6e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1a, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, + 0x0a, 0x0e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x12, 0x16, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x0b, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, + 0x6f, 0x74, 0x12, 0x13, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x4f, 0x0a, 0x12, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1b, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x37, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, + 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, + 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x64, 0x69, + 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, + 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2712,7 +2823,7 @@ func file_dispatcher_proto_rawDescGZIP() []byte { } var file_dispatcher_proto_enumTypes = make([]protoimpl.EnumInfo, 7) -var file_dispatcher_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_dispatcher_proto_msgTypes = make([]protoimpl.MessageInfo, 31) var file_dispatcher_proto_goTypes = []interface{}{ (SDKS)(0), // 0: SDKS (ActionType)(0), // 1: ActionType 
@@ -2747,61 +2858,67 @@ var file_dispatcher_proto_goTypes = []interface{}{ (*RefreshTimeoutResponse)(nil), // 30: RefreshTimeoutResponse (*ReleaseSlotRequest)(nil), // 31: ReleaseSlotRequest (*ReleaseSlotResponse)(nil), // 32: ReleaseSlotResponse - nil, // 33: WorkerRegisterRequest.LabelsEntry - nil, // 34: UpsertWorkerLabelsRequest.LabelsEntry - (*timestamppb.Timestamp)(nil), // 35: google.protobuf.Timestamp + (*GetVersionRequest)(nil), // 33: GetVersionRequest + (*GetVersionResponse)(nil), // 34: GetVersionResponse + nil, // 35: WorkerRegisterRequest.LabelsEntry + nil, // 36: WorkerRegisterRequest.SlotConfigEntry + nil, // 37: UpsertWorkerLabelsRequest.LabelsEntry + (*timestamppb.Timestamp)(nil), // 38: google.protobuf.Timestamp } var file_dispatcher_proto_depIdxs = []int32{ 0, // 0: RuntimeInfo.language:type_name -> SDKS - 33, // 1: WorkerRegisterRequest.labels:type_name -> WorkerRegisterRequest.LabelsEntry + 35, // 1: WorkerRegisterRequest.labels:type_name -> WorkerRegisterRequest.LabelsEntry 8, // 2: WorkerRegisterRequest.runtime_info:type_name -> RuntimeInfo - 34, // 3: UpsertWorkerLabelsRequest.labels:type_name -> UpsertWorkerLabelsRequest.LabelsEntry - 1, // 4: AssignedAction.action_type:type_name -> ActionType - 35, // 5: GroupKeyActionEvent.event_timestamp:type_name -> google.protobuf.Timestamp - 2, // 6: GroupKeyActionEvent.event_type:type_name -> GroupKeyActionEventType - 35, // 7: StepActionEvent.event_timestamp:type_name -> google.protobuf.Timestamp - 3, // 8: StepActionEvent.event_type:type_name -> StepActionEventType - 4, // 9: WorkflowEvent.resource_type:type_name -> ResourceType - 5, // 10: WorkflowEvent.event_type:type_name -> ResourceEventType - 35, // 11: WorkflowEvent.event_timestamp:type_name -> google.protobuf.Timestamp - 6, // 12: WorkflowRunEvent.event_type:type_name -> WorkflowRunEventType - 35, // 13: WorkflowRunEvent.event_timestamp:type_name -> google.protobuf.Timestamp - 24, // 14: WorkflowRunEvent.results:type_name -> StepRunResult - 35, // 15: HeartbeatRequest.heartbeat_at:type_name -> google.protobuf.Timestamp - 35, // 16: RefreshTimeoutResponse.timeout_at:type_name -> google.protobuf.Timestamp - 7, // 17: WorkerRegisterRequest.LabelsEntry.value:type_name -> WorkerLabels - 7, // 18: UpsertWorkerLabelsRequest.LabelsEntry.value:type_name -> WorkerLabels - 9, // 19: Dispatcher.Register:input_type -> WorkerRegisterRequest - 14, // 20: Dispatcher.Listen:input_type -> WorkerListenRequest - 14, // 21: Dispatcher.ListenV2:input_type -> WorkerListenRequest - 27, // 22: Dispatcher.Heartbeat:input_type -> HeartbeatRequest - 20, // 23: Dispatcher.SubscribeToWorkflowEvents:input_type -> SubscribeToWorkflowEventsRequest - 21, // 24: Dispatcher.SubscribeToWorkflowRuns:input_type -> SubscribeToWorkflowRunsRequest - 18, // 25: Dispatcher.SendStepActionEvent:input_type -> StepActionEvent - 17, // 26: Dispatcher.SendGroupKeyActionEvent:input_type -> GroupKeyActionEvent - 25, // 27: Dispatcher.PutOverridesData:input_type -> OverridesData - 15, // 28: Dispatcher.Unsubscribe:input_type -> WorkerUnsubscribeRequest - 29, // 29: Dispatcher.RefreshTimeout:input_type -> RefreshTimeoutRequest - 31, // 30: Dispatcher.ReleaseSlot:input_type -> ReleaseSlotRequest - 11, // 31: Dispatcher.UpsertWorkerLabels:input_type -> UpsertWorkerLabelsRequest - 10, // 32: Dispatcher.Register:output_type -> WorkerRegisterResponse - 13, // 33: Dispatcher.Listen:output_type -> AssignedAction - 13, // 34: Dispatcher.ListenV2:output_type -> AssignedAction - 28, // 35: Dispatcher.Heartbeat:output_type -> 
HeartbeatResponse - 22, // 36: Dispatcher.SubscribeToWorkflowEvents:output_type -> WorkflowEvent - 23, // 37: Dispatcher.SubscribeToWorkflowRuns:output_type -> WorkflowRunEvent - 19, // 38: Dispatcher.SendStepActionEvent:output_type -> ActionEventResponse - 19, // 39: Dispatcher.SendGroupKeyActionEvent:output_type -> ActionEventResponse - 26, // 40: Dispatcher.PutOverridesData:output_type -> OverridesDataResponse - 16, // 41: Dispatcher.Unsubscribe:output_type -> WorkerUnsubscribeResponse - 30, // 42: Dispatcher.RefreshTimeout:output_type -> RefreshTimeoutResponse - 32, // 43: Dispatcher.ReleaseSlot:output_type -> ReleaseSlotResponse - 12, // 44: Dispatcher.UpsertWorkerLabels:output_type -> UpsertWorkerLabelsResponse - 32, // [32:45] is the sub-list for method output_type - 19, // [19:32] is the sub-list for method input_type - 19, // [19:19] is the sub-list for extension type_name - 19, // [19:19] is the sub-list for extension extendee - 0, // [0:19] is the sub-list for field type_name + 36, // 3: WorkerRegisterRequest.slot_config:type_name -> WorkerRegisterRequest.SlotConfigEntry + 37, // 4: UpsertWorkerLabelsRequest.labels:type_name -> UpsertWorkerLabelsRequest.LabelsEntry + 1, // 5: AssignedAction.action_type:type_name -> ActionType + 38, // 6: GroupKeyActionEvent.event_timestamp:type_name -> google.protobuf.Timestamp + 2, // 7: GroupKeyActionEvent.event_type:type_name -> GroupKeyActionEventType + 38, // 8: StepActionEvent.event_timestamp:type_name -> google.protobuf.Timestamp + 3, // 9: StepActionEvent.event_type:type_name -> StepActionEventType + 4, // 10: WorkflowEvent.resource_type:type_name -> ResourceType + 5, // 11: WorkflowEvent.event_type:type_name -> ResourceEventType + 38, // 12: WorkflowEvent.event_timestamp:type_name -> google.protobuf.Timestamp + 6, // 13: WorkflowRunEvent.event_type:type_name -> WorkflowRunEventType + 38, // 14: WorkflowRunEvent.event_timestamp:type_name -> google.protobuf.Timestamp + 24, // 15: WorkflowRunEvent.results:type_name -> StepRunResult + 38, // 16: HeartbeatRequest.heartbeat_at:type_name -> google.protobuf.Timestamp + 38, // 17: RefreshTimeoutResponse.timeout_at:type_name -> google.protobuf.Timestamp + 7, // 18: WorkerRegisterRequest.LabelsEntry.value:type_name -> WorkerLabels + 7, // 19: UpsertWorkerLabelsRequest.LabelsEntry.value:type_name -> WorkerLabels + 9, // 20: Dispatcher.Register:input_type -> WorkerRegisterRequest + 14, // 21: Dispatcher.Listen:input_type -> WorkerListenRequest + 14, // 22: Dispatcher.ListenV2:input_type -> WorkerListenRequest + 27, // 23: Dispatcher.Heartbeat:input_type -> HeartbeatRequest + 20, // 24: Dispatcher.SubscribeToWorkflowEvents:input_type -> SubscribeToWorkflowEventsRequest + 21, // 25: Dispatcher.SubscribeToWorkflowRuns:input_type -> SubscribeToWorkflowRunsRequest + 18, // 26: Dispatcher.SendStepActionEvent:input_type -> StepActionEvent + 17, // 27: Dispatcher.SendGroupKeyActionEvent:input_type -> GroupKeyActionEvent + 25, // 28: Dispatcher.PutOverridesData:input_type -> OverridesData + 15, // 29: Dispatcher.Unsubscribe:input_type -> WorkerUnsubscribeRequest + 29, // 30: Dispatcher.RefreshTimeout:input_type -> RefreshTimeoutRequest + 31, // 31: Dispatcher.ReleaseSlot:input_type -> ReleaseSlotRequest + 11, // 32: Dispatcher.UpsertWorkerLabels:input_type -> UpsertWorkerLabelsRequest + 33, // 33: Dispatcher.GetVersion:input_type -> GetVersionRequest + 10, // 34: Dispatcher.Register:output_type -> WorkerRegisterResponse + 13, // 35: Dispatcher.Listen:output_type -> AssignedAction + 13, // 36: 
Dispatcher.ListenV2:output_type -> AssignedAction + 28, // 37: Dispatcher.Heartbeat:output_type -> HeartbeatResponse + 22, // 38: Dispatcher.SubscribeToWorkflowEvents:output_type -> WorkflowEvent + 23, // 39: Dispatcher.SubscribeToWorkflowRuns:output_type -> WorkflowRunEvent + 19, // 40: Dispatcher.SendStepActionEvent:output_type -> ActionEventResponse + 19, // 41: Dispatcher.SendGroupKeyActionEvent:output_type -> ActionEventResponse + 26, // 42: Dispatcher.PutOverridesData:output_type -> OverridesDataResponse + 16, // 43: Dispatcher.Unsubscribe:output_type -> WorkerUnsubscribeResponse + 30, // 44: Dispatcher.RefreshTimeout:output_type -> RefreshTimeoutResponse + 32, // 45: Dispatcher.ReleaseSlot:output_type -> ReleaseSlotResponse + 12, // 46: Dispatcher.UpsertWorkerLabels:output_type -> UpsertWorkerLabelsResponse + 34, // 47: Dispatcher.GetVersion:output_type -> GetVersionResponse + 34, // [34:48] is the sub-list for method output_type + 20, // [20:34] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name } func init() { file_dispatcher_proto_init() } @@ -3122,6 +3239,30 @@ func file_dispatcher_proto_init() { return nil } } + file_dispatcher_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dispatcher_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVersionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_dispatcher_proto_msgTypes[0].OneofWrappers = []interface{}{} file_dispatcher_proto_msgTypes[1].OneofWrappers = []interface{}{} @@ -3137,7 +3278,7 @@ func file_dispatcher_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_dispatcher_proto_rawDesc, NumEnums: 7, - NumMessages: 28, + NumMessages: 31, NumExtensions: 0, NumServices: 1, }, diff --git a/internal/services/dispatcher/contracts/dispatcher_grpc.pb.go b/internal/services/dispatcher/contracts/dispatcher_grpc.pb.go index 8884162b8c..7fc5c8e920 100644 --- a/internal/services/dispatcher/contracts/dispatcher_grpc.pb.go +++ b/internal/services/dispatcher/contracts/dispatcher_grpc.pb.go @@ -38,6 +38,10 @@ type DispatcherClient interface { RefreshTimeout(ctx context.Context, in *RefreshTimeoutRequest, opts ...grpc.CallOption) (*RefreshTimeoutResponse, error) ReleaseSlot(ctx context.Context, in *ReleaseSlotRequest, opts ...grpc.CallOption) (*ReleaseSlotResponse, error) UpsertWorkerLabels(ctx context.Context, in *UpsertWorkerLabelsRequest, opts ...grpc.CallOption) (*UpsertWorkerLabelsResponse, error) + // GetVersion returns the dispatcher version as a string. + // SDKs use this to determine feature support (e.g. slot_config registration). + // Old engines that do not implement this RPC will return UNIMPLEMENTED.
+ GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) } type dispatcherClient struct { @@ -256,6 +260,15 @@ func (c *dispatcherClient) UpsertWorkerLabels(ctx context.Context, in *UpsertWor return out, nil } +func (c *dispatcherClient) GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) { + out := new(GetVersionResponse) + err := c.cc.Invoke(ctx, "/Dispatcher/GetVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // DispatcherServer is the server API for Dispatcher service. // All implementations must embed UnimplementedDispatcherServer // for forward compatibility @@ -276,6 +289,10 @@ type DispatcherServer interface { RefreshTimeout(context.Context, *RefreshTimeoutRequest) (*RefreshTimeoutResponse, error) ReleaseSlot(context.Context, *ReleaseSlotRequest) (*ReleaseSlotResponse, error) UpsertWorkerLabels(context.Context, *UpsertWorkerLabelsRequest) (*UpsertWorkerLabelsResponse, error) + // GetVersion returns the dispatcher version as a string. + // SDKs use this to determine feature support (e.g. slot_config registration). + // Old engines that do not implement this RPC will return UNIMPLEMENTED. + GetVersion(context.Context, *GetVersionRequest) (*GetVersionResponse, error) mustEmbedUnimplementedDispatcherServer() } @@ -322,6 +339,9 @@ func (UnimplementedDispatcherServer) ReleaseSlot(context.Context, *ReleaseSlotRe func (UnimplementedDispatcherServer) UpsertWorkerLabels(context.Context, *UpsertWorkerLabelsRequest) (*UpsertWorkerLabelsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UpsertWorkerLabels not implemented") } +func (UnimplementedDispatcherServer) GetVersion(context.Context, *GetVersionRequest) (*GetVersionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetVersion not implemented") +} func (UnimplementedDispatcherServer) mustEmbedUnimplementedDispatcherServer() {} // UnsafeDispatcherServer may be embedded to opt out of forward compatibility for this service. @@ -586,6 +606,24 @@ func _Dispatcher_UpsertWorkerLabels_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } +func _Dispatcher_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DispatcherServer).GetVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/Dispatcher/GetVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DispatcherServer).GetVersion(ctx, req.(*GetVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + // Dispatcher_ServiceDesc is the grpc.ServiceDesc for Dispatcher service.
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -629,6 +667,10 @@ var Dispatcher_ServiceDesc = grpc.ServiceDesc{ MethodName: "UpsertWorkerLabels", Handler: _Dispatcher_UpsertWorkerLabels_Handler, }, + { + MethodName: "GetVersion", + Handler: _Dispatcher_GetVersion_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/internal/services/dispatcher/dispatcher.go b/internal/services/dispatcher/dispatcher.go index 977bab3484..b8f2a64777 100644 --- a/internal/services/dispatcher/dispatcher.go +++ b/internal/services/dispatcher/dispatcher.go @@ -48,6 +48,7 @@ type DispatcherImpl struct { dispatcherId uuid.UUID workers *workers a *hatcheterrors.Wrapped + version string } var ErrWorkerNotFound = fmt.Errorf("worker not found") @@ -124,6 +125,7 @@ type DispatcherOpts struct { payloadSizeThreshold int defaultMaxWorkerBacklogSize int64 workflowRunBufferSize int + version string } func defaultDispatcherOpts() *DispatcherOpts { @@ -201,6 +203,12 @@ func WithWorkflowRunBufferSize(size int) DispatcherOpt { } } +func WithVersion(version string) DispatcherOpt { + return func(opts *DispatcherOpts) { + opts.version = version + } +} + func New(fs ...DispatcherOpt) (*DispatcherImpl, error) { opts := defaultDispatcherOpts() @@ -250,6 +258,7 @@ func New(fs ...DispatcherOpt) (*DispatcherImpl, error) { payloadSizeThreshold: opts.payloadSizeThreshold, defaultMaxWorkerBacklogSize: opts.defaultMaxWorkerBacklogSize, workflowRunBufferSize: opts.workflowRunBufferSize, + version: opts.version, }, nil } diff --git a/internal/services/dispatcher/server.go b/internal/services/dispatcher/server.go index 5606742ad8..4132278f19 100644 --- a/internal/services/dispatcher/server.go +++ b/internal/services/dispatcher/server.go @@ -53,9 +53,20 @@ func (s *DispatcherImpl) Register(ctx context.Context, request *contracts.Worker } } + if len(request.SlotConfig) > 0 { + opts.SlotConfig = request.SlotConfig + } else { + // default to 100 slots + opts.SlotConfig = map[string]int32{v1.SlotTypeDefault: 100} + } + + // FIXME: deprecated; remove in a future release (Feb 6, 2026) if request.Slots != nil { - mr := int(*request.Slots) - opts.MaxRuns = &mr + if len(request.SlotConfig) > 0 { + return nil, status.Errorf(codes.InvalidArgument, "cannot provide both slot_config and slots (deprecated); set only one") + } + + opts.SlotConfig = map[string]int32{v1.SlotTypeDefault: *request.Slots} } if apiErrors, err := s.v.ValidateAPI(opts); err != nil { @@ -640,3 +651,9 @@ func UnmarshalPayload[T any](payload interface{}) (T, error) { return result, nil } + +func (s *DispatcherImpl) GetVersion(ctx context.Context, req *contracts.GetVersionRequest) (*contracts.GetVersionResponse, error) { + return &contracts.GetVersionResponse{ + Version: s.version, + }, nil +} diff --git a/internal/services/dispatcher/subscribed_worker.go b/internal/services/dispatcher/subscribed_worker.go index cb98f550a9..0e057c603d 100644 --- a/internal/services/dispatcher/subscribed_worker.go +++ b/internal/services/dispatcher/subscribed_worker.go @@ -4,6 +4,7 @@ import ( "sync" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/services/dispatcher/contracts" ) diff --git a/internal/services/dispatcher/subscribed_worker_v1.go b/internal/services/dispatcher/subscribed_worker_v1.go index a38531159e..cd35fc3933 100644 --- a/internal/services/dispatcher/subscribed_worker_v1.go +++ b/internal/services/dispatcher/subscribed_worker_v1.go @@ -10,6
+10,7 @@ import ( "google.golang.org/grpc" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/services/dispatcher/contracts" tasktypesv1 "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1" diff --git a/internal/services/ingestor/server.go b/internal/services/ingestor/server.go index affe683a67..43ffcd8f98 100644 --- a/internal/services/ingestor/server.go +++ b/internal/services/ingestor/server.go @@ -8,6 +8,7 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/internal/datautils" "github.com/hatchet-dev/hatchet/internal/services/ingestor/contracts" "github.com/hatchet-dev/hatchet/pkg/constants" diff --git a/internal/services/ingestor/server_v1.go b/internal/services/ingestor/server_v1.go index 14e1558951..f60f88838c 100644 --- a/internal/services/ingestor/server_v1.go +++ b/internal/services/ingestor/server_v1.go @@ -6,6 +6,7 @@ import ( "time" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/services/ingestor/contracts" tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1" diff --git a/internal/services/shared/proto/v1/workflows.pb.go b/internal/services/shared/proto/v1/workflows.pb.go index 8644e08398..bc41f31310 100644 --- a/internal/services/shared/proto/v1/workflows.pb.go +++ b/internal/services/shared/proto/v1/workflows.pb.go @@ -1073,19 +1073,21 @@ type CreateTaskOpts struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ReadableId string `protobuf:"bytes,1,opt,name=readable_id,json=readableId,proto3" json:"readable_id,omitempty"` // (required) the task name - Action string `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"` // (required) the task action id - Timeout string `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` // (optional) the task timeout - Inputs string `protobuf:"bytes,4,opt,name=inputs,proto3" json:"inputs,omitempty"` // (optional) the task inputs, assuming string representation of JSON - Parents []string `protobuf:"bytes,5,rep,name=parents,proto3" json:"parents,omitempty"` // (optional) the task parents. 
if none are passed in, this is a root task - Retries int32 `protobuf:"varint,6,opt,name=retries,proto3" json:"retries,omitempty"` // (optional) the number of retries for the task, default 0 - RateLimits []*CreateTaskRateLimit `protobuf:"bytes,7,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` // (optional) the rate limits for the task - WorkerLabels map[string]*DesiredWorkerLabels `protobuf:"bytes,8,rep,name=worker_labels,json=workerLabels,proto3" json:"worker_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // (optional) the desired worker affinity state for the task - BackoffFactor *float32 `protobuf:"fixed32,9,opt,name=backoff_factor,json=backoffFactor,proto3,oneof" json:"backoff_factor,omitempty"` // (optional) the retry backoff factor for the task - BackoffMaxSeconds *int32 `protobuf:"varint,10,opt,name=backoff_max_seconds,json=backoffMaxSeconds,proto3,oneof" json:"backoff_max_seconds,omitempty"` // (optional) the maximum backoff time for the task - Concurrency []*Concurrency `protobuf:"bytes,11,rep,name=concurrency,proto3" json:"concurrency,omitempty"` // (optional) the task concurrency options - Conditions *TaskConditions `protobuf:"bytes,12,opt,name=conditions,proto3,oneof" json:"conditions,omitempty"` // (optional) the task conditions for creating the task - ScheduleTimeout *string `protobuf:"bytes,13,opt,name=schedule_timeout,json=scheduleTimeout,proto3,oneof" json:"schedule_timeout,omitempty"` // (optional) the timeout for the schedule + ReadableId string `protobuf:"bytes,1,opt,name=readable_id,json=readableId,proto3" json:"readable_id,omitempty"` // (required) the task name + Action string `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"` // (required) the task action id + Timeout string `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` // (optional) the task timeout + Inputs string `protobuf:"bytes,4,opt,name=inputs,proto3" json:"inputs,omitempty"` // (optional) the task inputs, assuming string representation of JSON + Parents []string `protobuf:"bytes,5,rep,name=parents,proto3" json:"parents,omitempty"` // (optional) the task parents. 
if none are passed in, this is a root task + Retries int32 `protobuf:"varint,6,opt,name=retries,proto3" json:"retries,omitempty"` // (optional) the number of retries for the task, default 0 + RateLimits []*CreateTaskRateLimit `protobuf:"bytes,7,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` // (optional) the rate limits for the task + WorkerLabels map[string]*DesiredWorkerLabels `protobuf:"bytes,8,rep,name=worker_labels,json=workerLabels,proto3" json:"worker_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // (optional) the desired worker affinity state for the task + BackoffFactor *float32 `protobuf:"fixed32,9,opt,name=backoff_factor,json=backoffFactor,proto3,oneof" json:"backoff_factor,omitempty"` // (optional) the retry backoff factor for the task + BackoffMaxSeconds *int32 `protobuf:"varint,10,opt,name=backoff_max_seconds,json=backoffMaxSeconds,proto3,oneof" json:"backoff_max_seconds,omitempty"` // (optional) the maximum backoff time for the task + Concurrency []*Concurrency `protobuf:"bytes,11,rep,name=concurrency,proto3" json:"concurrency,omitempty"` // (optional) the task concurrency options + Conditions *TaskConditions `protobuf:"bytes,12,opt,name=conditions,proto3,oneof" json:"conditions,omitempty"` // (optional) the task conditions for creating the task + ScheduleTimeout *string `protobuf:"bytes,13,opt,name=schedule_timeout,json=scheduleTimeout,proto3,oneof" json:"schedule_timeout,omitempty"` // (optional) the timeout for the schedule + IsDurable bool `protobuf:"varint,14,opt,name=is_durable,json=isDurable,proto3" json:"is_durable,omitempty"` // (optional) whether the task is durable + SlotRequests map[string]int32 `protobuf:"bytes,15,rep,name=slot_requests,json=slotRequests,proto3" json:"slot_requests,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // (optional) slot requests (slot_type -> units) } func (x *CreateTaskOpts) Reset() { @@ -1211,6 +1213,20 @@ func (x *CreateTaskOpts) GetScheduleTimeout() string { return "" } +func (x *CreateTaskOpts) GetIsDurable() bool { + if x != nil { + return x.IsDurable + } + return false +} + +func (x *CreateTaskOpts) GetSlotRequests() map[string]int32 { + if x != nil { + return x.SlotRequests + } + return nil +} + type CreateTaskRateLimit struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1702,7 +1718,7 @@ var file_v1_workflows_proto_rawDesc = []byte{ 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x09, - 0x0a, 0x07, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xda, 0x05, 0x0a, 0x0e, 0x43, 0x72, + 0x0a, 0x07, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x85, 0x07, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, @@ -1737,136 +1753,147 @@ var file_v1_workflows_proto_rawDesc = []byte{ 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x2e, 0x0a, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0d, 
0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, 0x73, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x88, 0x01, 0x01, 0x1a, 0x58, 0x0a, - 0x11, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, - 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x62, 0x61, 0x63, 0x6b, - 0x6f, 0x66, 0x66, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x62, - 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, - 0x64, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xb8, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x19, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, - 0x00, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, - 0x07, 0x6b, 0x65, 0x79, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x75, - 0x6e, 0x69, 0x74, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x02, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, - 0x2f, 0x0a, 0x11, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, - 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, - 0x12, 0x36, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x04, 0x52, 0x08, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x75, 0x6e, 0x69, - 0x74, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, - 0x0d, 0x0a, 0x0b, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x14, - 0x0a, 0x12, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, - 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x50, 0x0a, 0x1d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x49, 0x64, 0x22, 0x37, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x22, 0xc5, 0x01, 0x0a, - 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x1f, - 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, + 0x75, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, + 0x0a, 0x69, 0x73, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x49, 0x0a, 0x0d, + 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x0f, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x4f, 0x70, 0x74, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x6c, 0x6f, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0x58, 0x0a, 0x11, 0x57, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x3f, 0x0a, 0x11, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x66, + 0x61, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, + 0x66, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x42, 0x0d, 0x0a, + 0x0b, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x13, 0x0a, 0x11, + 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x22, 0xb8, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x19, 0x0a, 0x05, 0x75, + 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x75, 0x6e, + 0x69, 0x74, 0x73, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x78, + 0x70, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x45, + 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x75, 
0x6e, 0x69, 0x74, 0x73, 0x5f, + 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x09, 0x75, 0x6e, + 0x69, 0x74, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x11, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x36, 0x0a, 0x08, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x04, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x42, 0x0b, 0x0a, + 0x09, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x75, + 0x6e, 0x69, 0x74, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, + 0x0b, 0x0a, 0x09, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x1d, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, + 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x22, 0x37, + 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x22, 0xc5, 0x01, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x06, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, + 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, + 0xaf, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 
0x70, + 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01, - 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0c, 0x48, 0x01, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1f, - 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x42, - 0x08, 0x0a, 0x06, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x22, 0xaf, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, - 0x6e, 0x70, 0x75, 0x74, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x44, 0x0a, 0x09, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, - 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, - 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, - 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x4e, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, - 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, - 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x24, 0x0a, 0x0e, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x79, - 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x46, 0x54, - 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x41, 0x52, 0x44, 0x10, 0x01, 0x2a, 0x5d, 0x0a, 0x11, - 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x4d, 0x49, 0x4e, 0x55, 0x54, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x55, - 0x52, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, - 0x57, 0x45, 0x45, 
0x4b, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x54, 0x48, 0x10, - 0x05, 0x12, 0x08, 0x0a, 0x04, 0x59, 0x45, 0x41, 0x52, 0x10, 0x06, 0x2a, 0x4e, 0x0a, 0x09, 0x52, - 0x75, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, - 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, - 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, - 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x2a, 0x7f, 0x0a, 0x18, 0x43, - 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, - 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x41, 0x4e, 0x43, 0x45, - 0x4c, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, - 0x0f, 0x0a, 0x0b, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x01, - 0x12, 0x10, 0x0a, 0x0c, 0x51, 0x55, 0x45, 0x55, 0x45, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, - 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x52, 0x4f, 0x55, 0x4e, - 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x41, 0x4e, - 0x43, 0x45, 0x4c, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x04, 0x2a, 0x85, 0x01, 0x0a, - 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, 0x6d, 0x70, - 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, - 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x01, - 0x12, 0x10, 0x0a, 0x0c, 0x47, 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48, 0x41, 0x4e, - 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x47, 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48, - 0x41, 0x4e, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x0d, 0x0a, - 0x09, 0x4c, 0x45, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, - 0x4c, 0x45, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x51, 0x55, - 0x41, 0x4c, 0x10, 0x05, 0x32, 0xfd, 0x02, 0x0a, 0x0c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x52, 0x0a, 0x0b, 0x50, 0x75, 0x74, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x20, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x43, 0x61, 0x6e, - 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x52, 0x65, 0x70, - 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, - 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x52, 
0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x54, 0x72, 0x69, - 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x12, - 0x1d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, - 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, - 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, - 0x18, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, - 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x44, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, + 0x75, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, + 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, + 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x1a, 0x4e, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x2a, 0x24, 0x0a, 0x0e, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x53, 0x74, 0x72, 0x61, 0x74, + 0x65, 0x67, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x46, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, + 0x04, 0x48, 0x41, 0x52, 0x44, 0x10, 0x01, 0x2a, 0x5d, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, + 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x49, 0x4e, 0x55, + 0x54, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x55, 0x52, 0x10, 0x02, 0x12, 0x07, + 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x45, 0x45, 0x4b, 0x10, + 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x54, 0x48, 0x10, 
0x05, 0x12, 0x08, 0x0a, 0x04, + 0x59, 0x45, 0x41, 0x52, 0x10, 0x06, 0x2a, 0x4e, 0x0a, 0x09, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, + 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x2a, 0x7f, 0x0a, 0x18, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x49, 0x4e, 0x5f, + 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52, + 0x4f, 0x50, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x51, + 0x55, 0x45, 0x55, 0x45, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x02, 0x12, 0x15, 0x0a, + 0x11, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, + 0x49, 0x4e, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x4e, + 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x04, 0x2a, 0x85, 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, + 0x72, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, + 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x47, + 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x10, 0x02, 0x12, 0x19, 0x0a, + 0x15, 0x47, 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x5f, 0x4f, 0x52, + 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x4c, 0x45, 0x53, 0x53, + 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x53, 0x53, 0x5f, + 0x54, 0x48, 0x41, 0x4e, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x05, 0x32, + 0xfd, 0x02, 0x0a, 0x0c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x52, 0x0a, 0x0b, 0x50, 0x75, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, + 0x20, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, + 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, + 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, + 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, + 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x31, + 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 
0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x12, 0x1d, 0x2e, 0x76, 0x31, 0x2e, + 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x31, 0x2e, 0x54, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x47, 0x65, 0x74, + 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x18, 0x2e, 0x76, 0x31, 0x2e, + 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, + 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1882,7 +1909,7 @@ func file_v1_workflows_proto_rawDescGZIP() []byte { } var file_v1_workflows_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_v1_workflows_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_v1_workflows_proto_msgTypes = make([]protoimpl.MessageInfo, 20) var file_v1_workflows_proto_goTypes = []interface{}{ (StickyStrategy)(0), // 0: v1.StickyStrategy (RateLimitDuration)(0), // 1: v1.RateLimitDuration @@ -1907,15 +1934,16 @@ var file_v1_workflows_proto_goTypes = []interface{}{ (*TaskRunDetail)(nil), // 20: v1.TaskRunDetail (*GetRunDetailsResponse)(nil), // 21: v1.GetRunDetailsResponse nil, // 22: v1.CreateTaskOpts.WorkerLabelsEntry - nil, // 23: v1.GetRunDetailsResponse.TaskRunsEntry - (*timestamppb.Timestamp)(nil), // 24: google.protobuf.Timestamp - (*TaskConditions)(nil), // 25: v1.TaskConditions + nil, // 23: v1.CreateTaskOpts.SlotRequestsEntry + nil, // 24: v1.GetRunDetailsResponse.TaskRunsEntry + (*timestamppb.Timestamp)(nil), // 25: google.protobuf.Timestamp + (*TaskConditions)(nil), // 26: v1.TaskConditions } var file_v1_workflows_proto_depIdxs = []int32{ 7, // 0: v1.CancelTasksRequest.filter:type_name -> v1.TasksFilter 7, // 1: v1.ReplayTasksRequest.filter:type_name -> v1.TasksFilter - 24, // 2: v1.TasksFilter.since:type_name -> google.protobuf.Timestamp - 24, // 3: v1.TasksFilter.until:type_name -> google.protobuf.Timestamp + 25, // 2: v1.TasksFilter.since:type_name -> google.protobuf.Timestamp + 25, // 3: v1.TasksFilter.until:type_name -> google.protobuf.Timestamp 16, // 4: v1.CreateWorkflowVersionRequest.tasks:type_name -> v1.CreateTaskOpts 14, // 5: v1.CreateWorkflowVersionRequest.concurrency:type_name -> v1.Concurrency 16, // 6: v1.CreateWorkflowVersionRequest.on_failure_task:type_name -> v1.CreateTaskOpts @@ -1927,28 +1955,29 @@ var file_v1_workflows_proto_depIdxs = []int32{ 17, // 12: v1.CreateTaskOpts.rate_limits:type_name -> v1.CreateTaskRateLimit 22, // 13: v1.CreateTaskOpts.worker_labels:type_name -> v1.CreateTaskOpts.WorkerLabelsEntry 14, // 14: v1.CreateTaskOpts.concurrency:type_name -> v1.Concurrency - 
25, // 15: v1.CreateTaskOpts.conditions:type_name -> v1.TaskConditions - 1, // 16: v1.CreateTaskRateLimit.duration:type_name -> v1.RateLimitDuration - 2, // 17: v1.TaskRunDetail.status:type_name -> v1.RunStatus - 2, // 18: v1.GetRunDetailsResponse.status:type_name -> v1.RunStatus - 23, // 19: v1.GetRunDetailsResponse.task_runs:type_name -> v1.GetRunDetailsResponse.TaskRunsEntry - 15, // 20: v1.CreateTaskOpts.WorkerLabelsEntry.value:type_name -> v1.DesiredWorkerLabels - 20, // 21: v1.GetRunDetailsResponse.TaskRunsEntry.value:type_name -> v1.TaskRunDetail - 12, // 22: v1.AdminService.PutWorkflow:input_type -> v1.CreateWorkflowVersionRequest - 5, // 23: v1.AdminService.CancelTasks:input_type -> v1.CancelTasksRequest - 6, // 24: v1.AdminService.ReplayTasks:input_type -> v1.ReplayTasksRequest - 10, // 25: v1.AdminService.TriggerWorkflowRun:input_type -> v1.TriggerWorkflowRunRequest - 19, // 26: v1.AdminService.GetRunDetails:input_type -> v1.GetRunDetailsRequest - 18, // 27: v1.AdminService.PutWorkflow:output_type -> v1.CreateWorkflowVersionResponse - 8, // 28: v1.AdminService.CancelTasks:output_type -> v1.CancelTasksResponse - 9, // 29: v1.AdminService.ReplayTasks:output_type -> v1.ReplayTasksResponse - 11, // 30: v1.AdminService.TriggerWorkflowRun:output_type -> v1.TriggerWorkflowRunResponse - 21, // 31: v1.AdminService.GetRunDetails:output_type -> v1.GetRunDetailsResponse - 27, // [27:32] is the sub-list for method output_type - 22, // [22:27] is the sub-list for method input_type - 22, // [22:22] is the sub-list for extension type_name - 22, // [22:22] is the sub-list for extension extendee - 0, // [0:22] is the sub-list for field type_name + 26, // 15: v1.CreateTaskOpts.conditions:type_name -> v1.TaskConditions + 23, // 16: v1.CreateTaskOpts.slot_requests:type_name -> v1.CreateTaskOpts.SlotRequestsEntry + 1, // 17: v1.CreateTaskRateLimit.duration:type_name -> v1.RateLimitDuration + 2, // 18: v1.TaskRunDetail.status:type_name -> v1.RunStatus + 2, // 19: v1.GetRunDetailsResponse.status:type_name -> v1.RunStatus + 24, // 20: v1.GetRunDetailsResponse.task_runs:type_name -> v1.GetRunDetailsResponse.TaskRunsEntry + 15, // 21: v1.CreateTaskOpts.WorkerLabelsEntry.value:type_name -> v1.DesiredWorkerLabels + 20, // 22: v1.GetRunDetailsResponse.TaskRunsEntry.value:type_name -> v1.TaskRunDetail + 12, // 23: v1.AdminService.PutWorkflow:input_type -> v1.CreateWorkflowVersionRequest + 5, // 24: v1.AdminService.CancelTasks:input_type -> v1.CancelTasksRequest + 6, // 25: v1.AdminService.ReplayTasks:input_type -> v1.ReplayTasksRequest + 10, // 26: v1.AdminService.TriggerWorkflowRun:input_type -> v1.TriggerWorkflowRunRequest + 19, // 27: v1.AdminService.GetRunDetails:input_type -> v1.GetRunDetailsRequest + 18, // 28: v1.AdminService.PutWorkflow:output_type -> v1.CreateWorkflowVersionResponse + 8, // 29: v1.AdminService.CancelTasks:output_type -> v1.CancelTasksResponse + 9, // 30: v1.AdminService.ReplayTasks:output_type -> v1.ReplayTasksResponse + 11, // 31: v1.AdminService.TriggerWorkflowRun:output_type -> v1.TriggerWorkflowRunResponse + 21, // 32: v1.AdminService.GetRunDetails:output_type -> v1.GetRunDetailsResponse + 28, // [28:33] is the sub-list for method output_type + 23, // [23:28] is the sub-list for method input_type + 23, // [23:23] is the sub-list for extension type_name + 23, // [23:23] is the sub-list for extension extendee + 0, // [0:23] is the sub-list for field type_name } func init() { file_v1_workflows_proto_init() } @@ -2180,7 +2209,7 @@ func file_v1_workflows_proto_init() { GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_v1_workflows_proto_rawDesc, NumEnums: 5, - NumMessages: 19, + NumMessages: 20, NumExtensions: 0, NumServices: 1, }, diff --git a/internal/services/shared/tasktypes/v1/event.go b/internal/services/shared/tasktypes/v1/event.go index 589f742677..ba920058d6 100644 --- a/internal/services/shared/tasktypes/v1/event.go +++ b/internal/services/shared/tasktypes/v1/event.go @@ -4,6 +4,7 @@ import ( "time" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/internal/msgqueue" v1 "github.com/hatchet-dev/hatchet/pkg/repository" ) diff --git a/internal/services/shared/tasktypes/v1/scheduler.go b/internal/services/shared/tasktypes/v1/scheduler.go index 96fd6dad79..cd56cc0c92 100644 --- a/internal/services/shared/tasktypes/v1/scheduler.go +++ b/internal/services/shared/tasktypes/v1/scheduler.go @@ -2,6 +2,7 @@ package v1 import ( "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/internal/msgqueue" v1 "github.com/hatchet-dev/hatchet/pkg/repository" "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" diff --git a/internal/services/ticker/schedule_workflow.go b/internal/services/ticker/schedule_workflow.go index 8c665efd19..51c5673f63 100644 --- a/internal/services/ticker/schedule_workflow.go +++ b/internal/services/ticker/schedule_workflow.go @@ -6,6 +6,7 @@ import ( "time" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" ) diff --git a/internal/testutils/env.go b/internal/testutils/env.go index f4ef92391c..a8f2efd20a 100644 --- a/internal/testutils/env.go +++ b/internal/testutils/env.go @@ -10,10 +10,11 @@ import ( "testing" "github.com/google/uuid" + "github.com/jackc/pgx/v5" + "github.com/hatchet-dev/hatchet/pkg/config/loader" "github.com/hatchet-dev/hatchet/pkg/config/server" v1 "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/jackc/pgx/v5" ) func Prepare(t *testing.T) { diff --git a/pkg/client/dispatcher.go b/pkg/client/dispatcher.go index 5d99ab8bb6..bd9a541a9c 100644 --- a/pkg/client/dispatcher.go +++ b/pkg/client/dispatcher.go @@ -25,6 +25,10 @@ import ( type DispatcherClient interface { GetActionListener(ctx context.Context, req *GetActionListenerRequest) (WorkerActionListener, *string, error) + // GetVersion calls the GetVersion RPC. Returns the engine semantic version string. + // Old engines that do not implement this will return codes.Unimplemented. + GetVersion(ctx context.Context) (string, error) + SendStepActionEvent(ctx context.Context, in *ActionEvent) (*ActionEventResponse, error) SendGroupKeyActionEvent(ctx context.Context, in *ActionEvent) (*ActionEventResponse, error) @@ -47,9 +51,14 @@ type GetActionListenerRequest struct { WorkerName string Services []string Actions []string - Slots *int + SlotConfig map[string]int32 Labels map[string]interface{} WebhookId *string + + // LegacySlots, when non-nil, causes the registration to use the deprecated + // `slots` proto field instead of `slot_config`. This is for backward + // compatibility with engines that do not support multiple slot types. + LegacySlots *int32 } // ActionPayload unmarshals the action payload into the target. It also validates the resulting target. 
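// Illustrative sketch (not part of the diff): one way a worker bootstrap might
// combine the new GetVersion RPC with the LegacySlots escape hatch declared in
// the hunk above. The helper name buildListenerRequest and its parameters are
// assumptions for this example; DispatcherClient, GetActionListenerRequest,
// SlotConfig, and LegacySlots come from the interface above, and the
// codes.Unimplemented check mirrors the documented behavior of engines that
// predate the GetVersion RPC.
package client

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func buildListenerRequest(ctx context.Context, d DispatcherClient, desiredSlots int32) (*GetActionListenerRequest, error) {
	req := &GetActionListenerRequest{
		WorkerName: "example-worker",
		Actions:    []string{"default:example"},
	}

	if _, err := d.GetVersion(ctx); err != nil {
		if status.Code(err) != codes.Unimplemented {
			return nil, err
		}
		// Old engine: GetVersion is unimplemented, so register with the
		// deprecated single-slot `slots` field instead of slot_config.
		req.LegacySlots = &desiredSlots
		return req, nil
	}

	// Engine understands slot_config: request units of the default slot type
	// ("default" matches the server-side v1.SlotTypeDefault constant).
	req.SlotConfig = map[string]int32{"default": desiredSlots}
	return req, nil
}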
@@ -270,9 +279,12 @@ func (d *dispatcherClientImpl) newActionListener(ctx context.Context, req *GetAc } } - if req.Slots != nil { - mr := int32(*req.Slots) // nolint: gosec - registerReq.Slots = &mr + if req.LegacySlots != nil { + registerReq.Slots = req.LegacySlots + } else if len(req.SlotConfig) > 0 { + registerReq.SlotConfig = req.SlotConfig + } else { + return nil, nil, fmt.Errorf("slot config is required for worker registration") } // register the worker @@ -534,6 +546,14 @@ func (a *actionListenerImpl) Unregister() error { return nil } +func (d *dispatcherClientImpl) GetVersion(ctx context.Context) (string, error) { + resp, err := d.client.GetVersion(d.ctx.newContext(ctx), &dispatchercontracts.GetVersionRequest{}) + if err != nil { + return "", err + } + return resp.Version, nil +} + func (d *dispatcherClientImpl) GetActionListener(ctx context.Context, req *GetActionListenerRequest) (WorkerActionListener, *string, error) { return d.newActionListener(ctx, req) } diff --git a/pkg/client/rest/gen.go b/pkg/client/rest/gen.go index 76816205e5..d413f0c92a 100644 --- a/pkg/client/rest/gen.go +++ b/pkg/client/rest/gen.go @@ -996,15 +996,21 @@ type SlackWebhook struct { // Step defines model for Step. type Step struct { - Action string `json:"action"` - Children *[]string `json:"children,omitempty"` - JobId string `json:"jobId"` - Metadata APIResourceMeta `json:"metadata"` - Parents *[]string `json:"parents,omitempty"` + Action string `json:"action"` + Children *[]string `json:"children,omitempty"` + + // IsDurable Whether the step is durable. + IsDurable *bool `json:"isDurable,omitempty"` + JobId string `json:"jobId"` + Metadata APIResourceMeta `json:"metadata"` + Parents *[]string `json:"parents,omitempty"` // ReadableId The readable id of the step. ReadableId string `json:"readableId"` - TenantId string `json:"tenantId"` + + // SlotRequests Slot requests for the step (slot_type -> units). + SlotRequests *map[string]int `json:"slotRequests,omitempty"` + TenantId string `json:"tenantId"` // Timeout The timeout of the step. Timeout *string `json:"timeout,omitempty"` @@ -2120,9 +2126,6 @@ type Worker struct { // Actions The actions this worker can perform. Actions *[]string `json:"actions,omitempty"` - // AvailableRuns The number of runs this worker can execute concurrently. - AvailableRuns *int `json:"availableRuns,omitempty"` - // DispatcherId the id of the assigned dispatcher, in UUID format DispatcherId *openapi_types.UUID `json:"dispatcherId,omitempty"` @@ -2133,11 +2136,8 @@ type Worker struct { LastHeartbeatAt *time.Time `json:"lastHeartbeatAt,omitempty"` // LastListenerEstablished The time this worker last sent a heartbeat. - LastListenerEstablished *time.Time `json:"lastListenerEstablished,omitempty"` - - // MaxRuns The maximum number of runs this worker can execute concurrently. - MaxRuns *int `json:"maxRuns,omitempty"` - Metadata APIResourceMeta `json:"metadata"` + LastListenerEstablished *time.Time `json:"lastListenerEstablished,omitempty"` + Metadata APIResourceMeta `json:"metadata"` // Name The name of the worker. Name string `json:"name"` @@ -2149,6 +2149,9 @@ type Worker struct { RegisteredWorkflows *[]RegisteredWorkflow `json:"registeredWorkflows,omitempty"` RuntimeInfo *WorkerRuntimeInfo `json:"runtimeInfo,omitempty"` + // SlotConfig Slot availability and limits for this worker (slot_type -> { available, limit }). + SlotConfig *map[string]WorkerSlotConfig `json:"slotConfig,omitempty"` + // Slots The semaphore slot state for the worker. 
Slots *[]SemaphoreSlots `json:"slots,omitempty"` @@ -2194,6 +2197,15 @@ type WorkerRuntimeInfo struct { // WorkerRuntimeSDKs defines model for WorkerRuntimeSDKs. type WorkerRuntimeSDKs string +// WorkerSlotConfig Slot availability and limits for a slot type. +type WorkerSlotConfig struct { + // Available The number of available units for this slot type. + Available *int `json:"available,omitempty"` + + // Limit The maximum number of units for this slot type. + Limit int `json:"limit"` +} + // WorkerType defines model for WorkerType. type WorkerType string diff --git a/pkg/repository/api_token.go b/pkg/repository/api_token.go index ca0091f9d3..713e048e6d 100644 --- a/pkg/repository/api_token.go +++ b/pkg/repository/api_token.go @@ -5,6 +5,7 @@ import ( "time" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/repository/cache" "github.com/hatchet-dev/hatchet/pkg/repository/sqlchelpers" "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" diff --git a/pkg/repository/dispatcher.go b/pkg/repository/dispatcher.go index 17c9f62be6..e63fd01d22 100644 --- a/pkg/repository/dispatcher.go +++ b/pkg/repository/dispatcher.go @@ -5,6 +5,7 @@ import ( "time" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlchelpers" "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" ) diff --git a/pkg/repository/olappayload.go b/pkg/repository/olappayload.go index 7f90f96c40..6ef3b6de6a 100644 --- a/pkg/repository/olappayload.go +++ b/pkg/repository/olappayload.go @@ -2,6 +2,7 @@ package repository import ( "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/internal/msgqueue" ) diff --git a/pkg/repository/output.go b/pkg/repository/output.go index 22eb4e14bc..fb54c731ef 100644 --- a/pkg/repository/output.go +++ b/pkg/repository/output.go @@ -4,6 +4,7 @@ import ( "encoding/json" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" ) diff --git a/pkg/repository/scheduler.go b/pkg/repository/scheduler.go index a33a7fbdc9..7bb2218ed9 100644 --- a/pkg/repository/scheduler.go +++ b/pkg/repository/scheduler.go @@ -4,6 +4,7 @@ import ( "context" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" ) @@ -38,12 +39,15 @@ type QueueRepository interface { GetTaskRateLimits(ctx context.Context, tx *OptimisticTx, queueItems []*sqlcv1.V1QueueItem) (map[int64]map[string]int32, error) RequeueRateLimitedItems(ctx context.Context, tenantId uuid.UUID, queueName string) ([]*sqlcv1.RequeueRateLimitedQueueItemsRow, error) GetDesiredLabels(ctx context.Context, tx *OptimisticTx, stepIds []uuid.UUID) (map[uuid.UUID][]*sqlcv1.GetDesiredLabelsRow, error) + GetStepSlotRequests(ctx context.Context, tx *OptimisticTx, stepIds []uuid.UUID) (map[uuid.UUID]map[string]int32, error) Cleanup() } type AssignmentRepository interface { ListActionsForWorkers(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error) ListAvailableSlotsForWorkers(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error) + ListAvailableSlotsForWorkersAndTypes(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersAndTypesParams) ([]*sqlcv1.ListAvailableSlotsForWorkersAndTypesRow, error) + ListWorkerSlotConfigs(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListWorkerSlotConfigsRow, error) } type OptimisticSchedulingRepository interface { diff --git 
a/pkg/repository/scheduler_assignment.go b/pkg/repository/scheduler_assignment.go index aa390e0272..68e8490f0c 100644 --- a/pkg/repository/scheduler_assignment.go +++ b/pkg/repository/scheduler_assignment.go @@ -4,6 +4,7 @@ import ( "context" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" "github.com/hatchet-dev/hatchet/pkg/telemetry" ) @@ -34,3 +35,20 @@ func (d *assignmentRepository) ListAvailableSlotsForWorkers(ctx context.Context, return d.queries.ListAvailableSlotsForWorkers(ctx, d.pool, params) } + +func (d *assignmentRepository) ListAvailableSlotsForWorkersAndTypes(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersAndTypesParams) ([]*sqlcv1.ListAvailableSlotsForWorkersAndTypesRow, error) { + ctx, span := telemetry.NewSpan(ctx, "list-available-slots-for-workers-and-types") + defer span.End() + + return d.queries.ListAvailableSlotsForWorkersAndTypes(ctx, d.pool, params) +} + +func (d *assignmentRepository) ListWorkerSlotConfigs(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListWorkerSlotConfigsRow, error) { + ctx, span := telemetry.NewSpan(ctx, "list-worker-slot-configs") + defer span.End() + + return d.queries.ListWorkerSlotConfigs(ctx, d.pool, sqlcv1.ListWorkerSlotConfigsParams{ + Tenantid: tenantId, + Workerids: workerIds, + }) +} diff --git a/pkg/repository/scheduler_concurrency.go b/pkg/repository/scheduler_concurrency.go index ae1701b140..9b414519ac 100644 --- a/pkg/repository/scheduler_concurrency.go +++ b/pkg/repository/scheduler_concurrency.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlchelpers" "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" ) diff --git a/pkg/repository/scheduler_lease.go b/pkg/repository/scheduler_lease.go index 268bdf8cb3..1837011787 100644 --- a/pkg/repository/scheduler_lease.go +++ b/pkg/repository/scheduler_lease.go @@ -13,10 +13,9 @@ import ( ) type ListActiveWorkersResult struct { - ID uuid.UUID - MaxRuns int - Name string - Labels []*sqlcv1.ListManyWorkerLabelsRow + ID uuid.UUID + Name string + Labels []*sqlcv1.ListManyWorkerLabelsRow } type leaseRepository struct { @@ -149,10 +148,9 @@ func (d *leaseRepository) ListActiveWorkers(ctx context.Context, tenantId uuid.U for _, worker := range activeWorkers { res = append(res, &ListActiveWorkersResult{ - ID: worker.ID, - MaxRuns: int(worker.MaxRuns), - Labels: workerIdsToLabels[worker.ID], - Name: worker.Name, + ID: worker.ID, + Labels: workerIdsToLabels[worker.ID], + Name: worker.Name, }) } @@ -189,10 +187,9 @@ func (d *leaseRepository) GetActiveWorker(ctx context.Context, tenantId, workerI } return &ListActiveWorkersResult{ - ID: worker.Worker.ID, - MaxRuns: int(worker.Worker.MaxRuns), - Labels: workerIdsToLabels[worker.Worker.ID], - Name: worker.Worker.Name, + ID: worker.Worker.ID, + Labels: workerIdsToLabels[worker.Worker.ID], + Name: worker.Worker.Name, }, nil } diff --git a/pkg/repository/scheduler_queue.go b/pkg/repository/scheduler_queue.go index 84e94848a3..3f1462de91 100644 --- a/pkg/repository/scheduler_queue.go +++ b/pkg/repository/scheduler_queue.go @@ -639,6 +639,63 @@ func (d *queueRepository) GetDesiredLabels(ctx context.Context, tx *OptimisticTx return stepIdToLabels, nil } +func (d *queueRepository) GetStepSlotRequests(ctx context.Context, tx *OptimisticTx, stepIds []uuid.UUID) (map[uuid.UUID]map[string]int32, error) { + ctx, span := telemetry.NewSpan(ctx, "get-step-slot-requests") + defer span.End() + + uniqueStepIds := 
sqlchelpers.UniqueSet(stepIds) + + stepIdsToLookup := make([]uuid.UUID, 0, len(uniqueStepIds)) + stepIdToRequests := make(map[uuid.UUID]map[string]int32, len(uniqueStepIds)) + + for _, stepId := range uniqueStepIds { + if value, found := d.stepIdSlotRequestsCache.Get(stepId); found { + stepIdToRequests[stepId] = value + } else { + stepIdsToLookup = append(stepIdsToLookup, stepId) + } + } + + if len(stepIdsToLookup) == 0 { + return stepIdToRequests, nil + } + + var queryTx sqlcv1.DBTX + + if tx != nil { + queryTx = tx.tx + } else { + queryTx = d.pool + } + + rows, err := d.queries.GetStepSlotRequests(ctx, queryTx, sqlcv1.GetStepSlotRequestsParams{ + Stepids: stepIdsToLookup, + Tenantid: d.tenantId, + }) + if err != nil { + return nil, err + } + + for _, row := range rows { + if _, ok := stepIdToRequests[row.StepID]; !ok { + stepIdToRequests[row.StepID] = make(map[string]int32) + } + + stepIdToRequests[row.StepID][row.SlotType] = row.Units + } + + // cache empty results so we skip DB lookups for steps without explicit slot requests + for _, stepId := range stepIdsToLookup { + if _, ok := stepIdToRequests[stepId]; !ok { + stepIdToRequests[stepId] = map[string]int32{} + } + + d.stepIdSlotRequestsCache.Add(stepId, stepIdToRequests[stepId]) + } + + return stepIdToRequests, nil +} + func (d *queueRepository) RequeueRateLimitedItems(ctx context.Context, tenantId uuid.UUID, queueName string) ([]*sqlcv1.RequeueRateLimitedQueueItemsRow, error) { tx, commit, rollback, err := sqlchelpers.PrepareTx(ctx, d.pool, d.l) diff --git a/pkg/repository/shared.go b/pkg/repository/shared.go index 0e8972a05f..0274279b66 100644 --- a/pkg/repository/shared.go +++ b/pkg/repository/shared.go @@ -38,6 +38,7 @@ type sharedRepository struct { tenantIdWorkflowNameCache *expirable.LRU[string, *sqlcv1.ListWorkflowsByNamesRow] stepsInWorkflowVersionCache *expirable.LRU[uuid.UUID, []*sqlcv1.ListStepsByWorkflowVersionIdsRow] stepIdLabelsCache *expirable.LRU[uuid.UUID, []*sqlcv1.GetDesiredLabelsRow] + stepIdSlotRequestsCache *expirable.LRU[uuid.UUID, map[string]int32] celParser *cel.CELParser env *celgo.Env @@ -68,6 +69,7 @@ func newSharedRepository( tenantIdWorkflowNameCache := expirable.NewLRU(10000, func(key string, value *sqlcv1.ListWorkflowsByNamesRow) {}, 5*time.Second) stepsInWorkflowVersionCache := expirable.NewLRU(10000, func(key uuid.UUID, value []*sqlcv1.ListStepsByWorkflowVersionIdsRow) {}, 5*time.Minute) stepIdLabelsCache := expirable.NewLRU(10000, func(key uuid.UUID, value []*sqlcv1.GetDesiredLabelsRow) {}, 5*time.Minute) + stepIdSlotRequestsCache := expirable.NewLRU(10000, func(key uuid.UUID, value map[string]int32) {}, 5*time.Minute) celParser := cel.NewCELParser() @@ -97,6 +99,7 @@ func newSharedRepository( tenantIdWorkflowNameCache: tenantIdWorkflowNameCache, stepsInWorkflowVersionCache: stepsInWorkflowVersionCache, stepIdLabelsCache: stepIdLabelsCache, + stepIdSlotRequestsCache: stepIdSlotRequestsCache, celParser: celParser, env: env, taskLookupCache: lookupCache, diff --git a/pkg/repository/slack.go b/pkg/repository/slack.go index 08b223f58b..1945425ee9 100644 --- a/pkg/repository/slack.go +++ b/pkg/repository/slack.go @@ -4,6 +4,7 @@ import ( "context" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" ) diff --git a/pkg/repository/slot_types.go b/pkg/repository/slot_types.go new file mode 100644 index 0000000000..42f774e204 --- /dev/null +++ b/pkg/repository/slot_types.go @@ -0,0 +1,7 @@ +package repository + +// SlotType constants for worker slot configurations. 
diff --git a/pkg/repository/slot_types.go b/pkg/repository/slot_types.go
new file mode 100644
index 0000000000..42f774e204
--- /dev/null
+++ b/pkg/repository/slot_types.go
@@ -0,0 +1,7 @@
+package repository
+
+// SlotType constants for worker slot configurations.
+const (
+    SlotTypeDefault = "default"
+    SlotTypeDurable = "durable"
+)
diff --git a/pkg/repository/sns.go b/pkg/repository/sns.go
index 5921f68a7f..afe2ea392c 100644
--- a/pkg/repository/sns.go
+++ b/pkg/repository/sns.go
@@ -4,6 +4,7 @@ import (
     "context"

     "github.com/google/uuid"
+    "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
 )
diff --git a/pkg/repository/sqlcv1/lease.sql b/pkg/repository/sqlcv1/lease.sql
index 3027a4d976..32ac9e8374 100644
--- a/pkg/repository/sqlcv1/lease.sql
+++ b/pkg/repository/sqlcv1/lease.sql
@@ -50,11 +50,12 @@ RETURNING l.*;

 -- name: ListActiveWorkers :many
 SELECT
-    w."id",
-    w."maxRuns",
+    DISTINCT w."id",
     w."name"
 FROM
     "Worker" w
+JOIN
+    v1_worker_slot_config wsc ON w."id" = wsc."worker_id"
 WHERE
     w."tenantId" = @tenantId::uuid
     AND w."dispatcherId" IS NOT NULL
diff --git a/pkg/repository/sqlcv1/lease.sql.go b/pkg/repository/sqlcv1/lease.sql.go
index 2119947394..8fe7d0b712 100644
--- a/pkg/repository/sqlcv1/lease.sql.go
+++ b/pkg/repository/sqlcv1/lease.sql.go
@@ -106,11 +106,12 @@ func (q *Queries) GetLeasesToAcquire(ctx context.Context, db DBTX, arg GetLeases
 const listActiveWorkers = `-- name: ListActiveWorkers :many
 SELECT
-    w."id",
-    w."maxRuns",
+    DISTINCT w."id",
     w."name"
 FROM
     "Worker" w
+JOIN
+    v1_worker_slot_config wsc ON w."id" = wsc."worker_id"
 WHERE
     w."tenantId" = $1::uuid
     AND w."dispatcherId" IS NOT NULL
@@ -120,9 +121,8 @@ WHERE
 `

 type ListActiveWorkersRow struct {
-    ID      uuid.UUID `json:"id"`
-    MaxRuns int32     `json:"maxRuns"`
-    Name    string    `json:"name"`
+    ID   uuid.UUID `json:"id"`
+    Name string    `json:"name"`
 }

 func (q *Queries) ListActiveWorkers(ctx context.Context, db DBTX, tenantid uuid.UUID) ([]*ListActiveWorkersRow, error) {
@@ -134,7 +134,7 @@ func (q *Queries) ListActiveWorkers(ctx context.Context, db DBTX, tenantid uuid.
     var items []*ListActiveWorkersRow
     for rows.Next() {
         var i ListActiveWorkersRow
-        if err := rows.Scan(&i.ID, &i.MaxRuns, &i.Name); err != nil {
+        if err := rows.Scan(&i.ID, &i.Name); err != nil {
             return nil, err
         }
         items = append(items, &i)
diff --git a/pkg/repository/sqlcv1/models.go b/pkg/repository/sqlcv1/models.go
index 8845707516..0507d6ac81 100644
--- a/pkg/repository/sqlcv1/models.go
+++ b/pkg/repository/sqlcv1/models.go
@@ -2641,6 +2641,7 @@ type Step struct {
     RetryBackoffFactor pgtype.Float8 `json:"retryBackoffFactor"`
     RetryMaxBackoff    pgtype.Int4   `json:"retryMaxBackoff"`
     ScheduleTimeout    string        `json:"scheduleTimeout"`
+    IsDurable          bool          `json:"isDurable"`
 }

 type StepDesiredWorkerLabel struct {
@@ -3346,6 +3347,15 @@ type V1StepMatchCondition struct {
     ParentReadableID pgtype.Text `json:"parent_readable_id"`
 }

+type V1StepSlotRequest struct {
+    TenantID  uuid.UUID          `json:"tenant_id"`
+    StepID    uuid.UUID          `json:"step_id"`
+    SlotType  string             `json:"slot_type"`
+    Units     int32              `json:"units"`
+    CreatedAt pgtype.Timestamptz `json:"created_at"`
+    UpdatedAt pgtype.Timestamptz `json:"updated_at"`
+}
+
 type V1Task struct {
     ID         int64              `json:"id"`
     InsertedAt pgtype.Timestamptz `json:"inserted_at"`
@@ -3450,6 +3460,18 @@ type V1TaskRuntime struct {
     TimeoutAt pgtype.Timestamp `json:"timeout_at"`
 }

+type V1TaskRuntimeSlot struct {
+    TenantID       uuid.UUID          `json:"tenant_id"`
+    TaskID         int64              `json:"task_id"`
+    TaskInsertedAt pgtype.Timestamptz `json:"task_inserted_at"`
+    RetryCount     int32              `json:"retry_count"`
+    WorkerID       uuid.UUID          `json:"worker_id"`
+    SlotType       string             `json:"slot_type"`
+    Units          int32              `json:"units"`
+    CreatedAt      pgtype.Timestamptz `json:"created_at"`
+    UpdatedAt      pgtype.Timestamptz `json:"updated_at"`
+}
+
 type V1TaskStatusUpdatesTmp struct {
     TenantID     uuid.UUID          `json:"tenant_id"`
     RequeueAfter pgtype.Timestamptz `json:"requeue_after"`
@@ -3486,6 +3508,15 @@ type V1TasksOlap struct {
     ParentTaskExternalID *uuid.UUID `json:"parent_task_external_id"`
 }

+type V1WorkerSlotConfig struct {
+    TenantID  uuid.UUID          `json:"tenant_id"`
+    WorkerID  uuid.UUID          `json:"worker_id"`
+    SlotType  string             `json:"slot_type"`
+    MaxUnits  int32              `json:"max_units"`
+    CreatedAt pgtype.Timestamptz `json:"created_at"`
+    UpdatedAt pgtype.Timestamptz `json:"updated_at"`
+}
+
 type V1WorkflowConcurrency struct {
     ID         int64     `json:"id"`
     WorkflowID uuid.UUID `json:"workflow_id"`
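The three new model rows above relate in a simple way: v1_worker_slot_config caps capacity per (worker, slot_type), v1_task_runtime_slot records what running tasks consume, and availability is the difference. A self-contained sketch of that arithmetic, with stand-in types and illustrative values:

package main

import "fmt"

type slotConfig struct {
    slotType string
    maxUnits int32
}

type runtimeSlot struct {
    slotType string
    units    int32
}

// available computes remaining units per slot type: max_units minus used units.
func available(cfgs []slotConfig, used []runtimeSlot) map[string]int32 {
    out := map[string]int32{}
    for _, c := range cfgs {
        out[c.slotType] = c.maxUnits
    }
    for _, u := range used {
        out[u.slotType] -= u.units
    }
    return out
}

func main() {
    cfgs := []slotConfig{{"default", 100}, {"durable", 1000}}
    used := []runtimeSlot{{"default", 3}, {"durable", 40}}
    fmt.Println(available(cfgs, used)) // map[default:97 durable:960]
}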
diff --git a/pkg/repository/sqlcv1/queue.sql b/pkg/repository/sqlcv1/queue.sql
index 9f42d34ef1..16b2d73e48 100644
--- a/pkg/repository/sqlcv1/queue.sql
+++ b/pkg/repository/sqlcv1/queue.sql
@@ -50,37 +50,6 @@ WHERE
     AND w."isActive" = true
     AND w."isPaused" = false;

--- name: ListAvailableSlotsForWorkers :many
-WITH worker_max_runs AS (
-    SELECT
-        "id",
-        "maxRuns"
-    FROM
-        "Worker"
-    WHERE
-        "tenantId" = @tenantId::uuid
-        AND "id" = ANY(@workerIds::uuid[])
-), worker_filled_slots AS (
-    SELECT
-        worker_id,
-        COUNT(task_id) AS "filledSlots"
-    FROM
-        v1_task_runtime
-    WHERE
-        tenant_id = @tenantId::uuid
-        AND worker_id = ANY(@workerIds::uuid[])
-    GROUP BY
-        worker_id
-)
--- subtract the filled slots from the max runs to get the available slots
-SELECT
-    wmr."id",
-    wmr."maxRuns" - COALESCE(wfs."filledSlots", 0) AS "availableSlots"
-FROM
-    worker_max_runs wmr
-LEFT JOIN
-    worker_filled_slots wfs ON wmr."id" = wfs.worker_id;
-
 -- name: ListQueues :many
 SELECT
     *
@@ -230,6 +199,7 @@ WITH input AS (
         t.retry_count,
         i.worker_id,
         t.tenant_id,
+        t.step_id,
         CURRENT_TIMESTAMP + convert_duration_to_interval(t.step_timeout) AS timeout_at
     FROM
         v1_task t
@@ -259,6 +229,42 @@ WITH input AS (
     ON CONFLICT (task_id, task_inserted_at, retry_count) DO NOTHING
     -- only return the task ids that were successfully assigned
     RETURNING task_id, worker_id
+), slot_requests AS (
+    SELECT
+        t.id,
+        t.inserted_at,
+        t.retry_count,
+        t.worker_id,
+        t.tenant_id,
+        COALESCE(req.slot_type, 'default'::text) AS slot_type,
+        COALESCE(req.units, 1) AS units
+    FROM
+        updated_tasks t
+    LEFT JOIN
+        v1_step_slot_request req
+        ON req.step_id = t.step_id AND req.tenant_id = t.tenant_id
+), assigned_slots AS (
+    INSERT INTO v1_task_runtime_slot (
+        tenant_id,
+        task_id,
+        task_inserted_at,
+        retry_count,
+        worker_id,
+        slot_type,
+        units
+    )
+    SELECT
+        tenant_id,
+        id,
+        inserted_at,
+        retry_count,
+        worker_id,
+        slot_type,
+        units
+    FROM
+        slot_requests
+    ON CONFLICT (task_id, task_inserted_at, retry_count, slot_type) DO NOTHING
+    RETURNING task_id
 )
 SELECT
     asr.task_id,
@@ -280,6 +286,17 @@ FROM
 WHERE
     "stepId" = ANY(@stepIds::uuid[]);

+-- name: GetStepSlotRequests :many
+SELECT
+    step_id,
+    slot_type,
+    units
+FROM
+    v1_step_slot_request
+WHERE
+    step_id = ANY(@stepIds::uuid[])
+    AND tenant_id = @tenantId::uuid;
+
 -- name: GetQueuedCounts :many
 SELECT
     queue,
diff --git a/pkg/repository/sqlcv1/queue.sql.go b/pkg/repository/sqlcv1/queue.sql.go
index fe8a3e48ef..b1b1e6f32d 100644
--- a/pkg/repository/sqlcv1/queue.sql.go
+++ b/pkg/repository/sqlcv1/queue.sql.go
@@ -333,45 +333,39 @@ func (q *Queries) GetQueuedCounts(ctx context.Context, db DBTX, tenantid uuid.UU
     return items, nil
 }

-const listActionsForWorkers = `-- name: ListActionsForWorkers :many
+const getStepSlotRequests = `-- name: GetStepSlotRequests :many
 SELECT
-    w."id" as "workerId",
-    a."actionId"
+    step_id,
+    slot_type,
+    units
 FROM
-    "Worker" w
-LEFT JOIN
-    "_ActionToWorker" atw ON w."id" = atw."B"
-LEFT JOIN
-    "Action" a ON atw."A" = a."id"
+    v1_step_slot_request
 WHERE
-    w."tenantId" = $1::uuid
-    AND w."id" = ANY($2::uuid[])
-    AND w."dispatcherId" IS NOT NULL
-    AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
-    AND w."isActive" = true
-    AND w."isPaused" = false
+    step_id = ANY($1::uuid[])
+    AND tenant_id = $2::uuid
 `

-type ListActionsForWorkersParams struct {
-    Tenantid  uuid.UUID   `json:"tenantid"`
-    Workerids []uuid.UUID `json:"workerids"`
+type GetStepSlotRequestsParams struct {
+    Stepids  []uuid.UUID `json:"stepids"`
+    Tenantid uuid.UUID   `json:"tenantid"`
 }

-type ListActionsForWorkersRow struct {
-    WorkerId uuid.UUID   `json:"workerId"`
-    ActionId pgtype.Text `json:"actionId"`
+type GetStepSlotRequestsRow struct {
+    StepID   uuid.UUID `json:"step_id"`
+    SlotType string    `json:"slot_type"`
+    Units    int32     `json:"units"`
 }

-func (q *Queries) ListActionsForWorkers(ctx context.Context, db DBTX, arg ListActionsForWorkersParams) ([]*ListActionsForWorkersRow, error) {
-    rows, err := db.Query(ctx, listActionsForWorkers, arg.Tenantid, arg.Workerids)
+func (q *Queries) GetStepSlotRequests(ctx context.Context, db DBTX, arg GetStepSlotRequestsParams) ([]*GetStepSlotRequestsRow, error) {
+    rows, err := db.Query(ctx, getStepSlotRequests, arg.Stepids, arg.Tenantid)
     if err != nil {
         return nil, err
     }
     defer rows.Close()
-    var items []*ListActionsForWorkersRow
+    var items []*GetStepSlotRequestsRow
     for rows.Next() {
-        var i ListActionsForWorkersRow
-        if err := rows.Scan(&i.WorkerId, &i.ActionId); err != nil {
+        var i GetStepSlotRequestsRow
+        if err := rows.Scan(&i.StepID, &i.SlotType, &i.Units); err != nil {
             return nil, err
         }
         items = append(items, &i)
@@ -382,58 +376,45 @@ func (q *Queries) ListActionsForWorkers(ctx context.Context, db DBTX, arg ListAc
     return items, nil
 }

-const listAvailableSlotsForWorkers = `-- name: ListAvailableSlotsForWorkers :many
-WITH worker_max_runs AS (
-    SELECT
-        "id",
-        "maxRuns"
-    FROM
-        "Worker"
-    WHERE
-        "tenantId" = $1::uuid
-        AND "id" = ANY($2::uuid[])
-), worker_filled_slots AS (
-    SELECT
-        worker_id,
-        COUNT(task_id) AS "filledSlots"
-    FROM
-        v1_task_runtime
-    WHERE
-        tenant_id = $1::uuid
-        AND worker_id = ANY($2::uuid[])
-    GROUP BY
-        worker_id
-)
+const listActionsForWorkers = `-- name: ListActionsForWorkers :many
 SELECT
-    wmr."id",
-    wmr."maxRuns" - COALESCE(wfs."filledSlots", 0) AS "availableSlots"
+    w."id" as "workerId",
+    a."actionId"
 FROM
-    worker_max_runs wmr
+    "Worker" w
 LEFT JOIN
-    worker_filled_slots wfs ON wmr."id" = wfs.worker_id
+    "_ActionToWorker" atw ON w."id" = atw."B"
+LEFT JOIN
+    "Action" a ON atw."A" = a."id"
+WHERE
+    w."tenantId" = $1::uuid
+    AND w."id" = ANY($2::uuid[])
+    AND w."dispatcherId" IS NOT NULL
+    AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
+    AND w."isActive" = true
+    AND w."isPaused" = false
 `

-type ListAvailableSlotsForWorkersParams struct {
+type ListActionsForWorkersParams struct {
     Tenantid  uuid.UUID   `json:"tenantid"`
     Workerids []uuid.UUID `json:"workerids"`
 }

-type ListAvailableSlotsForWorkersRow struct {
-    ID             uuid.UUID `json:"id"`
-    AvailableSlots int32     `json:"availableSlots"`
+type ListActionsForWorkersRow struct {
+    WorkerId uuid.UUID   `json:"workerId"`
+    ActionId pgtype.Text `json:"actionId"`
 }

-// subtract the filled slots from the max runs to get the available slots
-func (q *Queries) ListAvailableSlotsForWorkers(ctx context.Context, db DBTX, arg ListAvailableSlotsForWorkersParams) ([]*ListAvailableSlotsForWorkersRow, error) {
-    rows, err := db.Query(ctx, listAvailableSlotsForWorkers, arg.Tenantid, arg.Workerids)
+func (q *Queries) ListActionsForWorkers(ctx context.Context, db DBTX, arg ListActionsForWorkersParams) ([]*ListActionsForWorkersRow, error) {
+    rows, err := db.Query(ctx, listActionsForWorkers, arg.Tenantid, arg.Workerids)
     if err != nil {
         return nil, err
     }
     defer rows.Close()
-    var items []*ListAvailableSlotsForWorkersRow
+    var items []*ListActionsForWorkersRow
     for rows.Next() {
-        var i ListAvailableSlotsForWorkersRow
-        if err := rows.Scan(&i.ID, &i.AvailableSlots); err != nil {
+        var i ListActionsForWorkersRow
+        if err := rows.Scan(&i.WorkerId, &i.ActionId); err != nil {
             return nil, err
         }
         items = append(items, &i)
@@ -886,6 +867,7 @@ WITH input AS (
         t.retry_count,
         i.worker_id,
         t.tenant_id,
+        t.step_id,
         CURRENT_TIMESTAMP + convert_duration_to_interval(t.step_timeout) AS timeout_at
     FROM
         v1_task t
@@ -915,6 +897,42 @@ WITH input AS (
     ON CONFLICT (task_id, task_inserted_at, retry_count) DO NOTHING
     -- only return the task ids that were successfully assigned
     RETURNING task_id, worker_id
+), slot_requests AS (
+    SELECT
+        t.id,
+        t.inserted_at,
+        t.retry_count,
+        t.worker_id,
+        t.tenant_id,
+        COALESCE(req.slot_type, 'default'::text) AS slot_type,
+        COALESCE(req.units, 1) AS units
+    FROM
+        updated_tasks t
+    LEFT JOIN
+        v1_step_slot_request req
+        ON req.step_id = t.step_id AND req.tenant_id = t.tenant_id
+), assigned_slots AS (
+    INSERT INTO v1_task_runtime_slot (
+        tenant_id,
+        task_id,
+        task_inserted_at,
+        retry_count,
+        worker_id,
+        slot_type,
+        units
+    )
+    SELECT
+        tenant_id,
+        id,
+        inserted_at,
+        retry_count,
+        worker_id,
+        slot_type,
+        units
+    FROM
+        slot_requests
+    ON CONFLICT (task_id, task_inserted_at, retry_count, slot_type) DO NOTHING
+    RETURNING task_id
 )
 SELECT
     asr.task_id,
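The LEFT JOIN plus COALESCE in the slot_requests CTE above means a step with no row in v1_step_slot_request consumes exactly one unit of the "default" slot type. A short Go sketch of the same fallback, with the request-map shape matching what GetStepSlotRequests returns:

// slotRequestsForStep mirrors the SQL fallback: no explicit request
// means one unit of the default slot type.
func slotRequestsForStep(requests map[string]int32) map[string]int32 {
    if len(requests) == 0 {
        return map[string]int32{"default": 1}
    }
    return requests
}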
diff --git a/pkg/repository/sqlcv1/tasks-overwrite.go b/pkg/repository/sqlcv1/tasks-overwrite.go
index ef822e3b36..8538b21355 100644
--- a/pkg/repository/sqlcv1/tasks-overwrite.go
+++ b/pkg/repository/sqlcv1/tasks-overwrite.go
@@ -729,13 +729,16 @@ WITH input AS (
     ORDER BY
         task_id, task_inserted_at, retry_count
     FOR UPDATE
+), deleted_slots AS (
+    DELETE FROM
+        v1_task_runtime_slot
+    WHERE
+        (task_id, task_inserted_at, retry_count) IN (SELECT task_id, task_inserted_at, retry_count FROM input)
 ), deleted_runtimes AS (
     DELETE FROM
         v1_task_runtime
     WHERE
         (task_id, task_inserted_at, retry_count) IN (SELECT task_id, task_inserted_at, retry_count FROM runtimes_to_delete)
-    -- return a constant for ordering
-    RETURNING 1 AS cte_order
 )
 SELECT
     t.queue,
diff --git a/pkg/repository/sqlcv1/tasks-overwrite.sql b/pkg/repository/sqlcv1/tasks-overwrite.sql
deleted file mode 100644
index a2d843af59..0000000000
--- a/pkg/repository/sqlcv1/tasks-overwrite.sql
+++ /dev/null
@@ -1,146 +0,0 @@
--- NOTE: this file doesn't typically get generated, since we need to overwrite the
--- behavior of `@dagIds` and `@dagInsertedAts` to be nullable. It can be generated
--- when we'd like to change the query.
-
--- name: CreateTasks :many
-WITH input AS (
-    SELECT
-        *
-    FROM
-        (
-            SELECT
-                unnest(@tenantIds::uuid[]) AS tenant_id,
-                unnest(@queues::text[]) AS queue,
-                unnest(@actionIds::text[]) AS action_id,
-                unnest(@stepIds::uuid[]) AS step_id,
-                unnest(@stepReadableIds::text[]) AS step_readable_id,
-                unnest(@workflowIds::uuid[]) AS workflow_id,
-                unnest(@scheduleTimeouts::text[]) AS schedule_timeout,
-                unnest(@stepTimeouts::text[]) AS step_timeout,
-                unnest(@priorities::integer[]) AS priority,
-                unnest(cast(@stickies::text[] as v1_sticky_strategy[])) AS sticky,
-                unnest(@desiredWorkerIds::uuid[]) AS desired_worker_id,
-                unnest(@externalIds::uuid[]) AS external_id,
-                unnest(@displayNames::text[]) AS display_name,
-                unnest(@inputs::jsonb[]) AS input,
-                unnest(@retryCounts::integer[]) AS retry_count,
-                unnest(@additionalMetadatas::jsonb[]) AS additional_metadata,
-                unnest(cast(@initialStates::text[] as v1_task_initial_state[])) AS initial_state,
-                -- NOTE: these are nullable, so sqlc doesn't support casting to a type
-                unnest(@dagIds::bigint[]) AS dag_id,
-                unnest(@dagInsertedAts::timestamptz[]) AS dag_inserted_at
-        ) AS subquery
-)
-INSERT INTO v1_task (
-    tenant_id,
-    queue,
-    action_id,
-    step_id,
-    step_readable_id,
-    workflow_id,
-    schedule_timeout,
-    step_timeout,
-    priority,
-    sticky,
-    desired_worker_id,
-    external_id,
-    display_name,
-    input,
-    retry_count,
-    additional_metadata,
-    initial_state,
-    dag_id,
-    dag_inserted_at
-)
-SELECT
-    i.tenant_id,
-    i.queue,
-    i.action_id,
-    i.step_id,
-    i.step_readable_id,
-    i.workflow_id,
-    i.schedule_timeout,
-    i.step_timeout,
-    i.priority,
-    i.sticky,
-    i.desired_worker_id,
-    i.external_id,
-    i.display_name,
-    i.input,
-    i.retry_count,
-    i.additional_metadata,
-    i.initial_state,
-    i.dag_id,
-    i.dag_inserted_at
-FROM
-    input i
-RETURNING
-    *;
-
--- name: ReplayTasks :many
--- NOTE: at this point, we assume we have a lock on tasks and therefor we can update the tasks
-WITH input AS (
-    SELECT
-        *
-    FROM
-        (
-            SELECT
-                unnest(@taskIds::bigint[]) AS task_id,
-                unnest(@inputs::jsonb[]) AS input,
-                unnest(cast(@initialStates::text[] as v1_task_initial_state[])) AS initial_state,
-                unnest_nd_1d(@concurrencyStrategyIds::bigint[][]) AS concurrency_strategy_ids,
-                unnest_nd_1d(@concurrencyKeys::text[][]) AS concurrency_keys,
-                unnest(@initialStateReason::text[]) AS initial_state_reason
-        ) AS subquery
-)
-UPDATE
-    v1_task
-SET
-    retry_count = retry_count + 1,
-    app_retry_count = 0,
-    internal_retry_count = 0,
-    input = CASE WHEN i.input IS NOT NULL THEN i.input ELSE v1_task.input END,
-    initial_state = i.initial_state,
-    concurrency_strategy_ids = i.concurrency_strategy_ids,
-    concurrency_keys = i.concurrency_keys,
-    initial_state_reason = i.initial_state_reason
-FROM
-    input i
-WHERE
-    v1_task.id = i.task_id
-RETURNING
-    v1_task.*;
-
--- name: CreateTaskExpressionEvals :exec
-WITH input AS (
-    SELECT
-        *
-    FROM
-        (
-            SELECT
-                unnest(@taskIds::bigint[]) AS task_id,
-                unnest(@taskInsertedAts::timestamptz[]) AS task_inserted_at,
-                unnest(@keys::text[]) AS key,
-                unnest(@valuesStr::text[]) AS value_str,
-                unnest(cast(@kinds::text[] as "StepExpressionKind"[])) AS kind
-        ) AS subquery
-)
-INSERT INTO v1_task_expression_eval (
-    key,
-    task_id,
-    task_inserted_at,
-    value_str,
-    kind
-)
-SELECT
-    i.key,
-    i.task_id,
-    i.task_inserted_at,
-    i.value_str,
-    i.kind
-FROM
-    input i
-ON CONFLICT (key, task_id, task_inserted_at, kind) DO UPDATE
-SET
-    value_str = EXCLUDED.value_str,
-    value_int = EXCLUDED.value_int;
diff --git a/pkg/repository/sqlcv1/tasks.sql b/pkg/repository/sqlcv1/tasks.sql
index 0929837765..4639bfcb18 100644
--- a/pkg/repository/sqlcv1/tasks.sql
+++ b/pkg/repository/sqlcv1/tasks.sql
@@ -912,6 +912,11 @@ WITH task AS (
     ORDER BY
         task_id, task_inserted_at, retry_count
     FOR UPDATE
+), deleted_slots AS (
+    DELETE FROM v1_task_runtime_slot
+    WHERE
+        (task_id, task_inserted_at, retry_count) IN (SELECT task_id, task_inserted_at, retry_count FROM locked_runtime)
+    RETURNING task_id
 )
 UPDATE
     v1_task_runtime
@@ -970,6 +975,12 @@ WITH locked_trs AS (
     LIMIT @batchSize::int
     FOR UPDATE SKIP LOCKED
+), deleted_slots AS (
+    DELETE FROM v1_task_runtime_slot
+    WHERE (task_id, task_inserted_at, retry_count) IN (
+        SELECT task_id, task_inserted_at, retry_count
+        FROM locked_trs
+    )
 )
 DELETE FROM v1_task_runtime
 WHERE (task_id, task_inserted_at, retry_count) IN (
     SELECT task_id, task_inserted_at, retry_count
@@ -1160,7 +1171,13 @@ SELECT
 FROM
     running_tasks;

 -- name: FindOldestRunningTask :one
-SELECT *
+SELECT
+    task_id,
+    task_inserted_at,
+    retry_count,
+    worker_id,
+    tenant_id,
+    timeout_at
 FROM
     v1_task_runtime
 ORDER BY
     task_id, task_inserted_at
 LIMIT 1;
diff --git a/pkg/repository/sqlcv1/tasks.sql.go b/pkg/repository/sqlcv1/tasks.sql.go
index 55bb058551..06daf6d4ef 100644
--- a/pkg/repository/sqlcv1/tasks.sql.go
+++ b/pkg/repository/sqlcv1/tasks.sql.go
@@ -125,7 +125,13 @@ WITH locked_trs AS (
     LIMIT $1::int
     FOR UPDATE SKIP LOCKED
+), deleted_slots AS (
+    DELETE FROM v1_task_runtime_slot
+    WHERE (task_id, task_inserted_at, retry_count) IN (
+        SELECT task_id, task_inserted_at, retry_count
+        FROM locked_trs
+    )
 )
 DELETE FROM v1_task_runtime
 WHERE (task_id, task_inserted_at, retry_count) IN (
     SELECT task_id, task_inserted_at, retry_count
     FROM locked_trs
@@ -603,7 +603,13 @@ func (q *Queries) FilterValidTasks(ctx context.Context, db DBTX, arg FilterValid

 const findOldestRunningTask = `-- name: FindOldestRunningTask :one
-SELECT task_id, task_inserted_at, retry_count, worker_id, tenant_id, timeout_at
+SELECT
+    task_id,
+    task_inserted_at,
+    retry_count,
+    worker_id,
+    tenant_id,
+    timeout_at
 FROM
     v1_task_runtime
 ORDER BY
     task_id, task_inserted_at
 LIMIT 1
@@ -2191,6 +2197,11 @@ WITH task AS (
     ORDER BY
         task_id, task_inserted_at, retry_count
     FOR UPDATE
+), deleted_slots AS (
+    DELETE FROM v1_task_runtime_slot
+    WHERE
+        (task_id, task_inserted_at, retry_count) IN (SELECT task_id, task_inserted_at, retry_count FROM locked_runtime)
+    RETURNING task_id
 )
 UPDATE
     v1_task_runtime
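Every delete path above now removes v1_task_runtime_slot rows together with their v1_task_runtime rows, so slot rows should never outlive their runtimes. A hedged invariant check one could run in tests after these changes; the connection string and the pgx usage are illustrative, not part of the diff:

package main

import (
    "context"
    "fmt"

    "github.com/jackc/pgx/v5"
)

func main() {
    ctx := context.Background()
    // Illustrative DSN; point this at a local test database.
    conn, err := pgx.Connect(ctx, "postgres://localhost:5432/hatchet")
    if err != nil {
        panic(err)
    }
    defer conn.Close(ctx)

    // Count slot rows whose runtime row is gone; expected to be zero.
    var orphans int
    err = conn.QueryRow(ctx, `
        SELECT COUNT(*)
        FROM v1_task_runtime_slot s
        LEFT JOIN v1_task_runtime r
            ON (r.task_id, r.task_inserted_at, r.retry_count) = (s.task_id, s.task_inserted_at, s.retry_count)
        WHERE r.task_id IS NULL`).Scan(&orphans)
    if err != nil {
        panic(err)
    }
    fmt.Println("orphaned slot rows:", orphans)
}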
diff --git a/pkg/repository/sqlcv1/tenant_limits.sql b/pkg/repository/sqlcv1/tenant_limits.sql
index 2ab1e300e8..1818306169 100644
--- a/pkg/repository/sqlcv1/tenant_limits.sql
+++ b/pkg/repository/sqlcv1/tenant_limits.sql
@@ -93,10 +93,3 @@ FROM "Worker"
 WHERE "tenantId" = @tenantId::uuid
 AND "lastHeartbeatAt" >= NOW() - '30 seconds'::INTERVAL
 AND "isActive" = true;
-
--- name: CountTenantWorkerSlots :one
-SELECT COALESCE(SUM(w."maxRuns"), 0)::int AS "count"
-FROM "Worker" w
-WHERE "tenantId" = @tenantId::uuid
-AND "lastHeartbeatAt" >= NOW() - '30 seconds'::INTERVAL
-AND "isActive" = true;
diff --git a/pkg/repository/sqlcv1/tenant_limits.sql.go b/pkg/repository/sqlcv1/tenant_limits.sql.go
index 8d562be2fc..dcc5993948 100644
--- a/pkg/repository/sqlcv1/tenant_limits.sql.go
+++ b/pkg/repository/sqlcv1/tenant_limits.sql.go
@@ -12,21 +12,6 @@ import (
     "github.com/jackc/pgx/v5/pgtype"
 )

-const countTenantWorkerSlots = `-- name: CountTenantWorkerSlots :one
-SELECT COALESCE(SUM(w."maxRuns"), 0)::int AS "count"
-FROM "Worker" w
-WHERE "tenantId" = $1::uuid
-AND "lastHeartbeatAt" >= NOW() - '30 seconds'::INTERVAL
-AND "isActive" = true
-`
-
-func (q *Queries) CountTenantWorkerSlots(ctx context.Context, db DBTX, tenantid uuid.UUID) (int32, error) {
-    row := db.QueryRow(ctx, countTenantWorkerSlots, tenantid)
-    var count int32
-    err := row.Scan(&count)
-    return count, err
-}
-
 const countTenantWorkers = `-- name: CountTenantWorkers :one
 SELECT COUNT(distinct id) AS "count"
 FROM "Worker"
diff --git a/pkg/repository/sqlcv1/tenants.sql b/pkg/repository/sqlcv1/tenants.sql
index 91d13ce3ac..685e387456 100644
--- a/pkg/repository/sqlcv1/tenants.sql
+++ b/pkg/repository/sqlcv1/tenants.sql
@@ -652,8 +652,7 @@ WHERE "id" = @id::uuid;
 -- name: GetTenantUsageData :one
 WITH active_workers AS (
     SELECT
-        workers."id",
-        workers."maxRuns"
+        workers."id"
     FROM
         "Worker" workers
     WHERE
@@ -662,20 +661,13 @@ WITH active_workers AS (
         AND workers."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
         AND workers."isActive" = true
         AND workers."isPaused" = false
-), worker_slots AS (
-    SELECT
-        aw."id" AS worker_id,
-        aw."maxRuns" - (
-            SELECT COUNT(*)
-            FROM v1_task_runtime runtime
-            WHERE
-                runtime.tenant_id = @tenantId::uuid AND
-                runtime.worker_id = aw."id"
-        ) AS "remainingSlots"
-    FROM
-        active_workers aw
 )
 SELECT
     (SELECT COUNT(*) FROM active_workers) AS "workerCount",
-    COALESCE((SELECT SUM("maxRuns") - SUM("remainingSlots") FROM active_workers aw JOIN worker_slots ws ON aw."id" = ws.worker_id), 0)::bigint AS "usedWorkerSlotsCount",
+    COALESCE((
+        SELECT SUM(s.units)
+        FROM v1_task_runtime_slot s
+        WHERE s.tenant_id = @tenantId::uuid
+        AND s.worker_id IN (SELECT "id" FROM active_workers)
+    ), 0)::bigint AS "usedWorkerSlotsCount",
     (SELECT COUNT(*) FROM "TenantMember" WHERE "tenantId" = @tenantId::uuid) AS "tenantMembersCount";
diff --git a/pkg/repository/sqlcv1/tenants.sql.go b/pkg/repository/sqlcv1/tenants.sql.go
index 8f0a9314ba..a44235f00d 100644
--- a/pkg/repository/sqlcv1/tenants.sql.go
+++ b/pkg/repository/sqlcv1/tenants.sql.go
@@ -761,8 +761,7 @@ func (q *Queries) GetTenantTotalQueueMetrics(ctx context.Context, db DBTX, arg G
 const getTenantUsageData = `-- name: GetTenantUsageData :one
 WITH active_workers AS (
     SELECT
-        workers."id",
-        workers."maxRuns"
+        workers."id"
     FROM
         "Worker" workers
     WHERE
@@ -771,22 +770,15 @@ WITH active_workers AS (
         AND workers."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
         AND workers."isActive" = true
         AND workers."isPaused" = false
-), worker_slots AS (
-    SELECT
-        aw."id" AS worker_id,
-        aw."maxRuns" - (
-            SELECT COUNT(*)
-            FROM v1_task_runtime runtime
-            WHERE
-                runtime.tenant_id = $1::uuid AND
-                runtime.worker_id = aw."id"
-        ) AS "remainingSlots"
-    FROM
-        active_workers aw
 )
 SELECT
     (SELECT COUNT(*) FROM active_workers) AS "workerCount",
-    COALESCE((SELECT SUM("maxRuns") - SUM("remainingSlots") FROM active_workers aw JOIN worker_slots ws ON aw."id" = ws.worker_id), 0)::bigint AS "usedWorkerSlotsCount",
+    COALESCE((
+        SELECT SUM(s.units)
+        FROM v1_task_runtime_slot s
+        WHERE s.tenant_id = $1::uuid
+        AND s.worker_id IN (SELECT "id" FROM active_workers)
+    ), 0)::bigint AS "usedWorkerSlotsCount",
     (SELECT COUNT(*) FROM "TenantMember" WHERE "tenantId" = $1::uuid) AS "tenantMembersCount"
 `
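After this change, usedWorkerSlotsCount is simply the sum of units held by a tenant's active workers in v1_task_runtime_slot, rather than maxRuns minus remaining slots. A tiny runnable sketch of that arithmetic, with illustrative values only:

package main

import "fmt"

func main() {
    // units held per active worker, as summed from v1_task_runtime_slot
    unitsByWorker := map[string]int64{"worker-a": 5, "worker-b": 12}

    var usedWorkerSlots int64
    for _, units := range unitsByWorker {
        usedWorkerSlots += units
    }
    fmt.Println(usedWorkerSlots) // 17
}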
diff --git a/pkg/repository/sqlcv1/workers.sql b/pkg/repository/sqlcv1/workers.sql
index dac663170b..7a14994475 100644
--- a/pkg/repository/sqlcv1/workers.sql
+++ b/pkg/repository/sqlcv1/workers.sql
@@ -10,22 +10,111 @@ SELECT
 FROM
     "WorkerLabel" wl
 WHERE wl."workerId" = ANY(@workerIds::uuid[]);

--- name: ListWorkersWithSlotCount :many
+-- name: ListWorkerSlotConfigs :many
 SELECT
-    sqlc.embed(workers),
-    ww."url" AS "webhookUrl",
-    ww."id" AS "webhookId",
-    workers."maxRuns" - (
-        SELECT COUNT(*)
-        FROM v1_task_runtime runtime
-        WHERE
-            runtime.tenant_id = workers."tenantId" AND
-            runtime.worker_id = workers."id"
-    ) AS "remainingSlots"
+    worker_id,
+    slot_type,
+    max_units
 FROM
-    "Worker" workers
+    v1_worker_slot_config
+WHERE
+    tenant_id = @tenantId::uuid
+    AND worker_id = ANY(@workerIds::uuid[]);
+
+-- name: CreateWorkerSlotConfigs :exec
+INSERT INTO v1_worker_slot_config (
+    tenant_id,
+    worker_id,
+    slot_type,
+    max_units,
+    created_at,
+    updated_at
+)
+SELECT
+    @tenantId::uuid,
+    @workerId::uuid,
+    unnest(@slotTypes::text[]),
+    unnest(@maxUnits::integer[]),
+    CURRENT_TIMESTAMP,
+    CURRENT_TIMESTAMP
+-- NOTE: ON CONFLICT can be removed after the 0_76_d migration is run to remove insert triggers added in 0_76
+ON CONFLICT (tenant_id, worker_id, slot_type) DO UPDATE SET
+    max_units = EXCLUDED.max_units,
+    updated_at = CURRENT_TIMESTAMP;
+
+-- name: ListAvailableSlotsForWorkers :many
+WITH worker_capacities AS (
+    SELECT
+        worker_id,
+        max_units
+    FROM
+        v1_worker_slot_config
+    WHERE
+        tenant_id = @tenantId::uuid
+        AND worker_id = ANY(@workerIds::uuid[])
+        AND slot_type = @slotType::text
+), worker_used_slots AS (
+    SELECT
+        worker_id,
+        SUM(units) AS used_units
+    FROM
+        v1_task_runtime_slot
+    WHERE
+        tenant_id = @tenantId::uuid
+        AND worker_id = ANY(@workerIds::uuid[])
+        AND slot_type = @slotType::text
+    GROUP BY
+        worker_id
+)
+SELECT
+    wc.worker_id AS "id",
+    wc.max_units - COALESCE(wus.used_units, 0) AS "availableSlots"
+FROM
+    worker_capacities wc
+LEFT JOIN
+    worker_used_slots wus ON wc.worker_id = wus.worker_id;
+
+-- name: ListAvailableSlotsForWorkersAndTypes :many
+WITH worker_capacities AS (
+    SELECT
+        worker_id,
+        slot_type,
+        max_units
+    FROM
+        v1_worker_slot_config
+    WHERE
+        tenant_id = @tenantId::uuid
+        AND worker_id = ANY(@workerIds::uuid[])
+        AND slot_type = ANY(@slotTypes::text[])
+), worker_used_slots AS (
+    SELECT
+        worker_id,
+        slot_type,
+        SUM(units) AS used_units
+    FROM
+        v1_task_runtime_slot
+    WHERE
+        tenant_id = @tenantId::uuid
+        AND worker_id = ANY(@workerIds::uuid[])
+        AND slot_type = ANY(@slotTypes::text[])
+    GROUP BY
+        worker_id,
+        slot_type
+)
+SELECT
+    wc.worker_id AS "id",
+    wc.slot_type AS "slotType",
+    wc.max_units - COALESCE(wus.used_units, 0) AS "availableSlots"
+FROM
+    worker_capacities wc
 LEFT JOIN
-    "WebhookWorker" ww ON workers."webhookId" = ww."id"
+    worker_used_slots wus ON wc.worker_id = wus.worker_id AND wc.slot_type = wus.slot_type;
+
+-- name: ListWorkers :many
+SELECT
+    sqlc.embed(workers)
+FROM
+    "Worker" workers
 WHERE
     workers."tenantId" = @tenantId
     AND (
@@ -43,31 +132,24 @@ WHERE
     )
     AND (
         sqlc.narg('assignable')::boolean IS NULL OR
-        workers."maxRuns" IS NULL OR
-        (sqlc.narg('assignable')::boolean AND workers."maxRuns" > (
-            SELECT COUNT(*)
-            FROM "StepRun" srs
-            WHERE srs."workerId" = workers."id" AND srs."status" = 'RUNNING'
+        (sqlc.narg('assignable')::boolean AND (
+            SELECT COALESCE(SUM(cap.max_units), 0)
+            FROM v1_worker_slot_config cap
+            WHERE cap.tenant_id = workers."tenantId" AND cap.worker_id = workers."id"
+        ) > (
+            SELECT COALESCE(SUM(runtime.units), 0)
+            FROM v1_task_runtime_slot runtime
+            WHERE runtime.tenant_id = workers."tenantId" AND runtime.worker_id = workers."id"
         ))
     )
 GROUP BY
-    workers."id", ww."url", ww."id";
+    workers."id";

 -- name: GetWorkerById :one
 SELECT
-    sqlc.embed(w),
-    ww."url" AS "webhookUrl",
-    w."maxRuns" - (
-        SELECT COUNT(*)
-        FROM v1_task_runtime runtime
-        WHERE
-            runtime.tenant_id = w."tenantId" AND
-            runtime.worker_id = w."id"
-    ) AS "remainingSlots"
+    sqlc.embed(w)
 FROM
     "Worker" w
-LEFT JOIN
-    "WebhookWorker" ww ON w."webhookId" = ww."id"
 WHERE
     w."id" = @id::uuid;
@@ -108,14 +190,32 @@ LIMIT
     COALESCE(sqlc.narg('limit')::int, 100);

 -- name: ListTotalActiveSlotsPerTenant :many
-SELECT "tenantId", SUM("maxRuns") AS "totalActiveSlots"
-FROM "Worker"
+SELECT
+    wc.tenant_id AS "tenantId",
+    SUM(wc.max_units) AS "totalActiveSlots"
+FROM v1_worker_slot_config wc
+JOIN "Worker" w ON w."id" = wc.worker_id AND w."tenantId" = wc.tenant_id
 WHERE
-    "dispatcherId" IS NOT NULL
-    AND "lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
-    AND "isActive" = true
-    AND "isPaused" = false
-GROUP BY "tenantId"
+    w."dispatcherId" IS NOT NULL
+    AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
+    AND w."isActive" = true
+    AND w."isPaused" = false
+GROUP BY wc.tenant_id
+;
+
+-- name: ListActiveSlotsPerTenantAndSlotType :many
+SELECT
+    wc.tenant_id AS "tenantId",
+    wc.slot_type AS "slotType",
+    SUM(wc.max_units) AS "activeSlots"
+FROM v1_worker_slot_config wc
+JOIN "Worker" w ON w."id" = wc.worker_id AND w."tenantId" = wc.tenant_id
+WHERE
+    w."dispatcherId" IS NOT NULL
+    AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
+    AND w."isActive" = true
+    AND w."isPaused" = false
+GROUP BY wc.tenant_id, wc.slot_type
 ;

 -- name: ListActiveSDKsPerTenant :many
@@ -210,7 +310,6 @@ UPDATE
 SET
     "updatedAt" = CURRENT_TIMESTAMP,
     "dispatcherId" = coalesce(sqlc.narg('dispatcherId')::uuid, "dispatcherId"),
-    "maxRuns" = coalesce(sqlc.narg('maxRuns')::int, "maxRuns"),
     "lastHeartbeatAt" = coalesce(sqlc.narg('lastHeartbeatAt')::timestamp, "lastHeartbeatAt"),
     "isActive" = coalesce(sqlc.narg('isActive')::boolean, "isActive"),
     "isPaused" = coalesce(sqlc.narg('isPaused')::boolean, "isPaused")
@@ -350,8 +449,6 @@ INSERT INTO "Worker" (
     "tenantId",
     "name",
     "dispatcherId",
-    "maxRuns",
-    "webhookId",
     "type",
     "sdkVersion",
     "language",
@@ -365,8 +462,6 @@ INSERT INTO "Worker" (
     @tenantId::uuid,
     @name::text,
     @dispatcherId::uuid,
-    sqlc.narg('maxRuns')::int,
-    sqlc.narg('webhookId')::uuid,
     sqlc.narg('type')::"WorkerType",
     sqlc.narg('sdkVersion')::text,
     sqlc.narg('language')::"WorkerSDKS",
diff --git a/pkg/repository/sqlcv1/workers.sql.go b/pkg/repository/sqlcv1/workers.sql.go
index 320401d7dd..a317a60f87 100644
--- a/pkg/repository/sqlcv1/workers.sql.go
+++ b/pkg/repository/sqlcv1/workers.sql.go
@@ -20,8 +20,6 @@ INSERT INTO "Worker" (
     "tenantId",
     "name",
     "dispatcherId",
-    "maxRuns",
-    "webhookId",
     "type",
     "sdkVersion",
     "language",
@@ -35,14 +33,12 @@ INSERT INTO "Worker" (
     $1::uuid,
     $2::text,
     $3::uuid,
-    $4::int,
-    $5::uuid,
-    $6::"WorkerType",
+    $4::"WorkerType",
+    $5::text,
+    $6::"WorkerSDKS",
     $7::text,
-    $8::"WorkerSDKS",
-    $9::text,
-    $10::text,
-    $11::text
+    $8::text,
+    $9::text
 ) RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", "sdkVersion"
 `

@@ -50,8 +46,6 @@ type CreateWorkerParams struct {
     Tenantid     uuid.UUID      `json:"tenantid"`
     Name         string         `json:"name"`
     Dispatcherid uuid.UUID      `json:"dispatcherid"`
-    MaxRuns      pgtype.Int4    `json:"maxRuns"`
-    WebhookId    *uuid.UUID     `json:"webhookId"`
     Type         NullWorkerType `json:"type"`
     SdkVersion   pgtype.Text    `json:"sdkVersion"`
     Language     NullWorkerSDKS `json:"language"`
@@ -65,8 +59,6 @@ func (q *Queries) CreateWorker(ctx context.Context, db DBTX, arg CreateWorkerPar
         arg.Tenantid,
         arg.Name,
         arg.Dispatcherid,
-        arg.MaxRuns,
-        arg.WebhookId,
         arg.Type,
         arg.SdkVersion,
         arg.Language,
@@ -99,6 +91,45 @@ func (q *Queries) CreateWorker(ctx context.Context, db DBTX, arg CreateWorkerPar
     return &i, err
 }

+const createWorkerSlotConfigs = `-- name: CreateWorkerSlotConfigs :exec
+INSERT INTO v1_worker_slot_config (
+    tenant_id,
+    worker_id,
+    slot_type,
+    max_units,
+    created_at,
+    updated_at
+)
+SELECT
+    $1::uuid,
+    $2::uuid,
+    unnest($3::text[]),
+    unnest($4::integer[]),
+    CURRENT_TIMESTAMP,
+    CURRENT_TIMESTAMP
+ON CONFLICT (tenant_id, worker_id, slot_type) DO UPDATE SET
+    max_units = EXCLUDED.max_units,
+    updated_at = CURRENT_TIMESTAMP
+`
+
+type CreateWorkerSlotConfigsParams struct {
+    Tenantid  uuid.UUID `json:"tenantid"`
+    Workerid  uuid.UUID `json:"workerid"`
+    Slottypes []string  `json:"slottypes"`
+    Maxunits  []int32   `json:"maxunits"`
+}
+
+// NOTE: ON CONFLICT can be removed after the 0_76_d migration is run to remove insert triggers added in 0_76
+func (q *Queries) CreateWorkerSlotConfigs(ctx context.Context, db DBTX, arg CreateWorkerSlotConfigsParams) error {
+    _, err := db.Exec(ctx, createWorkerSlotConfigs,
+        arg.Tenantid,
+        arg.Workerid,
+        arg.Slottypes,
+        arg.Maxunits,
+    )
+    return err
+}
+
 const deleteOldWorkers = `-- name: DeleteOldWorkers :one
 WITH for_delete AS (
     SELECT
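A caller registering a worker needs to turn its slot config map into the arrays CreateWorkerSlotConfigs expects. A sketch, written as if it lived alongside the sqlcv1 package; mapping a legacy maxRuns value to the "default" slot capacity is my assumption, not something shown in the diff:

// slotConfigParams flattens a slot_type -> max_units map into the
// parallel arrays the generated query takes. Iteration order of the
// map does not matter because rows are keyed by slot_type.
func slotConfigParams(tenantId, workerId uuid.UUID, cfg map[string]int32) CreateWorkerSlotConfigsParams {
    slotTypes := make([]string, 0, len(cfg))
    maxUnits := make([]int32, 0, len(cfg))
    for slotType, units := range cfg {
        slotTypes = append(slotTypes, slotType)
        maxUnits = append(maxUnits, units)
    }
    return CreateWorkerSlotConfigsParams{
        Tenantid:  tenantId,
        Workerid:  workerId,
        Slottypes: slotTypes,
        Maxunits:  maxUnits,
    }
}

// e.g. a worker that previously registered with maxRuns=100 might now
// (hypothetically) register with map[string]int32{"default": 100}.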
@@ -291,27 +322,15 @@ func (q *Queries) GetWorkerActionsByWorkerId(ctx context.Context, db DBTX, arg G
 const getWorkerById = `-- name: GetWorkerById :one
 SELECT
-    w.id, w."createdAt", w."updatedAt", w."deletedAt", w."tenantId", w."lastHeartbeatAt", w.name, w."dispatcherId", w."maxRuns", w."isActive", w."lastListenerEstablished", w."isPaused", w.type, w."webhookId", w.language, w."languageVersion", w.os, w."runtimeExtra", w."sdkVersion",
-    ww."url" AS "webhookUrl",
-    w."maxRuns" - (
-        SELECT COUNT(*)
-        FROM v1_task_runtime runtime
-        WHERE
-            runtime.tenant_id = w."tenantId" AND
-            runtime.worker_id = w."id"
-    ) AS "remainingSlots"
+    w.id, w."createdAt", w."updatedAt", w."deletedAt", w."tenantId", w."lastHeartbeatAt", w.name, w."dispatcherId", w."maxRuns", w."isActive", w."lastListenerEstablished", w."isPaused", w.type, w."webhookId", w.language, w."languageVersion", w.os, w."runtimeExtra", w."sdkVersion"
 FROM
     "Worker" w
-LEFT JOIN
-    "WebhookWorker" ww ON w."webhookId" = ww."id"
 WHERE
     w."id" = $1::uuid
 `

 type GetWorkerByIdRow struct {
-    Worker         Worker      `json:"worker"`
-    WebhookUrl     pgtype.Text `json:"webhookUrl"`
-    RemainingSlots int32       `json:"remainingSlots"`
+    Worker Worker `json:"worker"`
 }

 func (q *Queries) GetWorkerById(ctx context.Context, db DBTX, id uuid.UUID) (*GetWorkerByIdRow, error) {
@@ -337,8 +356,6 @@ func (q *Queries) GetWorkerById(ctx context.Context, db DBTX, id uuid.UUID) (*Ge
         &i.Worker.Os,
         &i.Worker.RuntimeExtra,
         &i.Worker.SdkVersion,
-        &i.WebhookUrl,
-        &i.RemainingSlots,
     )
     return &i, err
 }
@@ -534,6 +551,47 @@ func (q *Queries) ListActiveSDKsPerTenant(ctx context.Context, db DBTX) ([]*List
     return items, nil
 }

+const listActiveSlotsPerTenantAndSlotType = `-- name: ListActiveSlotsPerTenantAndSlotType :many
+SELECT
+    wc.tenant_id AS "tenantId",
+    wc.slot_type AS "slotType",
+    SUM(wc.max_units) AS "activeSlots"
+FROM v1_worker_slot_config wc
+JOIN "Worker" w ON w."id" = wc.worker_id AND w."tenantId" = wc.tenant_id
+WHERE
+    w."dispatcherId" IS NOT NULL
+    AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
+    AND w."isActive" = true
+    AND w."isPaused" = false
+GROUP BY wc.tenant_id, wc.slot_type
+`
+
+type ListActiveSlotsPerTenantAndSlotTypeRow struct {
+    TenantId    uuid.UUID `json:"tenantId"`
+    SlotType    string    `json:"slotType"`
+    ActiveSlots int64     `json:"activeSlots"`
+}
+
+func (q *Queries) ListActiveSlotsPerTenantAndSlotType(ctx context.Context, db DBTX) ([]*ListActiveSlotsPerTenantAndSlotTypeRow, error) {
+    rows, err := db.Query(ctx, listActiveSlotsPerTenantAndSlotType)
+    if err != nil {
+        return nil, err
+    }
+    defer rows.Close()
+    var items []*ListActiveSlotsPerTenantAndSlotTypeRow
+    for rows.Next() {
+        var i ListActiveSlotsPerTenantAndSlotTypeRow
+        if err := rows.Scan(&i.TenantId, &i.SlotType, &i.ActiveSlots); err != nil {
+            return nil, err
+        }
+        items = append(items, &i)
+    }
+    if err := rows.Err(); err != nil {
+        return nil, err
+    }
+    return items, nil
+}
+
 const listActiveWorkersPerTenant = `-- name: ListActiveWorkersPerTenant :many
 SELECT "tenantId", COUNT(*)
 FROM "Worker"
@@ -570,6 +628,139 @@ func (q *Queries) ListActiveWorkersPerTenant(ctx context.Context, db DBTX) ([]*L
     return items, nil
 }

+const listAvailableSlotsForWorkers = `-- name: ListAvailableSlotsForWorkers :many
+WITH worker_capacities AS (
+    SELECT
+        worker_id,
+        max_units
+    FROM
+        v1_worker_slot_config
+    WHERE
+        tenant_id = $1::uuid
+        AND worker_id = ANY($2::uuid[])
+        AND slot_type = $3::text
+), worker_used_slots AS (
+    SELECT
+        worker_id,
+        SUM(units) AS used_units
+    FROM
+        v1_task_runtime_slot
+    WHERE
+        tenant_id = $1::uuid
+        AND worker_id = ANY($2::uuid[])
+        AND slot_type = $3::text
+    GROUP BY
+        worker_id
+)
+SELECT
+    wc.worker_id AS "id",
+    wc.max_units - COALESCE(wus.used_units, 0) AS "availableSlots"
+FROM
+    worker_capacities wc
+LEFT JOIN
+    worker_used_slots wus ON wc.worker_id = wus.worker_id
+`
+
+type ListAvailableSlotsForWorkersParams struct {
+    Tenantid  uuid.UUID   `json:"tenantid"`
+    Workerids []uuid.UUID `json:"workerids"`
+    Slottype  string      `json:"slottype"`
+}
+
+type ListAvailableSlotsForWorkersRow struct {
+    ID             uuid.UUID `json:"id"`
+    AvailableSlots int32     `json:"availableSlots"`
+}
+
+func (q *Queries) ListAvailableSlotsForWorkers(ctx context.Context, db DBTX, arg ListAvailableSlotsForWorkersParams) ([]*ListAvailableSlotsForWorkersRow, error) {
+    rows, err := db.Query(ctx, listAvailableSlotsForWorkers, arg.Tenantid, arg.Workerids, arg.Slottype)
+    if err != nil {
+        return nil, err
+    }
+    defer rows.Close()
+    var items []*ListAvailableSlotsForWorkersRow
+    for rows.Next() {
+        var i ListAvailableSlotsForWorkersRow
+        if err := rows.Scan(&i.ID, &i.AvailableSlots); err != nil {
+            return nil, err
+        }
+        items = append(items, &i)
+    }
+    if err := rows.Err(); err != nil {
+        return nil, err
+    }
+    return items, nil
+}
+
+const listAvailableSlotsForWorkersAndTypes = `-- name: ListAvailableSlotsForWorkersAndTypes :many
+WITH worker_capacities AS (
+    SELECT
+        worker_id,
+        slot_type,
+        max_units
+    FROM
+        v1_worker_slot_config
+    WHERE
+        tenant_id = $1::uuid
+        AND worker_id = ANY($2::uuid[])
+        AND slot_type = ANY($3::text[])
+), worker_used_slots AS (
+    SELECT
+        worker_id,
+        slot_type,
+        SUM(units) AS used_units
+    FROM
+        v1_task_runtime_slot
+    WHERE
+        tenant_id = $1::uuid
+        AND worker_id = ANY($2::uuid[])
+        AND slot_type = ANY($3::text[])
+    GROUP BY
+        worker_id,
+        slot_type
+)
+SELECT
+    wc.worker_id AS "id",
+    wc.slot_type AS "slotType",
+    wc.max_units - COALESCE(wus.used_units, 0) AS "availableSlots"
+FROM
+    worker_capacities wc
+LEFT JOIN
+    worker_used_slots wus ON wc.worker_id = wus.worker_id AND wc.slot_type = wus.slot_type
+`
+
+type ListAvailableSlotsForWorkersAndTypesParams struct {
+    Tenantid  uuid.UUID   `json:"tenantid"`
+    Workerids []uuid.UUID `json:"workerids"`
+    Slottypes []string    `json:"slottypes"`
+}
+
+type ListAvailableSlotsForWorkersAndTypesRow struct {
+    ID             uuid.UUID `json:"id"`
+    SlotType       string    `json:"slotType"`
+    AvailableSlots int32     `json:"availableSlots"`
+}
+
+func (q *Queries) ListAvailableSlotsForWorkersAndTypes(ctx context.Context, db DBTX, arg ListAvailableSlotsForWorkersAndTypesParams) ([]*ListAvailableSlotsForWorkersAndTypesRow, error) {
+    rows, err := db.Query(ctx, listAvailableSlotsForWorkersAndTypes, arg.Tenantid, arg.Workerids, arg.Slottypes)
+    if err != nil {
+        return nil, err
+    }
+    defer rows.Close()
+    var items []*ListAvailableSlotsForWorkersAndTypesRow
+    for rows.Next() {
+        var i ListAvailableSlotsForWorkersAndTypesRow
+        if err := rows.Scan(&i.ID, &i.SlotType, &i.AvailableSlots); err != nil {
+            return nil, err
+        }
+        items = append(items, &i)
+    }
+    if err := rows.Err(); err != nil {
+        return nil, err
+    }
+    return items, nil
+}
+
 const listDispatcherIdsForWorkers = `-- name: ListDispatcherIdsForWorkers :many
 SELECT
     "id" as "workerId",
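A scheduler-side caller would typically fetch remaining capacity for both slot types in one round trip and index it by worker and type. A hedged sketch, written as if it lived alongside the generated sqlcv1 code; the slot type names assume the constants added in slot_types.go:

// remainingByWorkerAndType indexes ListAvailableSlotsForWorkersAndTypes
// results as worker -> slot_type -> available units.
func remainingByWorkerAndType(ctx context.Context, q *Queries, db DBTX, tenantId uuid.UUID, workerIds []uuid.UUID) (map[uuid.UUID]map[string]int32, error) {
    rows, err := q.ListAvailableSlotsForWorkersAndTypes(ctx, db, ListAvailableSlotsForWorkersAndTypesParams{
        Tenantid:  tenantId,
        Workerids: workerIds,
        Slottypes: []string{"default", "durable"},
    })
    if err != nil {
        return nil, err
    }

    out := make(map[uuid.UUID]map[string]int32, len(workerIds))
    for _, row := range rows {
        if out[row.ID] == nil {
            out[row.ID] = map[string]int32{}
        }
        out[row.ID][row.SlotType] = row.AvailableSlots
    }
    return out, nil
}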
@@ -793,14 +984,17 @@ func (q *Queries) ListSemaphoreSlotsWithStateForWorker(ctx context.Context, db D
 }

 const listTotalActiveSlotsPerTenant = `-- name: ListTotalActiveSlotsPerTenant :many
-SELECT "tenantId", SUM("maxRuns") AS "totalActiveSlots"
-FROM "Worker"
+SELECT
+    wc.tenant_id AS "tenantId",
+    SUM(wc.max_units) AS "totalActiveSlots"
+FROM v1_worker_slot_config wc
+JOIN "Worker" w ON w."id" = wc.worker_id AND w."tenantId" = wc.tenant_id
 WHERE
-    "dispatcherId" IS NOT NULL
-    AND "lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
-    AND "isActive" = true
-    AND "isPaused" = false
-GROUP BY "tenantId"
+    w."dispatcherId" IS NOT NULL
+    AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
+    AND w."isActive" = true
+    AND w."isPaused" = false
+GROUP BY wc.tenant_id
 `

 type ListTotalActiveSlotsPerTenantRow struct {
@@ -876,22 +1070,54 @@ func (q *Queries) ListWorkerLabels(ctx context.Context, db DBTX, workerid uuid.U
     return items, nil
 }

-const listWorkersWithSlotCount = `-- name: ListWorkersWithSlotCount :many
+const listWorkerSlotConfigs = `-- name: ListWorkerSlotConfigs :many
 SELECT
-    workers.id, workers."createdAt", workers."updatedAt", workers."deletedAt", workers."tenantId", workers."lastHeartbeatAt", workers.name, workers."dispatcherId", workers."maxRuns", workers."isActive", workers."lastListenerEstablished", workers."isPaused", workers.type, workers."webhookId", workers.language, workers."languageVersion", workers.os, workers."runtimeExtra", workers."sdkVersion",
-    ww."url" AS "webhookUrl",
-    ww."id" AS "webhookId",
-    workers."maxRuns" - (
-        SELECT COUNT(*)
-        FROM v1_task_runtime runtime
-        WHERE
-            runtime.tenant_id = workers."tenantId" AND
-            runtime.worker_id = workers."id"
-    ) AS "remainingSlots"
+    worker_id,
+    slot_type,
+    max_units
+FROM
+    v1_worker_slot_config
+WHERE
+    tenant_id = $1::uuid
+    AND worker_id = ANY($2::uuid[])
+`
+
+type ListWorkerSlotConfigsParams struct {
+    Tenantid  uuid.UUID   `json:"tenantid"`
+    Workerids []uuid.UUID `json:"workerids"`
+}
+
+type ListWorkerSlotConfigsRow struct {
+    WorkerID uuid.UUID `json:"worker_id"`
+    SlotType string    `json:"slot_type"`
+    MaxUnits int32     `json:"max_units"`
+}
+
+func (q *Queries) ListWorkerSlotConfigs(ctx context.Context, db DBTX, arg ListWorkerSlotConfigsParams) ([]*ListWorkerSlotConfigsRow, error) {
+    rows, err := db.Query(ctx, listWorkerSlotConfigs, arg.Tenantid, arg.Workerids)
+    if err != nil {
+        return nil, err
+    }
+    defer rows.Close()
+    var items []*ListWorkerSlotConfigsRow
+    for rows.Next() {
+        var i ListWorkerSlotConfigsRow
+        if err := rows.Scan(&i.WorkerID, &i.SlotType, &i.MaxUnits); err != nil {
+            return nil, err
+        }
+        items = append(items, &i)
+    }
+    if err := rows.Err(); err != nil {
+        return nil, err
+    }
+    return items, nil
+}
+
+const listWorkers = `-- name: ListWorkers :many
+SELECT
+    workers.id, workers."createdAt", workers."updatedAt", workers."deletedAt", workers."tenantId", workers."lastHeartbeatAt", workers.name, workers."dispatcherId", workers."maxRuns", workers."isActive", workers."lastListenerEstablished", workers."isPaused", workers.type, workers."webhookId", workers.language, workers."languageVersion", workers.os, workers."runtimeExtra", workers."sdkVersion"
 FROM
     "Worker" workers
-LEFT JOIN
-    "WebhookWorker" ww ON workers."webhookId" = ww."id"
 WHERE
     workers."tenantId" = $1
     AND (
@@ -909,33 +1135,33 @@ WHERE
     )
     AND (
         $4::boolean IS NULL OR
-        workers."maxRuns" IS NULL OR
-        ($4::boolean AND workers."maxRuns" > (
-            SELECT COUNT(*)
-            FROM "StepRun" srs
-            WHERE srs."workerId" = workers."id" AND srs."status" = 'RUNNING'
+        ($4::boolean AND (
+            SELECT COALESCE(SUM(cap.max_units), 0)
+            FROM v1_worker_slot_config cap
+            WHERE cap.tenant_id = workers."tenantId" AND cap.worker_id = workers."id"
+        ) > (
+            SELECT COALESCE(SUM(runtime.units), 0)
+            FROM v1_task_runtime_slot runtime
+            WHERE runtime.tenant_id = workers."tenantId" AND runtime.worker_id = workers."id"
         ))
     )
 GROUP BY
-    workers."id", ww."url", ww."id"
+    workers."id"
 `

-type ListWorkersWithSlotCountParams struct {
+type ListWorkersParams struct {
     Tenantid           uuid.UUID        `json:"tenantid"`
     ActionId           pgtype.Text      `json:"actionId"`
     LastHeartbeatAfter pgtype.Timestamp `json:"lastHeartbeatAfter"`
     Assignable         pgtype.Bool      `json:"assignable"`
 }

-type ListWorkersWithSlotCountRow struct {
-    Worker         Worker      `json:"worker"`
-    WebhookUrl     pgtype.Text `json:"webhookUrl"`
-    WebhookId      *uuid.UUID  `json:"webhookId"`
-    RemainingSlots int32       `json:"remainingSlots"`
+type ListWorkersRow struct {
+    Worker Worker `json:"worker"`
 }

-func (q *Queries) ListWorkersWithSlotCount(ctx context.Context, db DBTX, arg ListWorkersWithSlotCountParams) ([]*ListWorkersWithSlotCountRow, error) {
-    rows, err := db.Query(ctx, listWorkersWithSlotCount,
+func (q *Queries) ListWorkers(ctx context.Context, db DBTX, arg ListWorkersParams) ([]*ListWorkersRow, error) {
+    rows, err := db.Query(ctx, listWorkers,
         arg.Tenantid,
         arg.ActionId,
         arg.LastHeartbeatAfter,
@@ -945,9 +1171,9 @@ func (q *Queries) ListWorkersWithSlotCount(ctx context.Context, db DBTX, arg Lis
         return nil, err
     }
     defer rows.Close()
-    var items []*ListWorkersWithSlotCountRow
+    var items []*ListWorkersRow
     for rows.Next() {
-        var i ListWorkersWithSlotCountRow
+        var i ListWorkersRow
         if err := rows.Scan(
             &i.Worker.ID,
             &i.Worker.CreatedAt,
@@ -968,9 +1194,6 @@ func (q *Queries) ListWorkersWithSlotCount(ctx context.Context, db DBTX, arg Lis
             &i.Worker.Os,
             &i.Worker.RuntimeExtra,
             &i.Worker.SdkVersion,
-            &i.WebhookUrl,
-            &i.WebhookId,
-            &i.RemainingSlots,
         ); err != nil {
             return nil, err
         }
@@ -988,18 +1211,16 @@ UPDATE
 SET
     "updatedAt" = CURRENT_TIMESTAMP,
     "dispatcherId" = coalesce($1::uuid, "dispatcherId"),
-    "maxRuns" = coalesce($2::int, "maxRuns"),
-    "lastHeartbeatAt" = coalesce($3::timestamp, "lastHeartbeatAt"),
-    "isActive" = coalesce($4::boolean, "isActive"),
-    "isPaused" = coalesce($5::boolean, "isPaused")
+    "lastHeartbeatAt" = coalesce($2::timestamp, "lastHeartbeatAt"),
+    "isActive" = coalesce($3::boolean, "isActive"),
+    "isPaused" = coalesce($4::boolean, "isPaused")
 WHERE
-    "id" = $6::uuid
+    "id" = $5::uuid
 RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", "sdkVersion"
 `

 type UpdateWorkerParams struct {
     DispatcherId    *uuid.UUID       `json:"dispatcherId"`
-    MaxRuns         pgtype.Int4      `json:"maxRuns"`
     LastHeartbeatAt pgtype.Timestamp `json:"lastHeartbeatAt"`
     IsActive        pgtype.Bool      `json:"isActive"`
     IsPaused        pgtype.Bool      `json:"isPaused"`
@@ -1009,7 +1230,6 @@ type UpdateWorkerParams struct {
 func (q *Queries) UpdateWorker(ctx context.Context, db DBTX, arg UpdateWorkerParams) (*Worker, error) {
     row := db.QueryRow(ctx, updateWorker,
         arg.DispatcherId,
-        arg.MaxRuns,
         arg.LastHeartbeatAt,
         arg.IsActive,
         arg.IsPaused,
diff --git a/pkg/repository/sqlcv1/workflows.sql b/pkg/repository/sqlcv1/workflows.sql
index 8e40372a5b..8d1778587c 100644
--- a/pkg/repository/sqlcv1/workflows.sql
+++ b/pkg/repository/sqlcv1/workflows.sql
@@ -286,7 +286,8 @@ INSERT INTO "Step" (
     "retries",
     "scheduleTimeout",
     "retryBackoffFactor",
-    "retryMaxBackoff"
+    "retryMaxBackoff",
+    "isDurable"
 ) VALUES (
     @id::uuid,
     coalesce(sqlc.narg('createdAt')::timestamp, CURRENT_TIMESTAMP),
@@ -301,9 +302,29 @@ INSERT INTO "Step" (
     coalesce(sqlc.narg('retries')::integer, 0),
     coalesce(sqlc.narg('scheduleTimeout')::text, '5m'),
     sqlc.narg('retryBackoffFactor'),
-    sqlc.narg('retryMaxBackoff')
+    sqlc.narg('retryMaxBackoff'),
+    coalesce(sqlc.narg('isDurable')::boolean, false)
 ) RETURNING *;

+-- name: CreateStepSlotRequests :exec
+INSERT INTO v1_step_slot_request (
+    tenant_id,
+    step_id,
+    slot_type,
+    units,
+    created_at,
+    updated_at
+)
+SELECT
+    @tenantId::uuid,
+    @stepId::uuid,
+    unnest(@slotTypes::text[]),
+    unnest(@units::integer[]),
+    CURRENT_TIMESTAMP,
+    CURRENT_TIMESTAMP
+-- NOTE: ON CONFLICT can be removed after the 0_76_d migration is run to remove insert triggers added in 0_76
+ON CONFLICT (tenant_id, step_id, slot_type) DO NOTHING;
+
 -- name: AddStepParents :exec
 INSERT INTO "_StepOrder" ("A", "B")
 SELECT
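Registering an explicit slot request for a step uses the new CreateStepSlotRequests query. A short usage sketch, written as if in the sqlcv1 package; a durable step asking for one "durable" unit is an assumed example, not taken from the diff:

// Hypothetical registration of a slot request for a durable step.
err := q.CreateStepSlotRequests(ctx, db, CreateStepSlotRequestsParams{
    Tenantid:  tenantId,
    Stepid:    stepId,
    Slottypes: []string{"durable"},
    Units:     []int32{1},
})
if err != nil {
    return err
}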
"updatedAt", "deletedAt", "readableId", "tenantId", "jobId", "actionId", timeout, "customUserData", retries, "retryBackoffFactor", "retryMaxBackoff", "scheduleTimeout" + $14, + coalesce($15::boolean, false) +) RETURNING id, "createdAt", "updatedAt", "deletedAt", "readableId", "tenantId", "jobId", "actionId", timeout, "customUserData", retries, "retryBackoffFactor", "retryMaxBackoff", "scheduleTimeout", "isDurable" ` type CreateStepParams struct { @@ -201,6 +203,7 @@ type CreateStepParams struct { ScheduleTimeout pgtype.Text `json:"scheduleTimeout"` RetryBackoffFactor pgtype.Float8 `json:"retryBackoffFactor"` RetryMaxBackoff pgtype.Int4 `json:"retryMaxBackoff"` + IsDurable pgtype.Bool `json:"isDurable"` } func (q *Queries) CreateStep(ctx context.Context, db DBTX, arg CreateStepParams) (*Step, error) { @@ -219,6 +222,7 @@ func (q *Queries) CreateStep(ctx context.Context, db DBTX, arg CreateStepParams) arg.ScheduleTimeout, arg.RetryBackoffFactor, arg.RetryMaxBackoff, + arg.IsDurable, ) var i Step err := row.Scan( @@ -236,6 +240,7 @@ func (q *Queries) CreateStep(ctx context.Context, db DBTX, arg CreateStepParams) &i.RetryBackoffFactor, &i.RetryMaxBackoff, &i.ScheduleTimeout, + &i.IsDurable, ) return &i, err } @@ -443,6 +448,43 @@ func (q *Queries) CreateStepRateLimit(ctx context.Context, db DBTX, arg CreateSt return &i, err } +const createStepSlotRequests = `-- name: CreateStepSlotRequests :exec +INSERT INTO v1_step_slot_request ( + tenant_id, + step_id, + slot_type, + units, + created_at, + updated_at +) +SELECT + $1::uuid, + $2::uuid, + unnest($3::text[]), + unnest($4::integer[]), + CURRENT_TIMESTAMP, + CURRENT_TIMESTAMP +ON CONFLICT (tenant_id, step_id, slot_type) DO NOTHING +` + +type CreateStepSlotRequestsParams struct { + Tenantid uuid.UUID `json:"tenantid"` + Stepid uuid.UUID `json:"stepid"` + Slottypes []string `json:"slottypes"` + Units []int32 `json:"units"` +} + +// NOTE: ON CONFLICT can be removed after the 0_76_d migration is run to remove insert triggers added in 0_76 +func (q *Queries) CreateStepSlotRequests(ctx context.Context, db DBTX, arg CreateStepSlotRequestsParams) error { + _, err := db.Exec(ctx, createStepSlotRequests, + arg.Tenantid, + arg.Stepid, + arg.Slottypes, + arg.Units, + ) + return err +} + const createWorkflow = `-- name: CreateWorkflow :one INSERT INTO "Workflow" ( "id", @@ -984,7 +1026,7 @@ func (q *Queries) GetLatestWorkflowVersionForWorkflows(ctx context.Context, db D const getStepsForJobs = `-- name: GetStepsForJobs :many SELECT j."id" as "jobId", - s.id, s."createdAt", s."updatedAt", s."deletedAt", s."readableId", s."tenantId", s."jobId", s."actionId", s.timeout, s."customUserData", s.retries, s."retryBackoffFactor", s."retryMaxBackoff", s."scheduleTimeout", + s.id, s."createdAt", s."updatedAt", s."deletedAt", s."readableId", s."tenantId", s."jobId", s."actionId", s.timeout, s."customUserData", s.retries, s."retryBackoffFactor", s."retryMaxBackoff", s."scheduleTimeout", s."isDurable", ( SELECT array_agg(so."A")::uuid[] -- Casting the array_agg result to uuid[] FROM "_StepOrder" so @@ -1034,6 +1076,7 @@ func (q *Queries) GetStepsForJobs(ctx context.Context, db DBTX, arg GetStepsForJ &i.Step.RetryBackoffFactor, &i.Step.RetryMaxBackoff, &i.Step.ScheduleTimeout, + &i.Step.IsDurable, &i.Parents, ); err != nil { return nil, err @@ -1550,7 +1593,7 @@ func (q *Queries) ListStepMatchConditions(ctx context.Context, db DBTX, arg List const listStepsByIds = `-- name: ListStepsByIds :many SELECT - s.id, s."createdAt", s."updatedAt", s."deletedAt", s."readableId", 
s."tenantId", s."jobId", s."actionId", s.timeout, s."customUserData", s.retries, s."retryBackoffFactor", s."retryMaxBackoff", s."scheduleTimeout", + s.id, s."createdAt", s."updatedAt", s."deletedAt", s."readableId", s."tenantId", s."jobId", s."actionId", s.timeout, s."customUserData", s.retries, s."retryBackoffFactor", s."retryMaxBackoff", s."scheduleTimeout", s."isDurable", wv."id" as "workflowVersionId", wv."sticky" as "workflowVersionSticky", w."name" as "workflowName", @@ -1599,6 +1642,7 @@ type ListStepsByIdsRow struct { RetryBackoffFactor pgtype.Float8 `json:"retryBackoffFactor"` RetryMaxBackoff pgtype.Int4 `json:"retryMaxBackoff"` ScheduleTimeout string `json:"scheduleTimeout"` + IsDurable bool `json:"isDurable"` WorkflowVersionId uuid.UUID `json:"workflowVersionId"` WorkflowVersionSticky NullStickyStrategy `json:"workflowVersionSticky"` WorkflowName string `json:"workflowName"` @@ -1632,6 +1676,7 @@ func (q *Queries) ListStepsByIds(ctx context.Context, db DBTX, arg ListStepsById &i.RetryBackoffFactor, &i.RetryMaxBackoff, &i.ScheduleTimeout, + &i.IsDurable, &i.WorkflowVersionId, &i.WorkflowVersionSticky, &i.WorkflowName, @@ -1653,7 +1698,7 @@ func (q *Queries) ListStepsByIds(ctx context.Context, db DBTX, arg ListStepsById const listStepsByWorkflowVersionIds = `-- name: ListStepsByWorkflowVersionIds :many WITH steps AS ( SELECT - s.id, s."createdAt", s."updatedAt", s."deletedAt", s."readableId", s."tenantId", s."jobId", s."actionId", s.timeout, s."customUserData", s.retries, s."retryBackoffFactor", s."retryMaxBackoff", s."scheduleTimeout", + s.id, s."createdAt", s."updatedAt", s."deletedAt", s."readableId", s."tenantId", s."jobId", s."actionId", s.timeout, s."customUserData", s.retries, s."retryBackoffFactor", s."retryMaxBackoff", s."scheduleTimeout", s."isDurable", wv."id" as "workflowVersionId", w."name" as "workflowName", w."id" as "workflowId", @@ -1688,7 +1733,7 @@ WITH steps AS ( so."B" ) SELECT - s.id, s."createdAt", s."updatedAt", s."deletedAt", s."readableId", s."tenantId", s."jobId", s."actionId", s.timeout, s."customUserData", s.retries, s."retryBackoffFactor", s."retryMaxBackoff", s."scheduleTimeout", s."workflowVersionId", s."workflowName", s."workflowId", s."jobKind", s."matchConditionCount", + s.id, s."createdAt", s."updatedAt", s."deletedAt", s."readableId", s."tenantId", s."jobId", s."actionId", s.timeout, s."customUserData", s.retries, s."retryBackoffFactor", s."retryMaxBackoff", s."scheduleTimeout", s."isDurable", s."workflowVersionId", s."workflowName", s."workflowId", s."jobKind", s."matchConditionCount", COALESCE(so."parents", '{}'::uuid[]) as "parents" FROM steps s @@ -1716,6 +1761,7 @@ type ListStepsByWorkflowVersionIdsRow struct { RetryBackoffFactor pgtype.Float8 `json:"retryBackoffFactor"` RetryMaxBackoff pgtype.Int4 `json:"retryMaxBackoff"` ScheduleTimeout string `json:"scheduleTimeout"` + IsDurable bool `json:"isDurable"` WorkflowVersionId uuid.UUID `json:"workflowVersionId"` WorkflowName string `json:"workflowName"` WorkflowId uuid.UUID `json:"workflowId"` @@ -1748,6 +1794,7 @@ func (q *Queries) ListStepsByWorkflowVersionIds(ctx context.Context, db DBTX, ar &i.RetryBackoffFactor, &i.RetryMaxBackoff, &i.ScheduleTimeout, + &i.IsDurable, &i.WorkflowVersionId, &i.WorkflowName, &i.WorkflowId, diff --git a/pkg/repository/tenant_limit.go b/pkg/repository/tenant_limit.go index 8cfe809405..7470980508 100644 --- a/pkg/repository/tenant_limit.go +++ b/pkg/repository/tenant_limit.go @@ -130,10 +130,17 @@ func (t *tenantLimitRepository) GetLimits(ctx context.Context, 
diff --git a/pkg/repository/tenant_limit.go b/pkg/repository/tenant_limit.go
index 8cfe809405..7470980508 100644
--- a/pkg/repository/tenant_limit.go
+++ b/pkg/repository/tenant_limit.go
@@ -130,10 +130,17 @@ func (t *tenantLimitRepository) GetLimits(ctx context.Context, tenantId uuid.UUI
         }

         if limit.Resource == sqlcv1.LimitResourceWORKERSLOT {
-            workerSlotCount, err := t.queries.CountTenantWorkerSlots(ctx, t.pool, tenantId)
+            totalSlotsRows, err := t.queries.ListTotalActiveSlotsPerTenant(ctx, t.pool)
             if err != nil {
                 return nil, err
             }
+            var workerSlotCount int32
+            for _, row := range totalSlotsRows {
+                if row.TenantId == tenantId {
+                    workerSlotCount = int32(row.TotalActiveSlots) // nolint: gosec
+                    break
+                }
+            }

             limit.Value = workerSlotCount
         }
diff --git a/pkg/repository/ticker.go b/pkg/repository/ticker.go
index 643941ab1a..e59edbcd9d 100644
--- a/pkg/repository/ticker.go
+++ b/pkg/repository/ticker.go
@@ -5,6 +5,7 @@ import (
     "time"

     "github.com/google/uuid"
+    "github.com/hatchet-dev/hatchet/pkg/repository/sqlchelpers"
     "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
 )
diff --git a/pkg/repository/user_session.go b/pkg/repository/user_session.go
index f0f4db6493..f258231d57 100644
--- a/pkg/repository/user_session.go
+++ b/pkg/repository/user_session.go
@@ -5,6 +5,7 @@ import (
     "time"

     "github.com/google/uuid"
+    "github.com/hatchet-dev/hatchet/pkg/repository/sqlchelpers"
     "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
 )
diff --git a/pkg/repository/worker.go b/pkg/repository/worker.go
index 5e27ba520a..70f25d5222 100644
--- a/pkg/repository/worker.go
+++ b/pkg/repository/worker.go
@@ -27,8 +27,8 @@ type CreateWorkerOpts struct {
     // The id of the dispatcher
     DispatcherId uuid.UUID `validate:"required"`

-    // The maximum number of runs this worker can run at a time
-    MaxRuns *int `validate:"omitempty,gte=1"`
+    // Slot config for this worker (slot_type -> max units)
+    SlotConfig map[string]int32 `validate:"omitempty"`

     // The name of the worker
     Name string `validate:"required,hatchetName"`
@@ -75,21 +75,30 @@ type UpsertWorkerLabelOpts struct {
 }

 type WorkerRepository interface {
-    ListWorkers(tenantId uuid.UUID, opts *ListWorkersOpts) ([]*sqlcv1.ListWorkersWithSlotCountRow, error)
-    GetWorkerById(workerId uuid.UUID) (*sqlcv1.GetWorkerByIdRow, error)
-    ListWorkerState(tenantId uuid.UUID, workerId uuid.UUID, maxRuns int) ([]*sqlcv1.ListSemaphoreSlotsWithStateForWorkerRow, error)
-    CountActiveSlotsPerTenant() (map[uuid.UUID]int64, error)
-    CountActiveWorkersPerTenant() (map[uuid.UUID]int64, error)
-    ListActiveSDKsPerTenant() (map[TenantIdSDKTuple]int64, error)
+    ListWorkers(ctx context.Context, tenantId uuid.UUID, opts *ListWorkersOpts) ([]*sqlcv1.ListWorkersRow, error)
+    GetWorkerById(ctx context.Context, workerId uuid.UUID) (*sqlcv1.GetWorkerByIdRow, error)
+    ListTotalActiveSlotsPerTenant(ctx context.Context) (map[uuid.UUID]int64, error)
+    ListActiveSlotsPerTenantAndSlotType(ctx context.Context) (map[TenantIdSlotTypeTuple]int64, error)
+    CountActiveWorkersPerTenant(ctx context.Context) (map[uuid.UUID]int64, error)
+    ListActiveSDKsPerTenant(ctx context.Context) (map[TenantIdSDKTuple]int64, error)

     // GetWorkerActionsByWorkerId returns a list of actions for a worker
-    GetWorkerActionsByWorkerId(tenantId uuid.UUID, workerId []uuid.UUID) (map[string][]string, error)
+    GetWorkerActionsByWorkerId(ctx context.Context, tenantId uuid.UUID, workerId []uuid.UUID) (map[string][]string, error)

     // GetWorkerWorkflowsByWorkerId returns a list of workflows for a worker
-    GetWorkerWorkflowsByWorkerId(tenantId uuid.UUID, workerId uuid.UUID) ([]*sqlcv1.Workflow, error)
+    GetWorkerWorkflowsByWorkerId(ctx context.Context, tenantId uuid.UUID, workerId uuid.UUID) ([]*sqlcv1.Workflow, error)

     // ListWorkerLabels returns a list of labels config for a worker
-    ListWorkerLabels(tenantId uuid.UUID, workerId uuid.UUID) ([]*sqlcv1.ListWorkerLabelsRow, error)
+    ListWorkerLabels(ctx context.Context, tenantId uuid.UUID, workerId uuid.UUID) ([]*sqlcv1.ListWorkerLabelsRow, error)
+
+    // ListWorkerSlotConfigs returns slot config for workers.
+    ListWorkerSlotConfigs(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) (map[uuid.UUID]map[string]int32, error)
+
+    // ListAvailableSlotsForWorkers returns available slot units by worker for a slot type.
+    ListAvailableSlotsForWorkers(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID, slotType string) (map[uuid.UUID]int32, error)
+
+    // ListAvailableSlotsForWorkersAndTypes returns available slot units by worker for a set of slot types.
+    ListAvailableSlotsForWorkersAndTypes(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID, slotTypes []string) (map[uuid.UUID]map[string]int32, error)

     // CreateNewWorker creates a new worker for a given tenant.
     CreateNewWorker(ctx context.Context, tenantId uuid.UUID, opts *CreateWorkerOpts) (*sqlcv1.Worker, error)
@@ -125,12 +134,12 @@ func newWorkerRepository(shared *sharedRepository) WorkerRepository {
     }
 }

-func (w *workerRepository) ListWorkers(tenantId uuid.UUID, opts *ListWorkersOpts) ([]*sqlcv1.ListWorkersWithSlotCountRow, error) {
+func (w *workerRepository) ListWorkers(ctx context.Context, tenantId uuid.UUID, opts *ListWorkersOpts) ([]*sqlcv1.ListWorkersRow, error) {
     if err := w.v.Validate(opts); err != nil {
         return nil, err
     }

-    queryParams := sqlcv1.ListWorkersWithSlotCountParams{
+    queryParams := sqlcv1.ListWorkersParams{
         Tenantid: tenantId,
     }

@@ -149,11 +158,11 @@ func (w *workerRepository) ListWorkers(tenantId uuid.UUID, opts *ListWorkersOpts
         }
     }

-    workers, err := w.queries.ListWorkersWithSlotCount(context.Background(), w.pool, queryParams)
+    workers, err := w.queries.ListWorkers(ctx, w.pool, queryParams)

     if err != nil {
         if errors.Is(err, pgx.ErrNoRows) {
-            workers = make([]*sqlcv1.ListWorkersWithSlotCountRow, 0)
+            workers = make([]*sqlcv1.ListWorkersRow, 0)
         } else {
             return nil, fmt.Errorf("could not list workers: %w", err)
         }
w.queries.GetWorkerById(ctx, w.pool, workerId) } type SDK struct { @@ -211,8 +187,13 @@ type TenantIdSDKTuple struct { SDK SDK } -func (w *workerRepository) ListActiveSDKsPerTenant() (map[TenantIdSDKTuple]int64, error) { - sdks, err := w.queries.ListActiveSDKsPerTenant(context.Background(), w.pool) +type TenantIdSlotTypeTuple struct { + TenantId uuid.UUID + SlotType string +} + +func (w *workerRepository) ListActiveSDKsPerTenant(ctx context.Context) (map[TenantIdSDKTuple]int64, error) { + sdks, err := w.queries.ListActiveSDKsPerTenant(ctx, w.pool) if err != nil { return nil, fmt.Errorf("could not list active sdks per tenant: %w", err) @@ -238,8 +219,39 @@ func (w *workerRepository) ListActiveSDKsPerTenant() (map[TenantIdSDKTuple]int64 return tenantIdSDKTupleToCount, nil } -func (w *workerRepository) CountActiveWorkersPerTenant() (map[uuid.UUID]int64, error) { - workers, err := w.queries.ListActiveWorkersPerTenant(context.Background(), w.pool) +func (w *workerRepository) ListTotalActiveSlotsPerTenant(ctx context.Context) (map[uuid.UUID]int64, error) { + rows, err := w.queries.ListTotalActiveSlotsPerTenant(ctx, w.pool) + if err != nil { + return nil, fmt.Errorf("could not list total active slots per tenant: %w", err) + } + + tenantToSlots := make(map[uuid.UUID]int64, len(rows)) + for _, row := range rows { + tenantToSlots[row.TenantId] = row.TotalActiveSlots + } + + return tenantToSlots, nil +} + +func (w *workerRepository) ListActiveSlotsPerTenantAndSlotType(ctx context.Context) (map[TenantIdSlotTypeTuple]int64, error) { + rows, err := w.queries.ListActiveSlotsPerTenantAndSlotType(ctx, w.pool) + if err != nil { + return nil, fmt.Errorf("could not list active slots per tenant and slot type: %w", err) + } + + res := make(map[TenantIdSlotTypeTuple]int64, len(rows)) + for _, row := range rows { + res[TenantIdSlotTypeTuple{ + TenantId: row.TenantId, + SlotType: row.SlotType, + }] = row.ActiveSlots + } + + return res, nil +} + +func (w *workerRepository) CountActiveWorkersPerTenant(ctx context.Context) (map[uuid.UUID]int64, error) { + workers, err := w.queries.ListActiveWorkersPerTenant(ctx, w.pool) if err != nil { return nil, fmt.Errorf("could not list active workers per tenant: %w", err) @@ -254,8 +266,8 @@ func (w *workerRepository) CountActiveWorkersPerTenant() (map[uuid.UUID]int64, e return tenantToWorkers, nil } -func (w *workerRepository) GetWorkerActionsByWorkerId(tenantId uuid.UUID, workerIds []uuid.UUID) (map[string][]string, error) { - records, err := w.queries.GetWorkerActionsByWorkerId(context.Background(), w.pool, sqlcv1.GetWorkerActionsByWorkerIdParams{ +func (w *workerRepository) GetWorkerActionsByWorkerId(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) (map[string][]string, error) { + records, err := w.queries.GetWorkerActionsByWorkerId(ctx, w.pool, sqlcv1.GetWorkerActionsByWorkerIdParams{ Workerids: workerIds, Tenantid: tenantId, }) @@ -280,15 +292,77 @@ func (w *workerRepository) GetWorkerActionsByWorkerId(tenantId uuid.UUID, worker return workerIdToActionIds, nil } -func (w *workerRepository) GetWorkerWorkflowsByWorkerId(tenantId uuid.UUID, workerId uuid.UUID) ([]*sqlcv1.Workflow, error) { - return w.queries.GetWorkerWorkflowsByWorkerId(context.Background(), w.pool, sqlcv1.GetWorkerWorkflowsByWorkerIdParams{ +func (w *workerRepository) GetWorkerWorkflowsByWorkerId(ctx context.Context, tenantId uuid.UUID, workerId uuid.UUID) ([]*sqlcv1.Workflow, error) { + return w.queries.GetWorkerWorkflowsByWorkerId(ctx, w.pool, sqlcv1.GetWorkerWorkflowsByWorkerIdParams{ 
Workerid: workerId, Tenantid: tenantId, }) } -func (w *workerRepository) ListWorkerLabels(tenantId uuid.UUID, workerId uuid.UUID) ([]*sqlcv1.ListWorkerLabelsRow, error) { - return w.queries.ListWorkerLabels(context.Background(), w.pool, workerId) +func (w *workerRepository) ListWorkerLabels(ctx context.Context, tenantId uuid.UUID, workerId uuid.UUID) ([]*sqlcv1.ListWorkerLabelsRow, error) { + return w.queries.ListWorkerLabels(ctx, w.pool, workerId) +} + +func (w *workerRepository) ListWorkerSlotConfigs(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) (map[uuid.UUID]map[string]int32, error) { + rows, err := w.queries.ListWorkerSlotConfigs(ctx, w.pool, sqlcv1.ListWorkerSlotConfigsParams{ + Tenantid: tenantId, + Workerids: workerIds, + }) + + if err != nil { + return nil, err + } + + res := make(map[uuid.UUID]map[string]int32) + for _, row := range rows { + if _, ok := res[row.WorkerID]; !ok { + res[row.WorkerID] = make(map[string]int32) + } + res[row.WorkerID][row.SlotType] = row.MaxUnits + } + + return res, nil +} + +func (w *workerRepository) ListAvailableSlotsForWorkers(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID, slotType string) (map[uuid.UUID]int32, error) { + rows, err := w.queries.ListAvailableSlotsForWorkers(ctx, w.pool, sqlcv1.ListAvailableSlotsForWorkersParams{ + Tenantid: tenantId, + Workerids: workerIds, + Slottype: slotType, + }) + + if err != nil { + return nil, fmt.Errorf("could not list available slots for workers: %w", err) + } + + res := make(map[uuid.UUID]int32, len(rows)) + for _, row := range rows { + res[row.ID] = row.AvailableSlots + } + + return res, nil +} + +func (w *workerRepository) ListAvailableSlotsForWorkersAndTypes(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID, slotTypes []string) (map[uuid.UUID]map[string]int32, error) { + rows, err := w.queries.ListAvailableSlotsForWorkersAndTypes(ctx, w.pool, sqlcv1.ListAvailableSlotsForWorkersAndTypesParams{ + Tenantid: tenantId, + Workerids: workerIds, + Slottypes: slotTypes, + }) + + if err != nil { + return nil, fmt.Errorf("could not list available slots for workers and types: %w", err) + } + + res := make(map[uuid.UUID]map[string]int32) + for _, row := range rows { + if _, ok := res[row.ID]; !ok { + res[row.ID] = make(map[string]int32) + } + res[row.ID][row.SlotType] = row.AvailableSlots + } + + return res, nil } func (w *workerRepository) GetWorkerForEngine(ctx context.Context, tenantId uuid.UUID, workerId uuid.UUID) (*sqlcv1.GetWorkerForEngineRow, error) { @@ -305,13 +379,14 @@ func (w *workerRepository) CreateNewWorker(ctx context.Context, tenantId uuid.UU return nil, err } - maxRuns := int32(100) + slotConfig := opts.SlotConfig + slots := int32(0) - if opts.MaxRuns != nil { - maxRuns = int32(*opts.MaxRuns) // nolint: gosec + for _, units := range slotConfig { + slots += units } - preWorkerSlot, postWorkerSlot := w.m.Meter(ctx, sqlcv1.LimitResourceWORKERSLOT, tenantId, maxRuns) + preWorkerSlot, postWorkerSlot := w.m.Meter(ctx, sqlcv1.LimitResourceWORKERSLOT, tenantId, slots) if err := preWorkerSlot(); err != nil { return nil, err @@ -341,20 +416,6 @@ func (w *workerRepository) CreateNewWorker(ctx context.Context, tenantId uuid.UU Valid: true, } - if opts.MaxRuns != nil { - createParams.MaxRuns = pgtype.Int4{ - Int32: int32(*opts.MaxRuns), // nolint: gosec - Valid: true, - } - } else { - createParams.MaxRuns = pgtype.Int4{ - Int32: 100, - Valid: true, - } - } - - var worker *sqlcv1.Worker - if opts.RuntimeInfo != nil { if opts.RuntimeInfo.SdkVersion != nil { 
createParams.SdkVersion = sqlchelpers.TextFromStr(*opts.RuntimeInfo.SdkVersion) @@ -396,11 +457,29 @@ func (w *workerRepository) CreateNewWorker(ctx context.Context, tenantId uuid.UU } } - if worker == nil { - worker, err = w.queries.CreateWorker(ctx, tx, createParams) + worker, err := w.queries.CreateWorker(ctx, tx, createParams) + + if err != nil { + return nil, fmt.Errorf("could not create worker: %w", err) + } + + slotTypes := make([]string, 0) + maxUnits := make([]int32, 0) + for slotType, units := range slotConfig { + slotTypes = append(slotTypes, slotType) + maxUnits = append(maxUnits, units) + } + + if len(slotTypes) > 0 { + err = w.queries.CreateWorkerSlotConfigs(ctx, tx, sqlcv1.CreateWorkerSlotConfigsParams{ + Tenantid: tenantId, + Workerid: worker.ID, + Slottypes: slotTypes, + Maxunits: maxUnits, + }) if err != nil { - return nil, fmt.Errorf("could not create worker: %w", err) + return nil, fmt.Errorf("could not create worker slot config: %w", err) } } diff --git a/pkg/repository/workflow.go b/pkg/repository/workflow.go index 79b34cc694..6ba98737ec 100644 --- a/pkg/repository/workflow.go +++ b/pkg/repository/workflow.go @@ -100,6 +100,12 @@ type CreateStepOpts struct { // (optional) the step retry backoff max seconds (can't be greater than 86400) RetryBackoffMaxSeconds *int `validate:"omitnil,min=1,max=86400"` + // (optional) whether this step is durable + IsDurable bool `json:"isDurable,omitempty"` + + // (optional) slot requests for this step (slot_type -> units) + SlotRequests map[string]int32 `json:"slotRequests,omitempty" validate:"omitempty,dive,keys,required,endkeys,gt=0"` + // (optional) a list of additional trigger conditions TriggerConditions []CreateStepMatchConditionOpt `validate:"omitempty,dive"` @@ -727,6 +733,7 @@ func (r *workflowRepository) createJobTx(ctx context.Context, tx sqlcv1.DBTX, te Readableid: stepOpts.ReadableId, CustomUserData: customUserData, Retries: retries, + IsDurable: sqlchelpers.BoolFromBoolean(stepOpts.IsDurable), } if stepOpts.ScheduleTimeout != nil { @@ -757,6 +764,45 @@ func (r *workflowRepository) createJobTx(ctx context.Context, tx sqlcv1.DBTX, te return nil, err } + slotRequests := stepOpts.SlotRequests + if len(slotRequests) == 0 { + if stepOpts.IsDurable { + slotRequests = map[string]int32{SlotTypeDurable: 1} + } else { + slotRequests = map[string]int32{SlotTypeDefault: 1} + } + } + + slotTypes := make([]string, 0, len(slotRequests)) + units := make([]int32, 0, len(slotRequests)) + for slotType, unit := range slotRequests { + if unit <= 0 { + continue + } + slotTypes = append(slotTypes, slotType) + units = append(units, unit) + } + + if len(slotTypes) == 0 { + slotTypes = append(slotTypes, SlotTypeDefault) + units = append(units, 1) + } + + err = r.queries.CreateStepSlotRequests( + ctx, + tx, + sqlcv1.CreateStepSlotRequestsParams{ + Tenantid: tenantId, + Stepid: stepId, + Slottypes: slotTypes, + Units: units, + }, + ) + + if err != nil { + return nil, err + } + // upsert the queue based on the action // note: we don't use the postCommit func, it just sets the queue in the cache which is not necessary for writing a // workflow version, only when we're inserting a bunch of tasks for that queue @@ -1230,6 +1276,18 @@ func checksumV1(opts *CreateWorkflowVersionOpts) (string, *CreateWorkflowVersion } } + // Normalize fields for backwards-compatible checksums: + // default values that didn't exist before this feature should not change the hash. 
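Context for the normalization below: as of this change, a step registered with no explicit SlotRequests is stored as a single unit of the default (or durable) slot type, so re-registering a pre-existing workflow would otherwise produce a different checksum and a spurious new version. A minimal sketch of the defaulting rule applied in createJobTx, assuming SlotTypeDefault and SlotTypeDurable are the string constants "default" and "durable":

// Sketch only; mirrors the defaulting in createJobTx above. The literal
// values "default" and "durable" are assumptions about the constants.
func effectiveSlotRequests(isDurable bool, requested map[string]int32) map[string]int32 {
	if len(requested) > 0 {
		return requested // explicit requests win; non-positive units are filtered upstream
	}
	if isDurable {
		return map[string]int32{"durable": 1} // SlotTypeDurable
	}
	return map[string]int32{"default": 1} // SlotTypeDefault
}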
+	for i := range opts.Tasks {
+		// SlotRequests={"default": 1} is the new default; strip it so it doesn't affect the hash.
+		sr := opts.Tasks[i].SlotRequests
+		if len(sr) == 1 {
+			if units, ok := sr[SlotTypeDefault]; ok && units == 1 {
+				opts.Tasks[i].SlotRequests = nil
+			}
+		}
+	}
+
 	// compute a checksum for the workflow
 	declaredValues, err := datautils.ToJSONMap(opts)
diff --git a/pkg/repository/workflow_checksum_test.go b/pkg/repository/workflow_checksum_test.go
new file mode 100644
index 0000000000..2366974f17
--- /dev/null
+++ b/pkg/repository/workflow_checksum_test.go
@@ -0,0 +1,157 @@
+package repository
+
+import (
+	"testing"
+)
+
+func TestChecksumV1_BackwardsCompatibility(t *testing.T) {
+	// Compute a baseline checksum with no IsDurable or SlotRequests fields set
+	// (simulating a pre-feature workflow registration).
+	baselineOpts := &CreateWorkflowVersionOpts{
+		Name: "test-workflow",
+		Tasks: []CreateStepOpts{
+			{
+				ReadableId: "step1",
+				Action: "default:step1",
+			},
+		},
+	}
+
+	baselineChecksum, _, err := checksumV1(baselineOpts)
+	if err != nil {
+		t.Fatalf("unexpected error computing baseline checksum: %v", err)
+	}
+
+	t.Run("IsDurable false does not change hash", func(t *testing.T) {
+		opts := &CreateWorkflowVersionOpts{
+			Name: "test-workflow",
+			Tasks: []CreateStepOpts{
+				{
+					ReadableId: "step1",
+					Action: "default:step1",
+					IsDurable: false,
+				},
+			},
+		}
+
+		cs, _, err := checksumV1(opts)
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		if cs != baselineChecksum {
+			t.Errorf("IsDurable=false changed the hash\n baseline: %s\n got: %s", baselineChecksum, cs)
+		}
+	})
+
+	t.Run("SlotRequests default:1 does not change hash", func(t *testing.T) {
+		opts := &CreateWorkflowVersionOpts{
+			Name: "test-workflow",
+			Tasks: []CreateStepOpts{
+				{
+					ReadableId: "step1",
+					Action: "default:step1",
+					SlotRequests: map[string]int32{SlotTypeDefault: 1},
+				},
+			},
+		}
+
+		cs, _, err := checksumV1(opts)
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		if cs != baselineChecksum {
+			t.Errorf("SlotRequests={default:1} changed the hash\n baseline: %s\n got: %s", baselineChecksum, cs)
+		}
+	})
+
+	t.Run("IsDurable false and SlotRequests default:1 together do not change hash", func(t *testing.T) {
+		opts := &CreateWorkflowVersionOpts{
+			Name: "test-workflow",
+			Tasks: []CreateStepOpts{
+				{
+					ReadableId: "step1",
+					Action: "default:step1",
+					IsDurable: false,
+					SlotRequests: map[string]int32{SlotTypeDefault: 1},
+				},
+			},
+		}
+
+		cs, _, err := checksumV1(opts)
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		if cs != baselineChecksum {
+			t.Errorf("IsDurable=false + SlotRequests={default:1} changed the hash\n baseline: %s\n got: %s", baselineChecksum, cs)
+		}
+	})
+
+	t.Run("IsDurable true changes hash", func(t *testing.T) {
+		opts := &CreateWorkflowVersionOpts{
+			Name: "test-workflow",
+			Tasks: []CreateStepOpts{
+				{
+					ReadableId: "step1",
+					Action: "default:step1",
+					IsDurable: true,
+				},
+			},
+		}
+
+		cs, _, err := checksumV1(opts)
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		if cs == baselineChecksum {
+			t.Error("IsDurable=true should change the hash, but it did not")
+		}
+	})
+
+	t.Run("custom SlotRequests changes hash", func(t *testing.T) {
+		opts := &CreateWorkflowVersionOpts{
+			Name: "test-workflow",
+			Tasks: []CreateStepOpts{
+				{
+					ReadableId: "step1",
+					Action: "default:step1",
+					SlotRequests: map[string]int32{"gpu": 2},
+				},
+			},
+		}
+
+		cs, _, err := checksumV1(opts)
+		if err != nil {
+			t.Fatalf("unexpected 
error: %v", err) + } + + if cs == baselineChecksum { + t.Error("SlotRequests={gpu:2} should change the hash, but it did not") + } + }) + + t.Run("SlotRequests default:2 changes hash", func(t *testing.T) { + opts := &CreateWorkflowVersionOpts{ + Name: "test-workflow", + Tasks: []CreateStepOpts{ + { + ReadableId: "step1", + Action: "default:step1", + SlotRequests: map[string]int32{SlotTypeDefault: 2}, + }, + }, + } + + cs, _, err := checksumV1(opts) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if cs == baselineChecksum { + t.Error("SlotRequests={default:2} should change the hash, but it did not") + } + }) +} diff --git a/pkg/scheduling/v1/action.go b/pkg/scheduling/v1/action.go index 1d54247cfd..096bf9950a 100644 --- a/pkg/scheduling/v1/action.go +++ b/pkg/scheduling/v1/action.go @@ -3,6 +3,8 @@ package v1 import ( "slices" "sync" + + "github.com/google/uuid" ) type action struct { @@ -14,6 +16,13 @@ type action struct { // note that slots can be used across multiple actions, hence the pointer slots []*slot + + // slotsByTypeAndWorkerId indexes slots by slotType -> workerId -> slots. + // + // NOTE: this index contains pointers to the same slot objects as slots. + // It is built/replaced during replenish under a.mu and read during assignment + // under a.mu (RLock or Lock). + slotsByTypeAndWorkerId map[string]map[uuid.UUID][]*slot } func (a *action) activeCount() int { diff --git a/pkg/scheduling/v1/extension.go b/pkg/scheduling/v1/extension.go index 5a2a2c1750..27c65b4d5e 100644 --- a/pkg/scheduling/v1/extension.go +++ b/pkg/scheduling/v1/extension.go @@ -6,6 +6,7 @@ import ( "golang.org/x/sync/errgroup" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" ) diff --git a/pkg/scheduling/v1/lease_manager.go b/pkg/scheduling/v1/lease_manager.go index f8baf2831f..f59596156d 100644 --- a/pkg/scheduling/v1/lease_manager.go +++ b/pkg/scheduling/v1/lease_manager.go @@ -463,49 +463,55 @@ func (l *LeaseManager) notifyNewConcurrencyStrategy(ctx context.Context, strateg return nil } -// loopForLeases acquires new leases every 5 seconds for workers, queues, and concurrency strategies -func (l *LeaseManager) loopForLeases(ctx context.Context) { - ticker := time.NewTicker(5 * time.Second) +func (l *LeaseManager) acquireAllLeases(ctx context.Context) { + loopCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - // we don't want to block the cleanup process, so we use a separate context with a timeout - loopCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + wg := sync.WaitGroup{} - wg := sync.WaitGroup{} + wg.Add(3) - wg.Add(3) + go func() { + defer wg.Done() - go func() { - defer wg.Done() + if err := l.acquireWorkerLeases(loopCtx); err != nil { + l.conf.l.Error().Err(err).Msg("error acquiring worker leases") + } + }() - if err := l.acquireWorkerLeases(loopCtx); err != nil { - l.conf.l.Error().Err(err).Msg("error acquiring worker leases") - } - }() + go func() { + defer wg.Done() - go func() { - defer wg.Done() + if err := l.acquireQueueLeases(loopCtx); err != nil { + l.conf.l.Error().Err(err).Msg("error acquiring queue leases") + } + }() - if err := l.acquireQueueLeases(loopCtx); err != nil { - l.conf.l.Error().Err(err).Msg("error acquiring queue leases") - } - }() + go func() { + defer wg.Done() - go func() { - defer wg.Done() + if err := l.acquireConcurrencyLeases(loopCtx); err != nil { + l.conf.l.Error().Err(err).Msg("error acquiring concurrency leases") + } + 
}() + + wg.Wait() +} - if err := l.acquireConcurrencyLeases(loopCtx); err != nil { - l.conf.l.Error().Err(err).Msg("error acquiring concurrency leases") - } - }() +// loopForLeases acquires new leases every 5 seconds for workers, queues, and concurrency strategies +func (l *LeaseManager) loopForLeases(ctx context.Context) { + // Perform an initial lease acquisition immediately so that callers don't have to wait + // for the first ticker interval before workers, queues, and concurrency strategies are discovered. + l.acquireAllLeases(ctx) - wg.Wait() + ticker := time.NewTicker(5 * time.Second) - cancel() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + l.acquireAllLeases(ctx) } } } diff --git a/pkg/scheduling/v1/prometheus_extension.go b/pkg/scheduling/v1/prometheus_extension.go index 95530d1390..a8f3b6c13d 100644 --- a/pkg/scheduling/v1/prometheus_extension.go +++ b/pkg/scheduling/v1/prometheus_extension.go @@ -4,6 +4,7 @@ import ( "sync" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/integrations/metrics/prometheus" "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" ) diff --git a/pkg/scheduling/v1/queuer.go b/pkg/scheduling/v1/queuer.go index ff33aab3c1..8c4104114a 100644 --- a/pkg/scheduling/v1/queuer.go +++ b/pkg/scheduling/v1/queuer.go @@ -213,7 +213,21 @@ func (q *Queuer) loopQueue(ctx context.Context) { desiredLabelsTime := time.Since(checkpoint) checkpoint = time.Now() - assignCh := q.s.tryAssign(ctx, qis, labels, rls) + stepRequests, err := q.repo.GetStepSlotRequests(ctx, nil, stepIds) + + if err != nil { + span.RecordError(err) + span.End() + q.l.Error().Err(err).Msg("error getting step slot requests") + + q.unackedToUnassigned(qis) + continue + } + + getSlotRequestsTime := time.Since(checkpoint) + checkpoint = time.Now() + + assignCh := q.s.tryAssign(ctx, qis, labels, stepRequests, rls) count := 0 countMu := sync.Mutex{} @@ -292,6 +306,8 @@ func (q *Queuer) loopQueue(ctx context.Context) { "rate_limit_time", rateLimitTime, ).Dur( "desired_labels_time", desiredLabelsTime, + ).Dur( + "get_slot_requests_time", getSlotRequestsTime, ).Dur( "assign_time", assignTime, ).Msgf("queue %s took longer than 100ms (%s) to process %d items", q.queueName, elapsed, len(qis)) @@ -600,7 +616,12 @@ func (q *Queuer) runOptimisticQueue( return nil, nil, err } - assignCh := q.s.tryAssign(ctx, qis, labels, rls) + stepRequests, err := q.repo.GetStepSlotRequests(ctx, tx, stepIds) + if err != nil { + return nil, nil, err + } + + assignCh := q.s.tryAssign(ctx, qis, labels, stepRequests, rls) var allLocalAssigned []*v1.AssignedItem var allQueueResults []*QueueResults diff --git a/pkg/scheduling/v1/scheduler.go b/pkg/scheduling/v1/scheduler.go index d2ea59b17f..edcf375460 100644 --- a/pkg/scheduling/v1/scheduler.go +++ b/pkg/scheduling/v1/scheduler.go @@ -2,7 +2,10 @@ package v1 import ( "context" + "fmt" "math/rand" + "slices" + "strings" "sync" "time" @@ -38,7 +41,7 @@ type Scheduler struct { // unackedSlots are slots which have been assigned to a worker, but have not been flushed // to the database yet. They negatively count towards a worker's available slot count. 
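The assignedSlots type used for unacked entries below is defined outside this diff; judging from its call sites in this file (per-slot ack/nack fan-out plus one-shot rate limit callbacks), a plausible shape is roughly the following sketch, not the actual definition:

// Hypothetical sketch of assignedSlots, inferred from its usage in this file.
type assignedSlotsSketch struct {
	slots         []*slot // every slot unit reserved on one worker for one queue item
	rateLimitAck  func()
	rateLimitNack func()
}

// ack confirms each reserved unit and fires the rate limit ack exactly once.
func (a *assignedSlotsSketch) ack() {
	for _, s := range a.slots {
		s.ack()
	}
	if a.rateLimitAck != nil {
		a.rateLimitAck()
	}
}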
- unackedSlots map[int]*slot + unackedSlots map[int]*assignedSlots unackedMu mutex rl *rateLimiter @@ -53,7 +56,7 @@ func newScheduler(cf *sharedConfig, tenantId uuid.UUID, rl *rateLimiter, exts *E tenantId: tenantId, l: &l, actions: make(map[string]*action), - unackedSlots: make(map[int]*slot), + unackedSlots: make(map[int]*assignedSlots), rl: rl, actionsMu: newRWMu(cf.l), replenishMu: newMu(cf.l), @@ -70,8 +73,8 @@ func (s *Scheduler) ack(ids []int) { defer s.unackedMu.Unlock() for _, id := range ids { - if slot, ok := s.unackedSlots[id]; ok { - slot.ack() + if assigned, ok := s.unackedSlots[id]; ok { + assigned.ack() delete(s.unackedSlots, id) } } @@ -82,8 +85,8 @@ func (s *Scheduler) nack(ids []int) { defer s.unackedMu.Unlock() for _, id := range ids { - if slot, ok := s.unackedSlots[id]; ok { - slot.nack() + if assigned, ok := s.unackedSlots[id]; ok { + assigned.nack() delete(s.unackedSlots, id) } } @@ -201,7 +204,8 @@ func (s *Scheduler) replenish(ctx context.Context, mustReplenish bool) error { // if the action is not in the map, it should be replenished if _, ok := s.actions[actionId]; !ok { newAction := &action{ - actionId: actionId, + actionId: actionId, + slotsByTypeAndWorkerId: make(map[string]map[uuid.UUID][]*slot), } actionsToReplenish[actionId] = newAction @@ -256,82 +260,182 @@ func (s *Scheduler) replenish(ctx context.Context, mustReplenish bool) error { checkpoint = time.Now() // FUNCTION 2: for each action which should be replenished, load the available slots - uniqueWorkerIds := make(map[uuid.UUID]bool) + workerSlotConfigs, err := s.repo.ListWorkerSlotConfigs(ctx, s.tenantId, workerIds) + if err != nil { + return err + } - for actionId := range actionsToReplenish { - workerIds := actionsToWorkerIds[actionId] + workerSlotTypes := make(map[uuid.UUID]map[string]bool, len(workerSlotConfigs)) + slotTypeToWorkerIds := make(map[string]map[uuid.UUID]bool) - for _, workerId := range workerIds { - uniqueWorkerIds[workerId] = true + for _, config := range workerSlotConfigs { + if _, ok := workerSlotTypes[config.WorkerID]; !ok { + workerSlotTypes[config.WorkerID] = make(map[string]bool) } - } - workerUUIDs := make([]uuid.UUID, 0, len(uniqueWorkerIds)) + workerSlotTypes[config.WorkerID][config.SlotType] = true - for workerId := range uniqueWorkerIds { - workerUUIDs = append(workerUUIDs, workerId) + if _, ok := slotTypeToWorkerIds[config.SlotType]; !ok { + slotTypeToWorkerIds[config.SlotType] = make(map[uuid.UUID]bool) + } + + slotTypeToWorkerIds[config.SlotType][config.WorkerID] = true } - orderedLock(actionsToReplenish) - unlock := orderedUnlock(actionsToReplenish) + // We may update slots for any action that is active on a worker with slot capacity. + // Since tryAssignBatch can hold action.mu without holding actionsMu, we must lock every + // action we might write to here (not just the subset that triggered a replenish). 
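The comment above states the central concurrency invariant of this change: replenish and tryAssignBatch both take per-action mutexes, and the only thing preventing a deadlock between them is that every caller acquires those mutexes in the same deterministic order. orderedLock itself is not shown in this diff; its expected behavior is roughly:

// Assumed behavior of orderedLock (not shown in this diff): lock per-action
// mutexes in sorted key order so two concurrent callers can never acquire
// overlapping locks in opposite orders. Needs "sort" from the stdlib.
func orderedLockSketch(actions map[string]*action) {
	keys := make([]string, 0, len(actions))
	for k := range actions {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		actions[k].mu.Lock()
	}
}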
+ actionsToLock := make(map[string]*action) + for _, workerSet := range slotTypeToWorkerIds { + for workerId := range workerSet { + for _, actionId := range workerIdsToActions[workerId] { + if a := s.actions[actionId]; a != nil { + actionsToLock[actionId] = a + } + } + } + } + + orderedLock(actionsToLock) + unlock := orderedUnlock(actionsToLock) defer unlock() s.unackedMu.Lock() defer s.unackedMu.Unlock() - availableSlots, err := s.repo.ListAvailableSlotsForWorkers(ctx, s.tenantId, sqlcv1.ListAvailableSlotsForWorkersParams{ - Tenantid: s.tenantId, - Workerids: workerUUIDs, - }) + availableSlotsByType := make(map[string]map[uuid.UUID]int, len(slotTypeToWorkerIds)) - if err != nil { - return err + slotTypes := make([]string, 0, len(slotTypeToWorkerIds)) + workerUUIDSet := make(map[uuid.UUID]struct{}) + + for slotType, workerSet := range slotTypeToWorkerIds { + slotTypes = append(slotTypes, slotType) + + // Preserve the prior behavior of creating a map entry per slot type even if it ends up empty. + if _, ok := availableSlotsByType[slotType]; !ok { + availableSlotsByType[slotType] = make(map[uuid.UUID]int, len(workerSet)) + } + + for workerId := range workerSet { + workerUUIDSet[workerId] = struct{}{} + } + } + + if len(slotTypes) > 0 && len(workerUUIDSet) > 0 { + workerUUIDs := make([]uuid.UUID, 0, len(workerUUIDSet)) + for workerId := range workerUUIDSet { + workerUUIDs = append(workerUUIDs, workerId) + } + + availableSlots, err := s.repo.ListAvailableSlotsForWorkersAndTypes(ctx, s.tenantId, sqlcv1.ListAvailableSlotsForWorkersAndTypesParams{ + Tenantid: s.tenantId, + Workerids: workerUUIDs, + Slottypes: slotTypes, + }) + if err != nil { + return err + } + + for _, row := range availableSlots { + if _, ok := availableSlotsByType[row.SlotType]; !ok { + availableSlotsByType[row.SlotType] = make(map[uuid.UUID]int) + } + + availableSlotsByType[row.SlotType][row.ID] = int(row.AvailableSlots) + } } s.l.Debug().Msgf("loading available slots took %s", time.Since(checkpoint)) // FUNCTION 3: list unacked slots (so they're not counted towards the worker slot count) - workersToUnackedSlots := make(map[uuid.UUID][]*slot) + workersToUnackedSlots := make(map[uuid.UUID]map[string][]*slot) for _, unackedSlot := range s.unackedSlots { - s := unackedSlot - workerId := s.getWorkerId() + for _, assignedSlot := range unackedSlot.slots { + workerId := assignedSlot.getWorkerId() - if _, ok := workersToUnackedSlots[workerId]; !ok { - workersToUnackedSlots[workerId] = make([]*slot, 0) - } + slotType, err := assignedSlot.getSlotType() + if err != nil { + return fmt.Errorf("could not get slot type for unacked slot: %w", err) + } - workersToUnackedSlots[workerId] = append(workersToUnackedSlots[workerId], s) + if _, ok := workersToUnackedSlots[workerId]; !ok { + workersToUnackedSlots[workerId] = make(map[string][]*slot) + } + + workersToUnackedSlots[workerId][slotType] = append(workersToUnackedSlots[workerId][slotType], assignedSlot) + } } // FUNCTION 4: write the new slots to the scheduler and clean up expired slots actionsToNewSlots := make(map[string][]*slot) actionsToTotalSlots := make(map[string]int) + actionsToSlotsByType := make(map[string]map[string]map[uuid.UUID][]*slot) + + // metaCache interns slotMeta so slots with identical metadata share the same pointer. 
+ // Key format: slotType + "\x00" + strings.Join(sortedUniqueActions, "\x00") + metaCache := make(map[string]*slotMeta) + + for slotType, availableSlotsByWorker := range availableSlotsByType { + for workerId, availableSlots := range availableSlotsByWorker { + actions := workerIdsToActions[workerId] + unackedSlots := workersToUnackedSlots[workerId][slotType] + + // create a slot for each available slot + slots := make([]*slot, 0) + availableCount := availableSlots - len(unackedSlots) + if availableCount < 0 { + availableCount = 0 + } - for _, worker := range availableSlots { - workerId := worker.ID - actions := workerIdsToActions[workerId] - unackedSlots := workersToUnackedSlots[workerId] + // Canonicalize actions to increase cache hits across workers. + // Order doesn't matter for correctness anywhere in scheduling. + if len(actions) > 1 { + slices.Sort(actions) + actions = slices.Compact(actions) + } - // create a slot for each available slot - slots := make([]*slot, 0) + metaKey := slotType + if len(actions) > 0 { + metaKey = slotType + "\x00" + strings.Join(actions, "\x00") + } - for i := 0; i < int(worker.AvailableSlots)-len(unackedSlots); i++ { - slots = append(slots, newSlot(workers[workerId], actions)) - } + meta := metaCache[metaKey] + if meta == nil { + meta = newSlotMeta(actions, slotType) + metaCache[metaKey] = meta + } - // extend expiry of all unacked slots - for _, unackedSlot := range unackedSlots { - unackedSlot.extendExpiry() - } + for i := 0; i < availableCount; i++ { + slots = append(slots, newSlot(workers[workerId], meta)) + } - s.l.Debug().Msgf("worker %s has %d total slots, %d unacked slots", workerId, worker.AvailableSlots, len(unackedSlots)) + // extend expiry of all unacked slots + for _, unackedSlot := range unackedSlots { + unackedSlot.extendExpiry() + } + + s.l.Debug().Msgf("worker %s has %d total slots (%s), %d unacked slots", workerId, availableSlots, slotType, len(unackedSlots)) + + slots = append(slots, unackedSlots...) + + for _, actionId := range actions { + if s.actions[actionId] == nil { + continue + } - slots = append(slots, unackedSlots...) + actionsToNewSlots[actionId] = append(actionsToNewSlots[actionId], slots...) + actionsToTotalSlots[actionId] += len(slots) - for _, actionId := range actions { - actionsToNewSlots[actionId] = append(actionsToNewSlots[actionId], slots...) - actionsToTotalSlots[actionId] += len(slots) + if _, ok := actionsToSlotsByType[actionId]; !ok { + actionsToSlotsByType[actionId] = make(map[string]map[uuid.UUID][]*slot) + } + if _, ok := actionsToSlotsByType[actionId][slotType]; !ok { + actionsToSlotsByType[actionId][slotType] = make(map[uuid.UUID][]*slot) + } + // Reuse the per-worker/per-type slice for each action on that worker. + actionsToSlotsByType[actionId][slotType][workerId] = slots + } } } @@ -340,14 +444,21 @@ func (s *Scheduler) replenish(ctx context.Context, mustReplenish bool) error { // first pass: write all actions with new slots to the scheduler for actionId, newSlots := range actionsToNewSlots { + storedAction := actionsToLock[actionId] + if storedAction == nil { + // Defensive: actionsToNewSlots should only contain actions for workers we locked above. + continue + } + // randomly sort the slots randSource.Shuffle(len(newSlots), func(i, j int) { newSlots[i], newSlots[j] = newSlots[j], newSlots[i] }) // we overwrite the slots for the action. we know that the action is in the map because we checked // for it in the first pass. 
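A note on the metaCache key assembled in the replenish loop above, before the slot overwrite code that follows: the key is only unambiguous because "\x00" is assumed never to appear inside a slot type or an action ID, so two different (slotType, actions) pairs cannot collide. Factored out, the construction is:

// Key construction as used in replenish above; needs "strings" from the stdlib.
// Assumes slot types and action IDs never contain a NUL byte.
func metaKey(slotType string, sortedActions []string) string {
	if len(sortedActions) == 0 {
		return slotType
	}
	return slotType + "\x00" + strings.Join(sortedActions, "\x00")
}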
- s.actions[actionId].slots = newSlots - s.actions[actionId].lastReplenishedSlotCount = actionsToTotalSlots[actionId] - s.actions[actionId].lastReplenishedWorkerCount = len(actionsToWorkerIds[actionId]) + storedAction.slots = newSlots + storedAction.slotsByTypeAndWorkerId = actionsToSlotsByType[actionId] + storedAction.lastReplenishedSlotCount = actionsToTotalSlots[actionId] + storedAction.lastReplenishedWorkerCount = len(actionsToWorkerIds[actionId]) s.l.Debug().Msgf("before cleanup, action %s has %d slots", actionId, len(newSlots)) } @@ -357,14 +468,30 @@ func (s *Scheduler) replenish(ctx context.Context, mustReplenish bool) error { newSlots := make([]*slot, 0, len(storedAction.slots)) for i := range storedAction.slots { - slot := storedAction.slots[i] + slotItem := storedAction.slots[i] - if !slot.expired() { - newSlots = append(newSlots, slot) + if !slotItem.expired() { + newSlots = append(newSlots, slotItem) } } storedAction.slots = newSlots + storedAction.slotsByTypeAndWorkerId = make(map[string]map[uuid.UUID][]*slot) + + for _, slotItem := range newSlots { + slotType, err := slotItem.getSlotType() + if err != nil { + return fmt.Errorf("could not get slot type during cleanup: %w", err) + } + + workerId := slotItem.getWorkerId() + + if _, ok := storedAction.slotsByTypeAndWorkerId[slotType]; !ok { + storedAction.slotsByTypeAndWorkerId[slotType] = make(map[uuid.UUID][]*slot) + } + + storedAction.slotsByTypeAndWorkerId[slotType][workerId] = append(storedAction.slotsByTypeAndWorkerId[slotType][workerId], slotItem) + } s.l.Debug().Msgf("after cleanup, action %s has %d slots", storedAction.actionId, len(newSlots)) } @@ -403,6 +530,7 @@ func (s *Scheduler) loopReplenish(ctx context.Context) { } cancel() } + } } @@ -410,8 +538,8 @@ func (s *Scheduler) loopSnapshot(ctx context.Context) { ticker := randomticker.NewRandomTicker(10*time.Millisecond, 90*time.Millisecond) defer ticker.Stop() + count := 0 for { - count := 0 select { case <-ctx.Done(): @@ -479,6 +607,7 @@ func (s *Scheduler) tryAssignBatch( // slots concurrently. ringOffset int, stepIdsToLabels map[uuid.UUID][]*sqlcv1.GetDesiredLabelsRow, + stepIdsToRequests map[uuid.UUID]map[string]int32, taskIdsToRateLimits map[int64]map[string]int32, ) ( res []*assignSingleResult, newRingOffset int, err error, @@ -550,24 +679,42 @@ func (s *Scheduler) tryAssignBatch( // NOTE: if we change the position of this lock, make sure that we are still acquiring locks in the same // order as the replenish() function, otherwise we may deadlock. s.actionsMu.RLock() - action, ok := s.actions[actionId] + s.actionsMu.RUnlock() - if !ok || len(action.slots) == 0 { - s.actionsMu.RUnlock() + if !ok || action == nil { + s.l.Debug().Msgf("no action %s", actionId) + + // Treat missing action as "no slots" for any non-rate-limited queue item. 
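Stepping back to the two replenish passes above: both the first pass (overwriting from actionsToSlotsByType) and the cleanup pass (rebuilding from the surviving slots) must leave action.slots and slotsByTypeAndWorkerId describing the same slot objects. A hypothetical consistency check, useful in tests but not part of this diff:

// Hypothetical helper: every slot in a.slots should be indexed exactly once
// under its slot type and worker, so the two counts must agree.
func indexCoversAllSlots(a *action) bool {
	indexed := 0
	for _, byWorker := range a.slotsByTypeAndWorkerId {
		for _, slots := range byWorker {
			indexed += len(slots)
		}
	}
	return indexed == len(a.slots)
}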
+ for i := range res { + if res[i].rateLimitResult != nil { + continue + } + res[i].noSlots = true + rlNacks[i]() + } + + return res, newRingOffset, nil + } + + action.mu.RLock() + if len(action.slots) == 0 { + action.mu.RUnlock() s.l.Debug().Msgf("no slots for action %s", actionId) // if the action is not in the map, then we have no slots to assign to for i := range res { + if res[i].rateLimitResult != nil { + continue + } res[i].noSlots = true rlNacks[i]() } return res, newRingOffset, nil } - - s.actionsMu.RUnlock() + action.mu.RUnlock() action.mu.Lock() defer action.mu.Unlock() @@ -592,12 +739,28 @@ func (s *Scheduler) tryAssignBatch( qi := qis[i] + labels := []*sqlcv1.GetDesiredLabelsRow(nil) + if stepIdsToLabels != nil { + labels = stepIdsToLabels[qi.StepID] + } + + // Backwards-compatible default: if no slot requests are provided for a step, + // assume it needs 1 default slot. + requests := map[string]int32{v1.SlotTypeDefault: 1} + if stepIdsToRequests != nil { + if r, ok := stepIdsToRequests[qi.StepID]; ok && len(r) > 0 { + requests = r + } + } + singleRes, err := s.tryAssignSingleton( ctx, qi, + action, candidateSlots, childRingOffset, - stepIdsToLabels[qi.StepID], + labels, + requests, rlAcks[i], rlNacks[i], ) @@ -619,41 +782,140 @@ func (s *Scheduler) tryAssignBatch( return res, newRingOffset, nil } -func findSlot( +func findAssignableSlots( candidateSlots []*slot, + action *action, + requests map[string]int32, rateLimitAck func(), rateLimitNack func(), -) *slot { - var assignedSlot *slot +) *assignedSlots { + // NOTE: the caller must hold action.mu (RLock or Lock) while calling this + // function. We read from action.slots, which is replaced during replenish + // under action.mu. + seenWorkers := make(map[uuid.UUID]struct{}) + + for _, candidateSlot := range candidateSlots { + if !candidateSlot.active() { + continue + } + + workerId := candidateSlot.getWorkerId() + if _, seen := seenWorkers[workerId]; seen { + continue + } + seenWorkers[workerId] = struct{}{} - for _, slot := range candidateSlots { - if !slot.active() { + selected, ok := selectSlotsForWorker(action.slotsByTypeAndWorkerId, workerId, requests) + if !ok { continue } - if !slot.use([]func(){rateLimitAck}, []func(){rateLimitNack}) { + usedSlots, ok := useSelectedSlots(selected) + if !ok { + continue + } + + // Rate limit callbacks are stored at assignedSlots level, + // not on individual slots. They're called once when the + // entire assignment is acked/nacked. + return &assignedSlots{ + slots: usedSlots, + rateLimitAck: rateLimitAck, + rateLimitNack: rateLimitNack, + } + } + + return nil +} + +// useSelectedSlots attempts to reserve each slot in order. If any slot cannot be +// reserved, it rolls back by nacking any slots already reserved in this call. +func useSelectedSlots(selected []*slot) ([]*slot, bool) { + usedSlots := make([]*slot, 0, len(selected)) + + for _, sl := range selected { + if !sl.use(nil, nil) { + for _, used := range usedSlots { + used.nack() + } + return nil, false + } + usedSlots = append(usedSlots, sl) + } + + return usedSlots, true +} + +func selectSlotsForWorker( + slotsByType map[string]map[uuid.UUID][]*slot, + workerId uuid.UUID, + requests map[string]int32, +) ([]*slot, bool) { + // Pre-size the selection slice to the total number of requested units. 
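useSelectedSlots above and selectSlotsForWorker, whose body continues below, form a two-phase reservation: selection reads the index without reserving anything, then use reserves slot by slot and rolls back on the first failure. The rollback matters because slots are shared across actions, so a concurrent assignment can win a selected slot between the two phases. Rough usage, with illustrative values:

// Illustrative wrapper, not part of this diff: reserve a multi-type request
// on a single worker, all units or nothing.
func reserveOnWorkerSketch(a *action, workerId uuid.UUID) ([]*slot, bool) {
	requests := map[string]int32{"default": 1, "gpu": 2} // example request
	selected, ok := selectSlotsForWorker(a.slotsByTypeAndWorkerId, workerId, requests)
	if !ok {
		return nil, false // this worker lacks capacity for some requested type
	}
	// useSelectedSlots nacks any partially reserved units on failure.
	return useSelectedSlots(selected)
}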
+ totalNeeded := 0 + for _, units := range requests { + if units > 0 { + totalNeeded += int(units) + } + } + + selected := make([]*slot, 0, totalNeeded) + + for slotType, units := range requests { + if units <= 0 { continue } - assignedSlot = slot - break + slotsByWorker, ok := slotsByType[slotType] + if !ok { + return nil, false + } + + workerSlots := slotsByWorker[workerId] + if len(workerSlots) == 0 { + return nil, false + } + + needed := int(units) + found := 0 + + for _, s := range workerSlots { + if !s.active() { + continue + } + selected = append(selected, s) + found++ + if found >= needed { + break + } + } + + if found < needed { + return nil, false + } } - return assignedSlot + return selected, true } // tryAssignSingleton attempts to assign a singleton step to a worker. func (s *Scheduler) tryAssignSingleton( ctx context.Context, qi *sqlcv1.V1QueueItem, + action *action, candidateSlots []*slot, ringOffset int, labels []*sqlcv1.GetDesiredLabelsRow, + requests map[string]int32, rateLimitAck func(), rateLimitNack func(), ) ( res assignSingleResult, err error, ) { + // NOTE: the caller must hold action.mu (RLock or Lock) while calling this + // function. We read from action.slots, which is replaced during replenish + // under action.mu. + ctx, span := telemetry.NewSpan(ctx, "try-assign-singleton") // nolint: ineffassign defer span.End() @@ -666,10 +928,10 @@ func (s *Scheduler) tryAssignSingleton( ringOffset = 0 } - assignedSlot := findSlot(candidateSlots[ringOffset:], rateLimitAck, rateLimitNack) + assignedSlot := findAssignableSlots(candidateSlots[ringOffset:], action, requests, rateLimitAck, rateLimitNack) if assignedSlot == nil { - assignedSlot = findSlot(candidateSlots[:ringOffset], rateLimitAck, rateLimitNack) + assignedSlot = findAssignableSlots(candidateSlots[:ringOffset], action, requests, rateLimitAck, rateLimitNack) } if assignedSlot == nil { @@ -686,7 +948,13 @@ func (s *Scheduler) tryAssignSingleton( s.unackedSlots[res.ackId] = assignedSlot s.unackedMu.Unlock() - res.workerId = assignedSlot.getWorkerId() + res.workerId = assignedSlot.workerId() + if res.workerId == uuid.Nil { + s.l.Error().Msgf("assigned slot %d has no worker id, skipping assignment", res.ackId) + res.noSlots = true + return res, nil + } + res.succeeded = true return res, nil @@ -711,6 +979,7 @@ func (s *Scheduler) tryAssign( ctx context.Context, qis []*sqlcv1.V1QueueItem, stepIdsToLabels map[uuid.UUID][]*sqlcv1.GetDesiredLabelsRow, + stepIdsToRequests map[uuid.UUID]map[string]int32, taskIdsToRateLimits map[int64]map[string]int32, ) <-chan *assignResults { ctx, span := telemetry.NewSpan(ctx, "try-assign") @@ -757,7 +1026,6 @@ func (s *Scheduler) tryAssign( batched := make([]*sqlcv1.V1QueueItem, 0) schedulingTimedOut := make([]*sqlcv1.V1QueueItem, 0, len(qis)) - for i := range qis { qi := qis[i] @@ -781,7 +1049,7 @@ func (s *Scheduler) tryAssign( batchStart := time.Now() - results, newRingOffset, err := s.tryAssignBatch(ctx, actionId, batchQis, ringOffset, stepIdsToLabels, taskIdsToRateLimits) + results, newRingOffset, err := s.tryAssignBatch(ctx, actionId, batchQis, ringOffset, stepIdsToLabels, stepIdsToRequests, taskIdsToRateLimits) if err != nil { return err @@ -888,7 +1156,6 @@ func (s *Scheduler) getSnapshotInput(mustSnapshot bool) (*SnapshotInput, bool) { for workerId, worker := range workers { res.Workers[workerId] = &WorkerCp{ WorkerId: workerId, - MaxRuns: worker.MaxRuns, Labels: worker.Labels, Name: worker.Name, } diff --git a/pkg/scheduling/v1/scheduler_integration_test.go 
b/pkg/scheduling/v1/scheduler_integration_test.go new file mode 100644 index 0000000000..79729c3a17 --- /dev/null +++ b/pkg/scheduling/v1/scheduler_integration_test.go @@ -0,0 +1,352 @@ +//go:build integration + +package v1_test + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/google/uuid" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/hatchet-dev/hatchet/internal/testutils" + "github.com/hatchet-dev/hatchet/pkg/config/database" + repo "github.com/hatchet-dev/hatchet/pkg/repository" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" + schedv1 "github.com/hatchet-dev/hatchet/pkg/scheduling/v1" +) + +type snapshotEvent struct { + tenantId uuid.UUID + input *schedv1.SnapshotInput +} + +type captureSnapshotsExt struct { + ch chan snapshotEvent +} + +func (c *captureSnapshotsExt) SetTenants(_ []*sqlcv1.Tenant) {} + +func (c *captureSnapshotsExt) ReportSnapshot(tenantId uuid.UUID, input *schedv1.SnapshotInput) { + // non-blocking + select { + case c.ch <- snapshotEvent{tenantId: tenantId, input: input}: + default: + } +} + +func (c *captureSnapshotsExt) PostAssign(_ uuid.UUID, _ *schedv1.PostAssignInput) {} + +func (c *captureSnapshotsExt) Cleanup() error { return nil } + +func runWithDatabase(t *testing.T, test func(conf *database.Layer) error) { + t.Helper() + + // `internal/testutils.Prepare` constructs a server config and requires a RabbitMQ URL. + t.Setenv("SERVER_MSGQUEUE_RABBITMQ_URL", "amqp://user:password@localhost:5672/") + + testutils.RunTestWithDatabase(t, test) +} + +func requireSchedulerSchema(t *testing.T, ctx context.Context, conf *database.Layer) { + t.Helper() + + var hasMaxRuns bool + err := conf.Pool.QueryRow( + ctx, + `SELECT EXISTS ( + SELECT 1 + FROM information_schema.columns + WHERE table_name = 'Worker' AND column_name = 'maxRuns' + )`, + ).Scan(&hasMaxRuns) + require.NoError(t, err) + + if !hasMaxRuns { + t.Skip(`database schema is missing "Worker"."maxRuns"; run migrations (e.g. 
"task migrate") and re-run integration tests`) + } +} + +func createTenantDispatcherWorker( + t *testing.T, + ctx context.Context, + r repo.Repository, + tenantName string, + maxRuns int, + actions []string, +) (tenantId uuid.UUID, workerId uuid.UUID, tenant *sqlcv1.Tenant) { + t.Helper() + + tenantId = uuid.New() + tenant, err := r.Tenant().CreateTenant(ctx, &repo.CreateTenantOpts{ + ID: &tenantId, + Name: tenantName, + Slug: fmt.Sprintf("%s-%s", tenantName, tenantId.String()), + }) + require.NoError(t, err) + + dispatcherId := uuid.New() + _, err = r.Dispatcher().CreateNewDispatcher(ctx, &repo.CreateDispatcherOpts{ID: dispatcherId}) + require.NoError(t, err) + + worker, err := r.Workers().CreateNewWorker(ctx, tenantId, &repo.CreateWorkerOpts{ + DispatcherId: dispatcherId, + Name: "worker-it", + Services: []string{}, + Actions: actions, + SlotConfig: map[string]int32{repo.SlotTypeDefault: int32(maxRuns)}, + }) + require.NoError(t, err) + + now := time.Now().UTC() + require.NoError(t, r.Workers().UpdateWorkerHeartbeat(ctx, tenantId, worker.ID, now)) + + isActive := true + isPaused := false + _, err = r.Workers().UpdateWorker(ctx, tenantId, worker.ID, &repo.UpdateWorkerOpts{ + IsActive: &isActive, + IsPaused: &isPaused, + }) + require.NoError(t, err) + + return tenantId, worker.ID, tenant +} + +func waitForWorkerUtilization( + t *testing.T, + ch <-chan snapshotEvent, + tenantId uuid.UUID, + workerId uuid.UUID, + timeout time.Duration, +) *schedv1.SlotUtilization { + t.Helper() + + deadline := time.NewTimer(timeout) + defer deadline.Stop() + + for { + select { + case <-deadline.C: + t.Fatalf("timed out waiting for snapshot for tenant %s worker %s", tenantId, workerId) + case ev := <-ch: + if ev.input == nil || ev.tenantId != tenantId { + continue + } + + if ev.input.WorkerSlotUtilization == nil { + continue + } + + if util, ok := ev.input.WorkerSlotUtilization[workerId]; ok { + // Skip snapshots captured before replenish has populated any slots. 
+ if util.UtilizedSlots+util.NonUtilizedSlots == 0 { + continue + } + return util + } + } + } +} + +func requireNoSnapshotsForTenant( + t *testing.T, + ch <-chan snapshotEvent, + tenantId uuid.UUID, + dur time.Duration, +) { + t.Helper() + + timer := time.NewTimer(dur) + defer timer.Stop() + + for { + select { + case <-timer.C: + return + case ev := <-ch: + if ev.tenantId == tenantId { + t.Fatalf("unexpected snapshot for removed tenant %s", tenantId) + } + } + } +} + +func TestScheduler_ReplenishIntegration_SingleActionUtilizationEqualsMaxRuns(t *testing.T) { + runWithDatabase(t, func(conf *database.Layer) error { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + requireSchedulerSchema(t, ctx, conf) + + actionA := "test:run" + maxRuns := 3 + + tenantId, workerId, tenant := createTenantDispatcherWorker(t, ctx, conf.V1, "scheduler-it", maxRuns, []string{actionA}) + + l := zerolog.Nop() + pool, cleanup, err := schedv1.NewSchedulingPool( + conf.V1.Scheduler(), + &l, + 100, // singleQueueLimit + 20, // schedulerConcurrencyRateLimit + 10*time.Millisecond, // schedulerConcurrencyPollingMinInterval + 50*time.Millisecond, // schedulerConcurrencyPollingMaxInterval + false, // optimisticSchedulingEnabled + 1, // optimisticSlots + ) + require.NoError(t, err) + defer func() { _ = cleanup() }() + + ext := &captureSnapshotsExt{ch: make(chan snapshotEvent, 100)} + pool.Extensions.Add(ext) + + pool.SetTenants([]*sqlcv1.Tenant{tenant}) + + util := waitForWorkerUtilization(t, ext.ch, tenantId, workerId, 5*time.Second) + require.Equal(t, 0, util.UtilizedSlots) + require.Equal(t, maxRuns, util.NonUtilizedSlots) + + return nil + }) +} + +func TestScheduler_ReplenishIntegration_MultipleActionsDoesNotMultiplySlots(t *testing.T) { + runWithDatabase(t, func(conf *database.Layer) error { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + requireSchedulerSchema(t, ctx, conf) + + // If replenish created distinct slots per action (instead of sharing per worker capacity), + // snapshots would report NonUtilizedSlots == maxRuns * len(actions). We expect == maxRuns. 
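Concretely, the worker in these tests is registered by createTenantDispatcherWorker above with a SlotConfig of one default-typed entry, which replaces the removed MaxRuns option; because capacity is shared per worker, the snapshot must report maxRuns free units no matter how many actions the worker serves. The migration from MaxRuns, roughly:

// Migration sketch, assuming one unit of repo.SlotTypeDefault per former run.
func workerOptsFromMaxRuns(dispatcherId uuid.UUID, maxRuns int) *repo.CreateWorkerOpts {
	return &repo.CreateWorkerOpts{
		DispatcherId: dispatcherId,
		Name:         "my-worker", // any valid worker name
		SlotConfig:   map[string]int32{repo.SlotTypeDefault: int32(maxRuns)},
	}
}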
+ actionA := "test:run" + actionB := "test:other" + maxRuns := 2 + + tenantId, workerId, tenant := createTenantDispatcherWorker(t, ctx, conf.V1, "scheduler-it2", maxRuns, []string{actionA, actionB}) + + l := zerolog.Nop() + pool, cleanup, err := schedv1.NewSchedulingPool( + conf.V1.Scheduler(), + &l, + 100, + 20, + 10*time.Millisecond, + 50*time.Millisecond, + false, + 1, + ) + require.NoError(t, err) + defer func() { _ = cleanup() }() + + ext := &captureSnapshotsExt{ch: make(chan snapshotEvent, 100)} + pool.Extensions.Add(ext) + + pool.SetTenants([]*sqlcv1.Tenant{tenant}) + + util := waitForWorkerUtilization(t, ext.ch, tenantId, workerId, 5*time.Second) + require.Equal(t, 0, util.UtilizedSlots) + require.Equal(t, maxRuns, util.NonUtilizedSlots) + + return nil + }) +} + +func TestScheduler_ReplenishIntegration_IsSafeUnderConcurrentSnapshots(t *testing.T) { + runWithDatabase(t, func(conf *database.Layer) error { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + requireSchedulerSchema(t, ctx, conf) + + actionA := "test:run" + maxRuns := 2 + _, _, tenant := createTenantDispatcherWorker(t, ctx, conf.V1, "scheduler-it3", maxRuns, []string{actionA}) + + l := zerolog.Nop() + pool, cleanup, err := schedv1.NewSchedulingPool( + conf.V1.Scheduler(), + &l, + 100, + 20, + 10*time.Millisecond, + 50*time.Millisecond, + false, + 1, + ) + require.NoError(t, err) + defer func() { _ = cleanup() }() + + ext := &captureSnapshotsExt{ch: make(chan snapshotEvent, 1000)} + pool.Extensions.Add(ext) + pool.SetTenants([]*sqlcv1.Tenant{tenant}) + + // Drain a few snapshots concurrently to smoke-test races/panics around snapshotting + replenish. + wg := sync.WaitGroup{} + wg.Add(1) + + go func() { + defer wg.Done() + timeout := time.NewTimer(2 * time.Second) + defer timeout.Stop() + for { + select { + case <-timeout.C: + return + case <-ext.ch: + } + } + }() + + wg.Wait() + return nil + }) +} + +func TestScheduler_PoolIntegration_RemovingTenantStopsSnapshots(t *testing.T) { + runWithDatabase(t, func(conf *database.Layer) error { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + requireSchedulerSchema(t, ctx, conf) + + actionA := "test:run" + maxRuns := 2 + + tenantId, workerId, tenant := createTenantDispatcherWorker(t, ctx, conf.V1, "scheduler-it-remove", maxRuns, []string{actionA}) + + l := zerolog.Nop() + pool, cleanup, err := schedv1.NewSchedulingPool( + conf.V1.Scheduler(), + &l, + 100, + 20, + 10*time.Millisecond, + 50*time.Millisecond, + false, + 1, + ) + require.NoError(t, err) + defer func() { _ = cleanup() }() + + ext := &captureSnapshotsExt{ch: make(chan snapshotEvent, 1000)} + pool.Extensions.Add(ext) + + // Start the tenant and confirm we see snapshots for it. + pool.SetTenants([]*sqlcv1.Tenant{tenant}) + _ = waitForWorkerUtilization(t, ext.ch, tenantId, workerId, 5*time.Second) + + // Remove tenant from pool and ensure snapshots stop. + pool.SetTenants([]*sqlcv1.Tenant{}) + + // Give cleanup a short moment to cancel loops, then assert no new snapshots arrive. 
+ time.Sleep(50 * time.Millisecond) + requireNoSnapshotsForTenant(t, ext.ch, tenantId, 350*time.Millisecond) + + return nil + }) +} diff --git a/pkg/scheduling/v1/scheduler_test.go b/pkg/scheduling/v1/scheduler_test.go new file mode 100644 index 0000000000..86e53f3bd1 --- /dev/null +++ b/pkg/scheduling/v1/scheduler_test.go @@ -0,0 +1,1235 @@ +//go:build !e2e && !load && !rampup && !integration + +package v1 + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgtype" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + repo "github.com/hatchet-dev/hatchet/pkg/repository" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" +) + +type mockAssignmentRepo struct { + listActionsForWorkersFn func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error) + listAvailableSlotsForWorkersFn func(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error) + listAvailableSlotsForWorkersAndTypesFn func(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersAndTypesParams) ([]*sqlcv1.ListAvailableSlotsForWorkersAndTypesRow, error) + listWorkerSlotConfigsFn func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListWorkerSlotConfigsRow, error) +} + +func (m *mockAssignmentRepo) ListActionsForWorkers(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error) { + if m.listActionsForWorkersFn == nil { + return nil, fmt.Errorf("ListActionsForWorkers not configured") + } + + return m.listActionsForWorkersFn(ctx, tenantId, workerIds) +} + +func (m *mockAssignmentRepo) ListAvailableSlotsForWorkers(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error) { + if m.listAvailableSlotsForWorkersFn == nil { + return nil, fmt.Errorf("ListAvailableSlotsForWorkers not configured") + } + + return m.listAvailableSlotsForWorkersFn(ctx, tenantId, params) +} + +func (m *mockAssignmentRepo) ListAvailableSlotsForWorkersAndTypes(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersAndTypesParams) ([]*sqlcv1.ListAvailableSlotsForWorkersAndTypesRow, error) { + if m.listAvailableSlotsForWorkersAndTypesFn != nil { + return m.listAvailableSlotsForWorkersAndTypesFn(ctx, tenantId, params) + } + + // Backwards-compat fallback: emulate the multi-type query by calling the per-type query. + if m.listAvailableSlotsForWorkersFn != nil { + out := make([]*sqlcv1.ListAvailableSlotsForWorkersAndTypesRow, 0) + + for _, slotType := range params.Slottypes { + rows, err := m.listAvailableSlotsForWorkersFn(ctx, tenantId, sqlcv1.ListAvailableSlotsForWorkersParams{ + Tenantid: params.Tenantid, + Workerids: params.Workerids, + Slottype: slotType, + }) + if err != nil { + return nil, err + } + + for _, row := range rows { + out = append(out, &sqlcv1.ListAvailableSlotsForWorkersAndTypesRow{ + ID: row.ID, + SlotType: slotType, + AvailableSlots: row.AvailableSlots, + }) + } + } + + return out, nil + } + + return nil, fmt.Errorf("ListAvailableSlotsForWorkersAndTypes not configured") +} + +func (m *mockAssignmentRepo) ListWorkerSlotConfigs(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListWorkerSlotConfigsRow, error) { + if m.listWorkerSlotConfigsFn == nil { + // Default: all workers have the default slot type. 
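One design note on the mock above: ListAvailableSlotsForWorkersAndTypes falls back to fanning out over listAvailableSlotsForWorkersFn per slot type, so an older test that only stubs the single-type query keeps working against the new multi-type code path. For example:

// Older-style stub; the fallback above adapts it to the multi-type query.
ar := &mockAssignmentRepo{
	listAvailableSlotsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, p sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error) {
		rows := make([]*sqlcv1.ListAvailableSlotsForWorkersRow, 0, len(p.Workerids))
		for _, id := range p.Workerids {
			rows = append(rows, &sqlcv1.ListAvailableSlotsForWorkersRow{ID: id, AvailableSlots: 2})
		}
		return rows, nil
	},
}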
+ out := make([]*sqlcv1.ListWorkerSlotConfigsRow, 0, len(workerIds)) + for _, wid := range workerIds { + out = append(out, &sqlcv1.ListWorkerSlotConfigsRow{ + WorkerID: wid, + SlotType: repo.SlotTypeDefault, + MaxUnits: 0, + }) + } + return out, nil + } + + return m.listWorkerSlotConfigsFn(ctx, tenantId, workerIds) +} + +type mockSchedulerRepo struct { + assignment repo.AssignmentRepository +} + +func (m *mockSchedulerRepo) Concurrency() repo.ConcurrencyRepository { + panic("unexpected call: Concurrency") +} + +func (m *mockSchedulerRepo) Lease() repo.LeaseRepository { + panic("unexpected call: Lease") +} + +func (m *mockSchedulerRepo) QueueFactory() repo.QueueFactoryRepository { + panic("unexpected call: QueueFactory") +} + +func (m *mockSchedulerRepo) RateLimit() repo.RateLimitRepository { + panic("unexpected call: RateLimit") +} + +func (m *mockSchedulerRepo) Assignment() repo.AssignmentRepository { + if m.assignment == nil { + panic("mockSchedulerRepo.assignment is nil") + } + return m.assignment +} + +func (m *mockSchedulerRepo) Optimistic() repo.OptimisticSchedulingRepository { + panic("unexpected call: Optimistic") +} + +func newTestScheduler(t *testing.T, tenantId uuid.UUID, ar repo.AssignmentRepository) *Scheduler { + t.Helper() + + l := zerolog.Nop() + + sr := &mockSchedulerRepo{assignment: ar} + cf := &sharedConfig{ + repo: sr, + l: &l, + } + + // rate limiter not needed for most tests; can be set by the caller if required. + return newScheduler(cf, tenantId, nil, &Extensions{}) +} + +func testWorker(id uuid.UUID) *repo.ListActiveWorkersResult { + return &repo.ListActiveWorkersResult{ + ID: id, + Name: "w", + Labels: nil, + } +} + +func actionWithSlots(actionId string, slots ...*slot) (*action, error) { + a := &action{ + actionId: actionId, + slots: slots, + // populate index for tests; production code builds it in replenish. 
+ slotsByTypeAndWorkerId: make(map[string]map[uuid.UUID][]*slot), + } + + for _, sl := range slots { + slotType, err := sl.getSlotType() + if err != nil { + return nil, fmt.Errorf("getSlotType failed: %w", err) + } + + workerId := sl.getWorkerId() + + if _, ok := a.slotsByTypeAndWorkerId[slotType]; !ok { + a.slotsByTypeAndWorkerId[slotType] = make(map[uuid.UUID][]*slot) + } + a.slotsByTypeAndWorkerId[slotType][workerId] = append(a.slotsByTypeAndWorkerId[slotType][workerId], sl) + } + + return a, nil +} + +func testQI(tenantId uuid.UUID, actionId string, taskId int64) *sqlcv1.V1QueueItem { + return &sqlcv1.V1QueueItem{ + ID: taskId, + TenantID: tenantId, + ActionID: actionId, + TaskID: taskId, + Queue: "q", + StepID: uuid.New(), + ExternalID: uuid.New(), + } +} + +func ts(tm time.Time) pgtype.Timestamp { + return pgtype.Timestamp{Time: tm, Valid: true} +} + +func requireEventually(t *testing.T, dur time.Duration, f func() bool) { + t.Helper() + deadline := time.Now().Add(dur) + for time.Now().Before(deadline) { + if f() { + return + } + time.Sleep(1 * time.Millisecond) + } + require.True(t, f()) +} + +func TestScheduler_AckNack(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + s.setWorkers([]*repo.ListActiveWorkersResult{testWorker(workerId)}) + + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + sl := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + require.True(t, sl.use(nil, nil)) + + s.unackedSlots[123] = &assignedSlots{slots: []*slot{sl}} + + s.ack([]int{123, 999}) + + require.True(t, sl.ackd) + require.NotNil(t, sl.expiresAt) + require.Empty(t, s.unackedSlots) + + // nack should reset used=false and remove from unacked + sl2 := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + require.True(t, sl2.use(nil, nil)) + s.unackedSlots[777] = &assignedSlots{slots: []*slot{sl2}} + + s.nack([]int{777}) + + require.True(t, sl2.ackd) + require.False(t, sl2.used) + require.Empty(t, s.unackedSlots) +} + +func TestScheduler_SetWorkers_GetWorkers(t *testing.T) { + tenantId := uuid.New() + + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + + w1 := testWorker(uuid.New()) + w2 := testWorker(uuid.New()) + + s.setWorkers([]*repo.ListActiveWorkersResult{w1, w2}) + + got := s.copyWorkers() + require.Len(t, got, 2) + require.Equal(t, w1.ID, got[w1.ID].ID) + require.Equal(t, w2.ID, got[w2.ID].ID) +} + +func TestScheduleRateLimitResult_ShouldRemoveFromQueue(t *testing.T) { + // nil underlying result -> false + r := &scheduleRateLimitResult{} + require.False(t, r.shouldRemoveFromQueue()) + + // nextRefillAt far enough in future -> true + future := time.Now().UTC().Add(rateLimitedRequeueAfterThreshold + 250*time.Millisecond) + r.rateLimitResult = &rateLimitResult{nextRefillAt: &future} + require.True(t, r.shouldRemoveFromQueue()) + + // nextRefillAt close -> false + near := time.Now().UTC().Add(rateLimitedRequeueAfterThreshold - 250*time.Millisecond) + r.rateLimitResult = &rateLimitResult{nextRefillAt: &near} + require.False(t, r.shouldRemoveFromQueue()) +} + +func TestSelectSlotsForWorker_SkipsInactive(t *testing.T) { + workerId := uuid.New() + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + + s1 := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + require.True(t, s1.use(nil, nil)) // used => inactive + + s2 := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + // expire s2 + past := time.Now().Add(-1 * time.Second) + s2.mu.Lock() + s2.expiresAt 
= &past + s2.mu.Unlock() + + s3 := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + + slotsByTypeAndWorkerId := map[string]map[uuid.UUID][]*slot{ + repo.SlotTypeDefault: {workerId: {s1, s2, s3}}, + } + + selected, ok := selectSlotsForWorker( + slotsByTypeAndWorkerId, + workerId, + map[string]int32{repo.SlotTypeDefault: 1}, + ) + require.True(t, ok) + require.Len(t, selected, 1) + require.Same(t, s3, selected[0]) +} + +func TestScheduler_TryAssignSingleton_RingWraparound(t *testing.T) { + tenantId := uuid.New() + workerId1 := uuid.New() + workerId2 := uuid.New() + + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + + w1 := &worker{ListActiveWorkersResult: testWorker(workerId1)} + w2 := &worker{ListActiveWorkersResult: testWorker(workerId2)} + + // s1 is used/inactive, s2 is active + s1 := newSlot(w1, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + require.True(t, s1.use(nil, nil)) + s2 := newSlot(w2, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + + a, err := actionWithSlots("A", s1, s2) + require.NoError(t, err) + req := map[string]int32{repo.SlotTypeDefault: 1} + + qi := testQI(tenantId, "A", 1) + res, err := s.tryAssignSingleton(context.Background(), qi, a, []*slot{s1, s2}, 1, nil, req, func() {}, func() {}) + require.NoError(t, err) + require.True(t, res.succeeded) + require.False(t, res.noSlots) + require.Equal(t, workerId2, res.workerId) + require.NotZero(t, res.ackId) + + s.unackedMu.Lock() + _, ok := s.unackedSlots[res.ackId] + s.unackedMu.Unlock() + require.True(t, ok) +} + +func TestScheduler_TryAssignSingleton_NoSlots(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + + s1 := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + require.True(t, s1.use(nil, nil)) + + a, err := actionWithSlots("A", s1) + require.NoError(t, err) + req := map[string]int32{repo.SlotTypeDefault: 1} + + qi := testQI(tenantId, "A", 1) + res, err := s.tryAssignSingleton(context.Background(), qi, a, []*slot{s1}, 0, nil, req, func() {}, func() {}) + require.NoError(t, err) + require.False(t, res.succeeded) + require.True(t, res.noSlots) +} + +func TestScheduler_TryAssignSingleton_StickyHardForcesRanking(t *testing.T) { + tenantId := uuid.New() + desiredWorkerId := uuid.New() + otherWorkerId := uuid.New() + + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + + wDesired := &worker{ListActiveWorkersResult: testWorker(desiredWorkerId)} + wOther := &worker{ListActiveWorkersResult: testWorker(otherWorkerId)} + + // Put desired slot second; with HARD sticky it should still be selected. 
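+ // HARD stickiness must restrict candidates to DesiredWorkerID outright, not merely rank it first.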
+ otherSlot := newSlot(wOther, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + desiredSlot := newSlot(wDesired, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + + a, err := actionWithSlots("A", otherSlot, desiredSlot) + require.NoError(t, err) + req := map[string]int32{repo.SlotTypeDefault: 1} + + qi := testQI(tenantId, "A", 1) + qi.Sticky = sqlcv1.V1StickyStrategyHARD + qi.DesiredWorkerID = &desiredWorkerId + + res, err := s.tryAssignSingleton(context.Background(), qi, a, []*slot{otherSlot, desiredSlot}, 1, nil, req, func() {}, func() {}) + require.NoError(t, err) + require.True(t, res.succeeded) + require.Equal(t, desiredWorkerId, res.workerId) +} + +func TestScheduler_TryAssignSingleton_RateLimitAckIsWiredIntoSlotAck(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + + sl := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + a, err := actionWithSlots("A", sl) + require.NoError(t, err) + req := map[string]int32{repo.SlotTypeDefault: 1} + qi := testQI(tenantId, "A", 1) + + ackCount := 0 + rlAck := func() { ackCount++ } + + res, err := s.tryAssignSingleton(context.Background(), qi, a, []*slot{sl}, 0, nil, req, rlAck, func() {}) + require.NoError(t, err) + require.True(t, res.succeeded) + + s.ack([]int{res.ackId}) + require.Equal(t, 1, ackCount) +} + +func TestScheduler_TryAssignBatch_NoActionSlots(t *testing.T) { + tenantId := uuid.New() + + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + + qis := []*sqlcv1.V1QueueItem{ + testQI(tenantId, "missing", 1), + testQI(tenantId, "missing", 2), + } + + res, _, err := s.tryAssignBatch(context.Background(), "missing", qis, 0, nil, nil, nil) + require.NoError(t, err) + require.Len(t, res, 2) + for _, r := range res { + require.True(t, r.noSlots) + require.False(t, r.succeeded) + } +} + +func TestScheduler_Replenish_SkipsIfReplenishInProgress(t *testing.T) { + tenantId := uuid.New() + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{ + listActionsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error) { + return nil, nil + }, + listAvailableSlotsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error) { + return nil, nil + }, + }) + + // hold replenish lock to force TryLock() failure + s.replenishMu.Lock() + defer s.replenishMu.Unlock() + + require.NoError(t, s.replenish(context.Background(), false)) +} + +func TestScheduler_Replenish_SkipsIfCannotAcquireActionsLock(t *testing.T) { + tenantId := uuid.New() + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{ + listActionsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error) { + t.Fatalf("should not hit repo when actions lock can't be acquired") + return nil, nil + }, + listAvailableSlotsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error) { + t.Fatalf("should not hit repo when actions lock can't be acquired") + return nil, nil + }, + }) + + // Hold the actions write lock so TryLock fails (mustReplenish=false path). 
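+ // The t.Fatalf calls in the mock fns above make any repo access fail the test loudly.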
+ s.actionsMu.Lock() + defer s.actionsMu.Unlock() + + require.NoError(t, s.replenish(context.Background(), false)) +} + +func TestScheduler_Replenish_DoesNotLockUnackedMuBeforeActionLocks(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + + workerSlotConfigsCalled := make(chan struct{}) + + ar := &mockAssignmentRepo{ + listActionsForWorkersFn: func(ctx context.Context, gotTenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error) { + require.Equal(t, tenantId, gotTenantId) + require.Len(t, workerIds, 1) + require.Equal(t, workerId, workerIds[0]) + + return []*sqlcv1.ListActionsForWorkersRow{ + { + WorkerId: workerId, + ActionId: pgtype.Text{String: "A", Valid: true}, + }, + }, nil + }, + listWorkerSlotConfigsFn: func(ctx context.Context, gotTenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListWorkerSlotConfigsRow, error) { + require.Equal(t, tenantId, gotTenantId) + require.Len(t, workerIds, 1) + require.Equal(t, workerId, workerIds[0]) + + select { + case <-workerSlotConfigsCalled: + // already closed + default: + close(workerSlotConfigsCalled) + } + + return []*sqlcv1.ListWorkerSlotConfigsRow{ + { + WorkerID: workerId, + SlotType: repo.SlotTypeDefault, + MaxUnits: 1, + }, + }, nil + }, + listAvailableSlotsForWorkersFn: func(ctx context.Context, gotTenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error) { + require.Equal(t, tenantId, gotTenantId) + require.Equal(t, repo.SlotTypeDefault, params.Slottype) + require.Len(t, params.Workerids, 1) + require.Equal(t, workerId, params.Workerids[0]) + + return []*sqlcv1.ListAvailableSlotsForWorkersRow{ + { + ID: workerId, + AvailableSlots: 1, + }, + }, nil + }, + } + + s := newTestScheduler(t, tenantId, ar) + s.setWorkers([]*repo.ListActiveWorkersResult{testWorker(workerId)}) + + // Pre-create an action so replenish includes it in orderedLock(actionsToLock). + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + sl := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + a, err := actionWithSlots("A", sl) + require.NoError(t, err) + s.actions["A"] = a + + a.mu.Lock() + + replenishDone := make(chan error, 1) + go func() { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + replenishDone <- s.replenish(ctx, true) + }() + + select { + case <-workerSlotConfigsCalled: + case <-time.After(2 * time.Second): + a.mu.Unlock() + t.Fatalf("timed out waiting for replenish to call ListWorkerSlotConfigs") + } + + // While replenish is blocked trying to acquire action locks, it must not hold unackedMu. + // If lock order ever regresses (unackedMu before action.mu), this will fail. 
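+ // Polling with TryLock keeps the test itself from deadlocking on unackedMu.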
+ deadline := time.Now().Add(50 * time.Millisecond) + for time.Now().Before(deadline) { + if ok := s.unackedMu.TryLock(); ok { + s.unackedMu.Unlock() + } else { + a.mu.Unlock() + t.Fatalf("replenish acquired unackedMu while action.mu was held (lock order violation)") + } + time.Sleep(1 * time.Millisecond) + } + + a.mu.Unlock() + + select { + case err := <-replenishDone: + require.NoError(t, err) + case <-time.After(2 * time.Second): + t.Fatalf("timed out waiting for replenish to complete (possible deadlock)") + } +} + +func TestScheduler_TryAssignBatch_AssignsUntilExhausted(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + + // two total slots + sl1 := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + sl2 := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + + actA, err := actionWithSlots("A", sl1, sl2) + require.NoError(t, err) + s.actions["A"] = actA + + qis := []*sqlcv1.V1QueueItem{ + testQI(tenantId, "A", 1), + testQI(tenantId, "A", 2), + testQI(tenantId, "A", 3), + } + + res, newOffset, err := s.tryAssignBatch(context.Background(), "A", qis, 0, map[uuid.UUID][]*sqlcv1.GetDesiredLabelsRow{}, map[uuid.UUID]map[string]int32{}, nil) + require.NoError(t, err) + require.Equal(t, 3, newOffset) + + var assigned, noSlots int + for _, r := range res { + if r.succeeded { + assigned++ + } + if r.noSlots { + noSlots++ + } + } + + require.Equal(t, 2, assigned) + require.Equal(t, 1, noSlots) +} + +func TestScheduler_TryAssignBatch_RateLimitedSkipsAssignment(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + l := zerolog.Nop() + s.rl = &rateLimiter{ + tenantId: tenantId, + l: &l, + unacked: make(map[int64]rateLimitSet), + unflushed: make(rateLimitSet), + dbRateLimits: rateLimitSet{"k": {key: "k", val: 0, nextRefillAt: ptrTime(time.Now().UTC().Add(10 * time.Second))}}, + } + + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + sl := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + actA, err := actionWithSlots("A", sl) + require.NoError(t, err) + s.actions["A"] = actA + + qi := testQI(tenantId, "A", 100) + qis := []*sqlcv1.V1QueueItem{qi} + + rls := map[int64]map[string]int32{ + qi.TaskID: {"k": 1}, + } + + res, _, err := s.tryAssignBatch(context.Background(), "A", qis, 0, nil, map[uuid.UUID]map[string]int32{}, rls) + require.NoError(t, err) + require.Len(t, res, 1) + require.False(t, res[0].succeeded) + require.NotNil(t, res[0].rateLimitResult) + require.False(t, res[0].noSlots) +} + +func TestScheduler_TryAssign_GroupsAndFiltersTimedOut(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + + // A has 1 slot, B has 1 slot + actA, err := actionWithSlots("A", newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault))) + require.NoError(t, err) + s.actions["A"] = actA + actB, err := actionWithSlots("B", newSlot(w, newSlotMeta([]string{"B"}, repo.SlotTypeDefault))) + require.NoError(t, err) + s.actions["B"] = actB + + timeoutQI := testQI(tenantId, "A", 1) + timeoutQI.ScheduleTimeoutAt = ts(time.Now().UTC().Add(-1 * time.Second)) + + a1 := testQI(tenantId, "A", 2) + a2 := testQI(tenantId, "A", 3) // will be unassigned (only one slot) + b1 := testQI(tenantId, "B", 4) + + ch := s.tryAssign( + 
context.Background(), + []*sqlcv1.V1QueueItem{timeoutQI, a1, a2, b1}, + map[uuid.UUID][]*sqlcv1.GetDesiredLabelsRow{}, + map[uuid.UUID]map[string]int32{}, + nil, + ) + + var ( + assignedIDs = map[int64]bool{} + unassignedID = map[int64]bool{} + timedOutID = map[int64]bool{} + ) + + for r := range ch { + for _, to := range r.schedulingTimedOut { + timedOutID[to.TaskID] = true + } + for _, u := range r.unassigned { + unassignedID[u.TaskID] = true + } + for _, a := range r.assigned { + assignedIDs[a.QueueItem.TaskID] = true + } + } + + require.True(t, timedOutID[timeoutQI.TaskID]) + require.True(t, assignedIDs[a1.TaskID] || assignedIDs[a2.TaskID]) // one of them assigned + require.True(t, unassignedID[a1.TaskID] || unassignedID[a2.TaskID]) // the other unassigned + require.True(t, assignedIDs[b1.TaskID]) +} + +func TestScheduler_GetExtensionInput(t *testing.T) { + tenantId := uuid.New() + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + + qi1 := testQI(tenantId, "A", 1) + qi2 := testQI(tenantId, "A", 2) + + in := s.getExtensionInput([]*assignResults{ + {unassigned: []*sqlcv1.V1QueueItem{qi1}}, + {unassigned: []*sqlcv1.V1QueueItem{}}, + {unassigned: []*sqlcv1.V1QueueItem{qi2}}, + }) + + require.True(t, in.HasUnassignedStepRuns) + + in2 := s.getExtensionInput([]*assignResults{{unassigned: nil}}) + require.False(t, in2.HasUnassignedStepRuns) +} + +func TestScheduler_GetSnapshotInput_BestEffortTryLock(t *testing.T) { + tenantId := uuid.New() + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + + // Hold write lock so TryRLock fails. + s.actionsMu.Lock() + defer s.actionsMu.Unlock() + + in, ok := s.getSnapshotInput(false) + require.False(t, ok) + require.Nil(t, in) +} + +func TestScheduler_GetSnapshotInput_DedupSlotsAcrossActions(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + + s.setWorkers([]*repo.ListActiveWorkersResult{{ID: workerId, Name: "w1", Labels: nil}}) + + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + sharedSlot := newSlot(w, newSlotMeta([]string{"A", "B"}, repo.SlotTypeDefault)) + require.True(t, sharedSlot.use(nil, nil)) // used + unusedSlot := newSlot(w, newSlotMeta([]string{"A", "B"}, repo.SlotTypeDefault)) + + actA, err := actionWithSlots("A", sharedSlot, unusedSlot) + require.NoError(t, err) + s.actions["A"] = actA + actB, err := actionWithSlots("B", sharedSlot, unusedSlot) // duplicate pointers + require.NoError(t, err) + s.actions["B"] = actB + + in, ok := s.getSnapshotInput(true) + require.True(t, ok) + require.NotNil(t, in) + require.Len(t, in.Workers, 1) + require.Equal(t, workerId, in.Workers[workerId].WorkerId) + + util := in.WorkerSlotUtilization[workerId] + require.NotNil(t, util) + require.Equal(t, 1, util.UtilizedSlots) + require.Equal(t, 1, util.NonUtilizedSlots) +} + +func TestScheduler_IsTimedOut(t *testing.T) { + tenantId := uuid.New() + qi := testQI(tenantId, "A", 1) + require.False(t, isTimedOut(qi)) + + qi.ScheduleTimeoutAt = ts(time.Now().UTC().Add(-1 * time.Millisecond)) + require.True(t, isTimedOut(qi)) + + qi.ScheduleTimeoutAt = ts(time.Now().UTC().Add(5 * time.Second)) + require.False(t, isTimedOut(qi)) +} + +func TestScheduler_LoopsExitOnCancel(t *testing.T) { + tenantId := uuid.New() + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + doneRepl := make(chan struct{}) + go func() { + s.loopReplenish(ctx) + close(doneRepl) + }() + + doneSnap := make(chan 
struct{}) + go func() { + s.loopSnapshot(ctx) + close(doneSnap) + }() + + select { + case <-doneRepl: + case <-time.After(250 * time.Millisecond): + t.Fatalf("loopReplenish did not exit on cancel") + } + + select { + case <-doneSnap: + case <-time.After(250 * time.Millisecond): + t.Fatalf("loopSnapshot did not exit on cancel") + } +} + +func ptrTime(t time.Time) *time.Time { return &t } + +func TestScheduler_Start_Smoke(t *testing.T) { + tenantId := uuid.New() + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + // should not block or panic even if canceled + s.start(ctx) +} + +func TestSelectSlotsForWorker_MissingTypeOrInsufficientUnitsFails(t *testing.T) { + workerId := uuid.New() + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + + one := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + + slotsByTypeAndWorkerId := map[string]map[uuid.UUID][]*slot{ + repo.SlotTypeDefault: {workerId: {one}}, + } + + _, ok := selectSlotsForWorker(slotsByTypeAndWorkerId, workerId, map[string]int32{repo.SlotTypeDurable: 1}) + require.False(t, ok) + + _, ok = selectSlotsForWorker(slotsByTypeAndWorkerId, workerId, map[string]int32{repo.SlotTypeDefault: 2}) + require.False(t, ok) +} + +func TestFindAssignableSlots_MultiUnitSameType(t *testing.T) { + workerId := uuid.New() + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + + s1 := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + s2 := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + s3 := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + require.True(t, s3.use(nil, nil)) // used; ensure not selected + + a, err := actionWithSlots("A", s1, s2, s3) + require.NoError(t, err) + + assigned := findAssignableSlots(a.slots, a, map[string]int32{repo.SlotTypeDefault: 2}, nil, nil) + require.NotNil(t, assigned) + require.Len(t, assigned.slots, 2) + require.Equal(t, workerId, assigned.workerId()) + + // both selected are now used + for _, sl := range assigned.slots { + require.True(t, sl.isUsed()) + } +} + +func TestFindAssignableSlots_MultiType(t *testing.T) { + workerId := uuid.New() + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + + def := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + dur := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDurable)) + + a, err := actionWithSlots("A", def, dur) + require.NoError(t, err) + + assigned := findAssignableSlots(a.slots, a, map[string]int32{repo.SlotTypeDefault: 1, repo.SlotTypeDurable: 1}, nil, nil) + require.NotNil(t, assigned) + require.Len(t, assigned.slots, 2) + + gotTypes := map[string]bool{} + for _, sl := range assigned.slots { + slotType, err := sl.getSlotType() + require.NoError(t, err) + gotTypes[slotType] = true + } + require.True(t, gotTypes[repo.SlotTypeDefault]) + require.True(t, gotTypes[repo.SlotTypeDurable]) +} + +func TestUseSelectedSlots_PartialAllocationRollback(t *testing.T) { + workerId := uuid.New() + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + + s1 := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + s2 := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + + // Simulate a concurrent take of the second slot after selection but before useSelectedSlots. 
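+ // useSelectedSlots should fail as a unit and roll back s1 rather than leave a partial assignment behind.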
+ require.True(t, s2.use(nil, nil)) + + used, ok := useSelectedSlots([]*slot{s1, s2}) + require.False(t, ok) + require.Nil(t, used) + + // rollback should have nacked s1 (used=false) + require.False(t, s1.isUsed()) + // s2 was taken by the simulated concurrent use + require.True(t, s2.isUsed()) +} + +func TestScheduler_Nack_CallsRateLimitNackOnce(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{}) + + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + sl := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + require.True(t, sl.use(nil, nil)) + + nackCount := 0 + as := &assignedSlots{ + slots: []*slot{sl}, + rateLimitNack: func() { nackCount++ }, + } + + s.unackedSlots[1] = as + s.nack([]int{1}) + + require.Equal(t, 1, nackCount) + require.False(t, sl.isUsed()) +} + +func TestScheduler_Replenish_MultipleSlotTypes_CallsRepoPerTypeAndPopulatesSlotsByWorker(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + + called := map[string]int{} + + ar := &mockAssignmentRepo{ + listActionsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error) { + return []*sqlcv1.ListActionsForWorkersRow{ + {WorkerId: workerId, ActionId: pgtype.Text{String: "A", Valid: true}}, + }, nil + }, + listWorkerSlotConfigsFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListWorkerSlotConfigsRow, error) { + return []*sqlcv1.ListWorkerSlotConfigsRow{ + {WorkerID: workerId, SlotType: repo.SlotTypeDefault, MaxUnits: 2}, + {WorkerID: workerId, SlotType: repo.SlotTypeDurable, MaxUnits: 2}, + }, nil + }, + listAvailableSlotsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error) { + called[params.Slottype]++ + switch params.Slottype { + case repo.SlotTypeDefault: + return []*sqlcv1.ListAvailableSlotsForWorkersRow{{ID: workerId, AvailableSlots: 2}}, nil + case repo.SlotTypeDurable: + return []*sqlcv1.ListAvailableSlotsForWorkersRow{{ID: workerId, AvailableSlots: 2}}, nil + default: + return nil, fmt.Errorf("unexpected slot type %q", params.Slottype) + } + }, + } + + s := newTestScheduler(t, tenantId, ar) + s.setWorkers([]*repo.ListActiveWorkersResult{testWorker(workerId)}) + + err := s.replenish(context.Background(), true) + require.NoError(t, err) + + require.Equal(t, 1, called[repo.SlotTypeDefault]) + require.Equal(t, 1, called[repo.SlotTypeDurable]) + + a := s.actions["A"] + require.NotNil(t, a) + require.Len(t, a.slots, 4) + + countByType := map[string]int{} + for _, sl := range a.slots { + if sl.getWorkerId() != workerId { + continue + } + slotType, err := sl.getSlotType() + require.NoError(t, err) + countByType[slotType]++ + } + require.Equal(t, 2, countByType[repo.SlotTypeDefault]) + require.Equal(t, 2, countByType[repo.SlotTypeDurable]) +} + +func TestScheduler_Replenish_UnackedCountsPerSlotType(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + + ar := &mockAssignmentRepo{ + listActionsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error) { + return []*sqlcv1.ListActionsForWorkersRow{ + {WorkerId: workerId, ActionId: pgtype.Text{String: "A", Valid: true}}, + }, nil + }, + listWorkerSlotConfigsFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListWorkerSlotConfigsRow, error) { + return 
[]*sqlcv1.ListWorkerSlotConfigsRow{ + {WorkerID: workerId, SlotType: repo.SlotTypeDefault, MaxUnits: 2}, + {WorkerID: workerId, SlotType: repo.SlotTypeDurable, MaxUnits: 2}, + }, nil + }, + listAvailableSlotsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error) { + switch params.Slottype { + case repo.SlotTypeDefault: + return []*sqlcv1.ListAvailableSlotsForWorkersRow{{ID: workerId, AvailableSlots: 2}}, nil + case repo.SlotTypeDurable: + return []*sqlcv1.ListAvailableSlotsForWorkersRow{{ID: workerId, AvailableSlots: 2}}, nil + default: + return nil, fmt.Errorf("unexpected slot type %q", params.Slottype) + } + }, + } + + s := newTestScheduler(t, tenantId, ar) + s.setWorkers([]*repo.ListActiveWorkersResult{testWorker(workerId)}) + + // Seed one unacked durable slot; should only reduce *durable* new-slot count. + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + unackedDurable := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDurable)) + require.True(t, unackedDurable.use(nil, nil)) + s.unackedSlots[1] = &assignedSlots{slots: []*slot{unackedDurable}} + + err := s.replenish(context.Background(), true) + require.NoError(t, err) + + a := s.actions["A"] + require.NotNil(t, a) + countDefault := 0 + countDurable := 0 + foundUnacked := false + for _, sl := range a.slots { + if sl.getWorkerId() != workerId { + continue + } + slotType, err := sl.getSlotType() + require.NoError(t, err) + + switch slotType { + case repo.SlotTypeDefault: + countDefault++ + case repo.SlotTypeDurable: + countDurable++ + if sl == unackedDurable { + foundUnacked = true + } + } + } + + // default should be unaffected: 2 fresh default slots + require.Equal(t, 2, countDefault) + // durable should still total to 2, but include the unacked durable slot + require.Equal(t, 2, countDurable) + require.True(t, foundUnacked, "expected unacked durable slot to be carried forward into replenished slots") +} + +func TestScheduler_Replenish_PropagatesRepoErrors(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + sentinel := fmt.Errorf("boom") + + t.Run("ListActionsForWorkers", func(t *testing.T) { + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{ + listActionsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error) { + return nil, sentinel + }, + }) + s.setWorkers([]*repo.ListActiveWorkersResult{testWorker(workerId)}) + err := s.replenish(context.Background(), true) + require.ErrorIs(t, err, sentinel) + }) + + t.Run("ListWorkerSlotConfigs", func(t *testing.T) { + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{ + listActionsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error) { + return []*sqlcv1.ListActionsForWorkersRow{ + {WorkerId: workerId, ActionId: pgtype.Text{String: "A", Valid: true}}, + }, nil + }, + listWorkerSlotConfigsFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListWorkerSlotConfigsRow, error) { + return nil, sentinel + }, + }) + s.setWorkers([]*repo.ListActiveWorkersResult{testWorker(workerId)}) + err := s.replenish(context.Background(), true) + require.ErrorIs(t, err, sentinel) + }) + + t.Run("ListAvailableSlotsForWorkers", func(t *testing.T) { + s := newTestScheduler(t, tenantId, &mockAssignmentRepo{ + listActionsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) 
([]*sqlcv1.ListActionsForWorkersRow, error) { + return []*sqlcv1.ListActionsForWorkersRow{ + {WorkerId: workerId, ActionId: pgtype.Text{String: "A", Valid: true}}, + }, nil + }, + listWorkerSlotConfigsFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListWorkerSlotConfigsRow, error) { + return []*sqlcv1.ListWorkerSlotConfigsRow{ + {WorkerID: workerId, SlotType: repo.SlotTypeDefault, MaxUnits: 2}, + }, nil + }, + listAvailableSlotsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error) { + return nil, sentinel + }, + }) + s.setWorkers([]*repo.ListActiveWorkersResult{testWorker(workerId)}) + err := s.replenish(context.Background(), true) + require.ErrorIs(t, err, sentinel) + }) +} + +func TestScheduler_Replenish_CreatesActionAndSlots(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + + ar := &mockAssignmentRepo{ + listActionsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error) { + return []*sqlcv1.ListActionsForWorkersRow{ + {WorkerId: workerId, ActionId: pgtype.Text{String: "A", Valid: true}}, + }, nil + }, + listAvailableSlotsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error) { + require.Equal(t, repo.SlotTypeDefault, params.Slottype) + return []*sqlcv1.ListAvailableSlotsForWorkersRow{ + {ID: workerId, AvailableSlots: 3}, + }, nil + }, + } + + s := newTestScheduler(t, tenantId, ar) + s.setWorkers([]*repo.ListActiveWorkersResult{testWorker(workerId)}) + + err := s.replenish(context.Background(), true) + require.NoError(t, err) + + a, ok := s.actions["A"] + require.True(t, ok) + require.NotNil(t, a) + require.Len(t, a.slots, 3) + require.Equal(t, 3, a.lastReplenishedSlotCount) + require.Equal(t, 1, a.lastReplenishedWorkerCount) + + for _, sl := range a.slots { + require.Equal(t, workerId, sl.getWorkerId()) + slotType, err := sl.getSlotType() + require.NoError(t, err) + require.Equal(t, repo.SlotTypeDefault, slotType) + } +} + +func TestScheduler_Replenish_CleansExpiredSlotsWhenNoNewSlotsLoaded(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + + ar := &mockAssignmentRepo{ + listActionsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error) { + return []*sqlcv1.ListActionsForWorkersRow{ + {WorkerId: workerId, ActionId: pgtype.Text{String: "A", Valid: true}}, + }, nil + }, + // simulate no rows returned => no new slots written + listAvailableSlotsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error) { + require.Equal(t, repo.SlotTypeDefault, params.Slottype) + return []*sqlcv1.ListAvailableSlotsForWorkersRow{}, nil + }, + } + + s := newTestScheduler(t, tenantId, ar) + s.setWorkers([]*repo.ListActiveWorkersResult{testWorker(workerId)}) + + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + expired := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + past := time.Now().Add(-1 * time.Second) + expired.mu.Lock() + expired.expiresAt = &past + expired.mu.Unlock() + + used := newSlot(w, newSlotMeta([]string{"A"}, repo.SlotTypeDefault)) + require.True(t, used.use(nil, nil)) + + actA, err := actionWithSlots("A", expired, used) + require.NoError(t, err) + 
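+ // Seed one expired and one used slot; replenish cleanup should drop only the expired one.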
s.actions["A"] = actA + s.actions["A"].lastReplenishedSlotCount = 2 + + err = s.replenish(context.Background(), false) + require.NoError(t, err) + + a := s.actions["A"] + require.NotNil(t, a) + require.Len(t, a.slots, 1) + require.Same(t, used, a.slots[0]) +} + +func TestScheduler_Replenish_UpdatesAllWorkerActionsForLockSafety(t *testing.T) { + tenantId := uuid.New() + workerId := uuid.New() + + ar := &mockAssignmentRepo{ + listActionsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error) { + return []*sqlcv1.ListActionsForWorkersRow{ + {WorkerId: workerId, ActionId: pgtype.Text{String: "A", Valid: true}}, + {WorkerId: workerId, ActionId: pgtype.Text{String: "B", Valid: true}}, + }, nil + }, + listAvailableSlotsForWorkersFn: func(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error) { + require.Equal(t, repo.SlotTypeDefault, params.Slottype) + return []*sqlcv1.ListAvailableSlotsForWorkersRow{ + {ID: workerId, AvailableSlots: 2}, + }, nil + }, + } + + s := newTestScheduler(t, tenantId, ar) + s.setWorkers([]*repo.ListActiveWorkersResult{testWorker(workerId)}) + + // Seed actions so FUNCTION 1 decision logic runs. + w := &worker{ListActiveWorkersResult: testWorker(workerId)} + usedSlot := newSlot(w, newSlotMeta([]string{"A", "B"}, repo.SlotTypeDefault)) + require.True(t, usedSlot.use(nil, nil)) + + actA, err := actionWithSlots("A", usedSlot) + require.NoError(t, err) + s.actions["A"] = actA + s.actions["A"].lastReplenishedSlotCount = 2 + s.actions["A"].lastReplenishedWorkerCount = 1 + + actB, err := actionWithSlots("B", newSlot(w, newSlotMeta([]string{"A", "B"}, repo.SlotTypeDefault))) + require.NoError(t, err) + s.actions["B"] = actB + s.actions["B"].lastReplenishedSlotCount = 100 + s.actions["B"].lastReplenishedWorkerCount = 1 + + err = s.replenish(context.Background(), false) + require.NoError(t, err) + + a := s.actions["A"] + b := s.actions["B"] + require.NotNil(t, a) + require.NotNil(t, b) + require.Len(t, a.slots, 2) + require.Len(t, b.slots, 2) + + // Compare as sets (order is randomized per action). + setA := map[*slot]bool{} + for _, sl := range a.slots { + setA[sl] = true + } + for _, sl := range b.slots { + require.True(t, setA[sl], "expected slot pointers shared across actions for same worker capacity") + } +} diff --git a/pkg/scheduling/v1/slot.go b/pkg/scheduling/v1/slot.go index f3dab7e1d3..8426f2d707 100644 --- a/pkg/scheduling/v1/slot.go +++ b/pkg/scheduling/v1/slot.go @@ -1,11 +1,13 @@ package v1 import ( + "fmt" "slices" "sync" "time" "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" ) @@ -13,28 +15,69 @@ import ( // time for unacked slots to get written back to the database. const defaultSlotExpiry = 1500 * time.Millisecond -type slot struct { - worker *worker - actions []string - - // expiresAt is when the slot is no longer valid, but has not been cleaned up yet - expiresAt *time.Time - used bool +// slotMeta is shared across many slots to avoid duplicating +// metadata that is identical for a worker/type. 
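+// (Many slots for a single worker and slot type can share one slotMeta value.)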
+type slotMeta struct { + slotType string + actions []string +} - ackd bool +func newSlotMeta(actions []string, slotType string) *slotMeta { + return &slotMeta{ + actions: actions, + slotType: slotType, + } +} +type slot struct { + worker *worker + meta *slotMeta + expiresAt *time.Time additionalAcks []func() additionalNacks []func() + mu sync.RWMutex + used bool + ackd bool +} + +type assignedSlots struct { + slots []*slot + rateLimitAck func() + rateLimitNack func() +} + +func (a *assignedSlots) workerId() uuid.UUID { + if len(a.slots) == 0 { + return uuid.Nil + } - mu sync.RWMutex + return a.slots[0].getWorkerId() } -func newSlot(worker *worker, actions []string) *slot { +func (a *assignedSlots) ack() { + for _, slot := range a.slots { + slot.ack() + } + if a.rateLimitAck != nil { + a.rateLimitAck() + } +} + +func (a *assignedSlots) nack() { + for _, slot := range a.slots { + slot.nack() + } + if a.rateLimitNack != nil { + a.rateLimitNack() + } +} + +func newSlot(worker *worker, meta *slotMeta) *slot { expires := time.Now().Add(defaultSlotExpiry) return &slot{ worker: worker, - actions: actions, + meta: meta, expiresAt: &expires, } } @@ -43,6 +86,14 @@ func (s *slot) getWorkerId() uuid.UUID { return s.worker.ID } +func (s *slot) getSlotType() (string, error) { + if s.meta == nil { + return "", fmt.Errorf("slot has nil meta") + } + + return s.meta.slotType, nil +} + func (s *slot) extendExpiry() { s.mu.Lock() defer s.mu.Unlock() diff --git a/pkg/scheduling/v1/slot_test.go b/pkg/scheduling/v1/slot_test.go index ce11cd358b..9538dff492 100644 --- a/pkg/scheduling/v1/slot_test.go +++ b/pkg/scheduling/v1/slot_test.go @@ -36,8 +36,8 @@ func TestGetRankedSlots(t *testing.T) { DesiredWorkerID: &stableWorkerId1, }, slots: []*slot{ - newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: stableWorkerId1}}, []string{}), - newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: uuid.New()}}, []string{}), + newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: stableWorkerId1}}, newSlotMeta([]string{}, "default")), + newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: uuid.New()}}, newSlotMeta([]string{}, "default")), }, expectedWorker: []string{stableWorkerId1.String()}, }, @@ -48,8 +48,8 @@ func TestGetRankedSlots(t *testing.T) { DesiredWorkerID: ptrUUID(uuid.New().String()), }, slots: []*slot{ - newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: uuid.New()}}, []string{}), - newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: uuid.New()}}, []string{}), + newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: uuid.New()}}, newSlotMeta([]string{}, "default")), + newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: uuid.New()}}, newSlotMeta([]string{}, "default")), }, expectedWorker: []string{}, }, @@ -60,9 +60,9 @@ func TestGetRankedSlots(t *testing.T) { DesiredWorkerID: &stableWorkerId1, }, slots: []*slot{ - newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: (stableWorkerId2)}}, []string{}), - newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: (stableWorkerId1)}}, []string{}), - newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: (stableWorkerId1)}}, []string{}), + newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: (stableWorkerId2)}}, newSlotMeta([]string{}, "default")), + newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: 
(stableWorkerId1)}}, newSlotMeta([]string{}, "default")), + newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: (stableWorkerId1)}}, newSlotMeta([]string{}, "default")), }, expectedWorker: []string{stableWorkerId1.String(), stableWorkerId1.String(), stableWorkerId2.String()}, }, @@ -89,14 +89,14 @@ func TestGetRankedSlots(t *testing.T) { newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: (stableWorkerId1), Labels: []*sqlcv1.ListManyWorkerLabelsRow{{ Key: "key1", IntValue: pgtype.Int4{Int32: 2, Valid: true}, - }}}}, []string{}), + }}}}, newSlotMeta([]string{}, "default")), newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: (stableWorkerId2), Labels: []*sqlcv1.ListManyWorkerLabelsRow{{ Key: "key1", IntValue: pgtype.Int4{Int32: 4, Valid: true}, }, { Key: "key2", IntValue: pgtype.Int4{Int32: 4, Valid: true}, - }}}}, []string{}), + }}}}, newSlotMeta([]string{}, "default")), }, expectedWorker: []string{stableWorkerId2.String(), stableWorkerId1.String()}, }, @@ -116,7 +116,7 @@ func TestGetRankedSlots(t *testing.T) { newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: (stableWorkerId1), Labels: []*sqlcv1.ListManyWorkerLabelsRow{{ Key: "key1", IntValue: pgtype.Int4{Int32: 1, Valid: true}, - }}}}, []string{}), + }}}}, newSlotMeta([]string{}, "default")), }, expectedWorker: []string{stableWorkerId1.String()}, }, @@ -136,7 +136,7 @@ func TestGetRankedSlots(t *testing.T) { newSlot(&worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: (stableWorkerId2), Labels: []*sqlcv1.ListManyWorkerLabelsRow{{ Key: "key1", IntValue: pgtype.Int4{Int32: 2, Valid: true}, - }}}}, []string{}), + }}}}, newSlotMeta([]string{}, "default")), }, expectedWorker: []string{}, }, @@ -154,3 +154,30 @@ func TestGetRankedSlots(t *testing.T) { }) } } + +func TestSelectSlotsForWorker(t *testing.T) { + workerId := uuid.New() + worker := &worker{ListActiveWorkersResult: &v1.ListActiveWorkersResult{ID: workerId}} + + slotsByType := map[string]map[uuid.UUID][]*slot{ + "cpu": { + workerId: { + newSlot(worker, newSlotMeta([]string{}, "cpu")), + newSlot(worker, newSlotMeta([]string{}, "cpu")), + newSlot(worker, newSlotMeta([]string{}, "cpu")), + }, + }, + "mem": { + workerId: { + newSlot(worker, newSlotMeta([]string{}, "mem")), + }, + }, + } + + selected, ok := selectSlotsForWorker(slotsByType, workerId, map[string]int32{"cpu": 2, "mem": 1}) + assert.True(t, ok) + assert.Len(t, selected, 3) + + _, ok = selectSlotsForWorker(slotsByType, workerId, map[string]int32{"cpu": 4}) + assert.False(t, ok) +} diff --git a/pkg/telemetry/metrics.go b/pkg/telemetry/metrics.go index 81fe6754b9..106554024b 100644 --- a/pkg/telemetry/metrics.go +++ b/pkg/telemetry/metrics.go @@ -28,9 +28,10 @@ type MetricsRecorder struct { yesterdayRunCountGauge metric.Int64Gauge // Worker metrics - activeSlotsGauge metric.Int64Gauge - activeWorkersGauge metric.Int64Gauge - activeSDKsGauge metric.Int64Gauge + activeSlotsGauge metric.Int64Gauge + activeSlotsByKeyGauge metric.Int64Gauge + activeWorkersGauge metric.Int64Gauge + activeSDKsGauge metric.Int64Gauge } // NewMetricsRecorder creates a new metrics recorder with all instruments registered @@ -120,6 +121,14 @@ func NewMetricsRecorder(ctx context.Context) (*MetricsRecorder, error) { return nil, fmt.Errorf("failed to create active slots gauge: %w", err) } + activeSlotsByKeyGauge, err := meter.Int64Gauge( + "hatchet.workers.active_slots.by_key", + metric.WithDescription("Number of active worker slots per tenant and slot key"), + 
) + if err != nil { + return nil, fmt.Errorf("failed to create active slots by key gauge: %w", err) + } + activeWorkersGauge, err := meter.Int64Gauge( "hatchet.workers.active_count", metric.WithDescription("Number of active workers per tenant"), @@ -148,6 +157,7 @@ func NewMetricsRecorder(ctx context.Context) (*MetricsRecorder, error) { olapTempTableSizeTaskGauge: olapTempTableSizeTaskGauge, yesterdayRunCountGauge: yesterdayRunCountGauge, activeSlotsGauge: activeSlotsGauge, + activeSlotsByKeyGauge: activeSlotsByKeyGauge, activeWorkersGauge: activeWorkersGauge, activeSDKsGauge: activeSDKsGauge, }, nil @@ -210,6 +220,15 @@ func (m *MetricsRecorder) RecordActiveSlots(ctx context.Context, tenantId uuid.U metric.WithAttributes(attribute.String("tenant_id", tenantId.String()))) } +// RecordActiveSlotsByKey records the number of active worker slots by key +func (m *MetricsRecorder) RecordActiveSlotsByKey(ctx context.Context, tenantId uuid.UUID, slotKey string, count int64) { + m.activeSlotsByKeyGauge.Record(ctx, count, + metric.WithAttributes( + attribute.String("tenant_id", tenantId.String()), + attribute.String("slot_key", slotKey), + )) +} + // RecordActiveWorkers records the number of active workers func (m *MetricsRecorder) RecordActiveWorkers(ctx context.Context, tenantId uuid.UUID, count int64) { m.activeWorkersGauge.Record(ctx, count, diff --git a/pkg/telemetry/servertel/attributes.go b/pkg/telemetry/servertel/attributes.go index 930cd383f4..a586beb387 100644 --- a/pkg/telemetry/servertel/attributes.go +++ b/pkg/telemetry/servertel/attributes.go @@ -2,6 +2,7 @@ package servertel import ( "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/telemetry" ) diff --git a/pkg/v1/task/task.go b/pkg/v1/task/task.go index 3ad03cdfb2..b9c5056fcb 100644 --- a/pkg/v1/task/task.go +++ b/pkg/v1/task/task.go @@ -290,6 +290,7 @@ func (t *TaskDeclaration[I]) Dump(workflowName string, taskDefaults *create.Task base := makeContractTaskOpts(&t.TaskShared, taskDefaults) base.ReadableId = t.Name base.Action = getActionID(workflowName, t.Name) + base.IsDurable = false base.Parents = make([]string, len(t.Parents)) copy(base.Parents, t.Parents) @@ -344,6 +345,7 @@ func (t *DurableTaskDeclaration[I]) Dump(workflowName string, taskDefaults *crea base := makeContractTaskOpts(&t.TaskShared, taskDefaults) base.ReadableId = t.Name base.Action = getActionID(workflowName, t.Name) + base.IsDurable = true base.Parents = make([]string, len(t.Parents)) copy(base.Parents, t.Parents) return base @@ -358,6 +360,7 @@ func (t *OnFailureTaskDeclaration[I]) Dump(workflowName string, taskDefaults *cr base.ReadableId = "on-failure" base.Action = getActionID(workflowName, "on-failure") + base.IsDurable = false return base } diff --git a/pkg/v1/worker/worker.go b/pkg/v1/worker/worker.go index 2d2afb4991..2c82a1ee89 100644 --- a/pkg/v1/worker/worker.go +++ b/pkg/v1/worker/worker.go @@ -5,14 +5,12 @@ package worker import ( "context" "fmt" - "sync" v0Client "github.com/hatchet-dev/hatchet/pkg/client" "github.com/hatchet-dev/hatchet/pkg/v1/features" "github.com/hatchet-dev/hatchet/pkg/v1/workflow" "github.com/hatchet-dev/hatchet/pkg/worker" "github.com/rs/zerolog" - "golang.org/x/sync/errgroup" ) // Deprecated: Worker is part of the old generics-based v1 Go SDK. @@ -28,13 +26,13 @@ type Worker interface { // RegisterWorkflows registers one or more workflows with the worker. 
RegisterWorkflows(workflows ...workflow.WorkflowBase) error - // IsPaused checks if all worker instances are paused + // IsPaused checks if the worker is paused IsPaused(ctx context.Context) (bool, error) - // Pause pauses all worker instances + // Pause pauses the worker Pause(ctx context.Context) error - // Unpause resumes all paused worker instances + // Unpause resumes the paused worker Unpause(ctx context.Context) error } @@ -76,11 +74,8 @@ type WorkerImpl struct { // v1 workers client workers features.WorkersClient - // nonDurableWorker is the underlying non-durable worker implementation. (default) - nonDurableWorker *worker.Worker - - // durableWorker is the underlying worker implementation for durable tasks. - durableWorker *worker.Worker + // worker is the underlying worker implementation. + worker *worker.Worker // name is the friendly name of the worker. name string @@ -160,16 +155,16 @@ func (w *WorkerImpl) RegisterWorkflows(workflows ...workflow.WorkflowBase) error for _, workflow := range workflows { dump, fns, durableFns, onFailureFn := workflow.Dump() - // Check if there are non-durable tasks in this workflow - hasNonDurableTasks := len(fns) > 0 || (dump.OnFailureTask != nil && onFailureFn != nil) - hasDurableTasks := len(durableFns) > 0 + hasAnyTasks := len(fns) > 0 || len(durableFns) > 0 || (dump.OnFailureTask != nil && onFailureFn != nil) - // Create non-durable worker on demand if needed and not already created - if hasNonDurableTasks && w.nonDurableWorker == nil { + // Create worker on demand if needed and not already created + if hasAnyTasks && w.worker == nil { + totalRuns := w.slots + w.durableSlots opts := []worker.WorkerOpt{ worker.WithClient(w.v0), worker.WithName(w.name), - worker.WithMaxRuns(w.slots), + worker.WithSlots(totalRuns), + worker.WithDurableSlots(w.durableSlots), worker.WithLogger(w.logger), worker.WithLogLevel(w.logLevel), worker.WithLabels(w.labels), @@ -179,79 +174,41 @@ func (w *WorkerImpl) RegisterWorkflows(workflows ...workflow.WorkflowBase) error opts = append(opts, worker.WithLogger(w.logger)) } - nonDurableWorker, err := worker.NewWorker( - opts..., - ) - if err != nil { - return err - } - w.nonDurableWorker = nonDurableWorker - } - - // Create durable worker on demand if needed and not already created - if hasDurableTasks && w.durableWorker == nil { - // Reuse logger from main worker if exists - var logger *zerolog.Logger - if w.nonDurableWorker != nil { - logger = w.nonDurableWorker.Logger() - } - - labels := make(map[string]interface{}) - for k, v := range w.labels { - labels[k] = fmt.Sprintf("%v-durable", v) - } - - opts := []worker.WorkerOpt{ - worker.WithClient(w.v0), - worker.WithName(w.name + "-durable"), - worker.WithMaxRuns(w.durableSlots), - worker.WithLogger(logger), - worker.WithLogLevel(w.logLevel), - worker.WithLabels(labels), - } - - durableWorker, err := worker.NewWorker( + wkr, err := worker.NewWorker( opts..., ) if err != nil { return err } - w.durableWorker = durableWorker + w.worker = wkr } - // Register workflow with non-durable worker if it exists - if w.nonDurableWorker != nil { - err := w.nonDurableWorker.RegisterWorkflowV1(dump) + // Register workflow with worker if it exists + if w.worker != nil { + err := w.worker.RegisterWorkflowV1(dump) if err != nil { return err } // Register non-durable actions for _, namedFn := range fns { - err := w.nonDurableWorker.RegisterAction(namedFn.ActionID, namedFn.Fn) + err := w.worker.RegisterAction(namedFn.ActionID, namedFn.Fn) if err != nil { return err } } - if 
dump.OnFailureTask != nil && onFailureFn != nil { - actionId := dump.OnFailureTask.Action - err := w.nonDurableWorker.RegisterAction(actionId, onFailureFn) + // Register durable actions on the same worker + for _, namedFn := range durableFns { + err := w.worker.RegisterAction(namedFn.ActionID, namedFn.Fn) if err != nil { return err } } - } - // Register durable actions with durable worker - if w.durableWorker != nil { - err := w.durableWorker.RegisterWorkflowV1(dump) - if err != nil { - return err - } - - for _, namedFn := range durableFns { - err := w.durableWorker.RegisterAction(namedFn.ActionID, namedFn.Fn) + if dump.OnFailureTask != nil && onFailureFn != nil { + actionId := dump.OnFailureTask.Action + err := w.worker.RegisterAction(actionId, onFailureFn) if err != nil { return err } @@ -269,65 +226,16 @@ func (w *WorkerImpl) RegisterWorkflows(workflows ...workflow.WorkflowBase) error // returns a cleanup function to be called when the worker should be stopped, // and any error encountered during startup. func (w *WorkerImpl) Start() (func() error, error) { - // Create slice of workers that exist - var workers []*worker.Worker - if w.nonDurableWorker != nil { - workers = append(workers, w.nonDurableWorker) + if w.worker == nil { + return func() error { return nil }, nil } - if w.durableWorker != nil { - workers = append(workers, w.durableWorker) - } - - // Track cleanup functions with a mutex to safely access from multiple goroutines - var cleanupFuncs []func() error - var cleanupMu sync.Mutex - - // Use errgroup to start workers concurrently - g := new(errgroup.Group) - - // Start all workers concurrently - for i := range workers { - worker := workers[i] // Capture the worker for the goroutine - g.Go(func() error { - cleanup, err := worker.Start() - if err != nil { - return fmt.Errorf("failed to start worker %s: %w", *worker.ID(), err) - } - cleanupMu.Lock() - cleanupFuncs = append(cleanupFuncs, cleanup) - cleanupMu.Unlock() - return nil - }) - } - - // Wait for all workers to start - if err := g.Wait(); err != nil { - // Clean up any workers that did start - for _, cleanupFn := range cleanupFuncs { - _ = cleanupFn() - } - return nil, err + cleanup, err := w.worker.Start() + if err != nil { + return nil, fmt.Errorf("failed to start worker %s: %w", *w.worker.ID(), err) } - // Return a combined cleanup function that also uses errgroup for concurrent cleanup - return func() error { - g := new(errgroup.Group) - - for _, cleanup := range cleanupFuncs { - cleanupFn := cleanup // Capture the cleanup function for the goroutine - g.Go(func() error { - return cleanupFn() - }) - } - - // Wait for all cleanup operations to complete and return any error - if err := g.Wait(); err != nil { - return fmt.Errorf("worker cleanup error: %w", err) - } - - return nil - }, nil + return cleanup, nil } // Deprecated: StartBlocking is part of the old generics-based v1 Go SDK. 
@@ -356,39 +264,11 @@ func (w *WorkerImpl) StartBlocking(ctx context.Context) error { // // IsPaused checks if all worker instances are paused func (w *WorkerImpl) IsPaused(ctx context.Context) (bool, error) { - // Create slice of worker IDs to check - var workerIDs []string - - if w.nonDurableWorker != nil { - mainID := w.nonDurableWorker.ID() - workerIDs = append(workerIDs, *mainID) - } - - if w.durableWorker != nil { - durableID := w.durableWorker.ID() - workerIDs = append(workerIDs, *durableID) - } - - // If no workers exist, consider it not paused - if len(workerIDs) == 0 { + if w.worker == nil { return false, nil } - // Check pause status for all workers - for _, id := range workerIDs { - isPaused, err := w.workers.IsPaused(ctx, id) - if err != nil { - return false, err - } - - // If any worker is not paused, return false - if !isPaused { - return false, nil - } - } - - // All workers are paused - return true, nil + return w.workers.IsPaused(ctx, *w.worker.ID()) } // Deprecated: Pause is part of the old generics-based v1 Go SDK. @@ -396,23 +276,12 @@ func (w *WorkerImpl) IsPaused(ctx context.Context) (bool, error) { // // Pause pauses all worker instances func (w *WorkerImpl) Pause(ctx context.Context) error { - // Pause main worker if it exists - if w.nonDurableWorker != nil { - _, err := w.workers.Pause(ctx, *w.nonDurableWorker.ID()) - if err != nil { - return err - } - } - - // Pause durable worker if it exists - if w.durableWorker != nil { - _, err := w.workers.Pause(ctx, *w.durableWorker.ID()) - if err != nil { - return err - } + if w.worker == nil { + return nil } - return nil + _, err := w.workers.Pause(ctx, *w.worker.ID()) + return err } // Deprecated: Unpause is part of the old generics-based v1 Go SDK. @@ -420,21 +289,10 @@ func (w *WorkerImpl) Pause(ctx context.Context) error { // // Unpause resumes all paused worker instances func (w *WorkerImpl) Unpause(ctx context.Context) error { - // Unpause main worker if it exists - if w.nonDurableWorker != nil { - _, err := w.workers.Unpause(ctx, *w.nonDurableWorker.ID()) - if err != nil { - return err - } - } - - // Unpause durable worker if it exists - if w.durableWorker != nil { - _, err := w.workers.Unpause(ctx, *w.durableWorker.ID()) - if err != nil { - return err - } + if w.worker == nil { + return nil } - return nil + _, err := w.workers.Unpause(ctx, *w.worker.ID()) + return err } diff --git a/pkg/validator/slot_requests_validation_test.go b/pkg/validator/slot_requests_validation_test.go new file mode 100644 index 0000000000..e37d47b231 --- /dev/null +++ b/pkg/validator/slot_requests_validation_test.go @@ -0,0 +1,83 @@ +//go:build !e2e && !load && !rampup && !integration + +package validator_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hatchet-dev/hatchet/pkg/repository" + "github.com/hatchet-dev/hatchet/pkg/validator" +) + +func TestSlotRequests_RejectsNonPositiveUnits(t *testing.T) { + v := validator.NewDefaultValidator() + + desc := "desc" + opts := &repository.CreateWorkflowVersionOpts{ + Name: "workflow-1", + // Description is used unconditionally downstream in PutWorkflowVersion, so set it. 
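+ // (Each SlotRequests test in this file reuses this minimal workflow shape.)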
+ Description: &desc, + Tasks: []repository.CreateStepOpts{ + { + ReadableId: "step-1", + Action: "svc:do", + SlotRequests: map[string]int32{ + repository.SlotTypeDefault: 0, + }, + }, + }, + } + + err := v.Validate(opts) + require.Error(t, err) + require.Contains(t, err.Error(), "SlotRequests") + require.Contains(t, err.Error(), "gt") +} + +func TestSlotRequests_AllowsPositiveUnits(t *testing.T) { + v := validator.NewDefaultValidator() + + desc := "desc" + opts := &repository.CreateWorkflowVersionOpts{ + Name: "workflow-1", + Description: &desc, + Tasks: []repository.CreateStepOpts{ + { + ReadableId: "step-1", + Action: "svc:do", + SlotRequests: map[string]int32{ + repository.SlotTypeDefault: 1, + "gpu": 2, + }, + }, + }, + } + + require.NoError(t, v.Validate(opts)) +} + +func TestSlotRequests_RejectsEmptySlotTypeKey(t *testing.T) { + v := validator.NewDefaultValidator() + + desc := "desc" + opts := &repository.CreateWorkflowVersionOpts{ + Name: "workflow-1", + Description: &desc, + Tasks: []repository.CreateStepOpts{ + { + ReadableId: "step-1", + Action: "svc:do", + SlotRequests: map[string]int32{ + "": 1, + }, + }, + }, + } + + err := v.Validate(opts) + require.Error(t, err) + require.Contains(t, err.Error(), "SlotRequests") + require.Contains(t, err.Error(), "required") +} diff --git a/pkg/worker/worker.go b/pkg/worker/worker.go index e577e12d0c..865223afeb 100644 --- a/pkg/worker/worker.go +++ b/pkg/worker/worker.go @@ -25,6 +25,11 @@ import ( type actionFunc func(args ...any) []any +const ( + slotTypeDefault = "default" + slotTypeDurable = "durable" +) + // Deprecated: Action is an internal interface used by the new Go SDK. // Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of using this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go type Action interface { @@ -117,7 +122,14 @@ type Worker struct { middlewares *middlewares - slots *int + slots *int + durableSlots *int + slotConfig map[string]int32 + + // legacySlots, when non-nil, causes the registration to use the deprecated + // `slots` proto field instead of `slot_config`. For backward compatibility + // with engines that do not support multiple slot types. + legacySlots *int32 initActionNames []string @@ -142,6 +154,9 @@ type WorkerOpts struct { integrations []integrations.Integration alerter errors.Alerter slots *int + durableSlots *int + slotConfig map[string]int32 + legacySlots *int32 actions []string @@ -211,6 +226,31 @@ func WithSlots(slots int) WorkerOpt { } } +// Deprecated: WithDurableSlots is an internal function used by the new Go SDK. +// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go +func WithDurableSlots(durableSlots int) WorkerOpt { + return func(opts *WorkerOpts) { + opts.durableSlots = &durableSlots + } +} + +// Deprecated: WithSlotConfig is an internal function used by the new Go SDK. +// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go +func WithSlotConfig(slotConfig map[string]int32) WorkerOpt { + return func(opts *WorkerOpts) { + opts.slotConfig = slotConfig + } +} + +// WithLegacySlots configures the worker to register using the deprecated `slots` +// proto field instead of `slot_config`. This is for backward compatibility with +// engines that do not support multiple slot types.
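+// Callers targeting newer engines are expected to use WithSlotConfig (or WithSlots/WithDurableSlots) instead.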
+func WithLegacySlots(slots int32) WorkerOpt { + return func(opts *WorkerOpts) { + opts.legacySlots = &slots + } +} + // Deprecated: WithLabels is an internal function used by the new Go SDK. // Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go func WithLabels(labels map[string]interface{}) WorkerOpt { @@ -278,6 +318,33 @@ func NewWorker(fs ...WorkerOpt) (*Worker, error) { opts.l = &l } + if opts.slotConfig != nil && (opts.slots != nil || opts.durableSlots != nil) { + return nil, fmt.Errorf("cannot set both slot config and slots/durable slots") + } + + // Backwards compatibility: + // If callers used the older slots/durableSlots options, map them to a slot config so that + // worker registration continues to work with multiple slot types. + if opts.slotConfig == nil { + legacySlotConfig := map[string]int32{} + + if opts.slots != nil { + legacySlotConfig[slotTypeDefault] = int32(*opts.slots) // nolint:gosec + } + + if opts.durableSlots != nil { + legacySlotConfig[slotTypeDurable] = int32(*opts.durableSlots) // nolint:gosec + } + + if len(legacySlotConfig) > 0 { + opts.slotConfig = legacySlotConfig + } + } + + if opts.slotConfig == nil { + opts.slotConfig = map[string]int32{slotTypeDefault: 100} + } + w := &Worker{ client: opts.client, name: opts.name, @@ -286,6 +353,9 @@ func NewWorker(fs ...WorkerOpt) (*Worker, error) { alerter: opts.alerter, middlewares: mws, slots: opts.slots, + durableSlots: opts.durableSlots, + slotConfig: opts.slotConfig, + legacySlots: opts.legacySlots, initActionNames: opts.actions, labels: opts.labels, registered_workflows: map[string]bool{}, @@ -516,10 +586,11 @@ func (w *Worker) startBlocking(ctx context.Context) error { _ = NewManagedCompute(&w.actions, w.client, 1) listener, id, err := w.client.Dispatcher().GetActionListener(ctx, &client.GetActionListenerRequest{ - WorkerName: w.name, - Actions: actionNames, - Slots: w.slots, - Labels: w.labels, + WorkerName: w.name, + Actions: actionNames, + Labels: w.labels, + SlotConfig: w.slotConfig, + LegacySlots: w.legacySlots, }) w.id = id diff --git a/sdks/go/client.go b/sdks/go/client.go index 9a5a3891d3..10827c00ac 100644 --- a/sdks/go/client.go +++ b/sdks/go/client.go @@ -49,11 +49,22 @@ func NewClient(opts ...v0Client.ClientOpt) (*Client, error) { // Worker represents a worker that can execute workflows. type Worker struct { - nonDurable *worker.Worker - durable *worker.Worker - name string + worker *worker.Worker + name string + + // legacyDurable is set when connected to an older engine that needs separate + // durable/non-durable workers. nil when using the new unified slot_config approach. + legacyDurable *worker.Worker } +// slotType represents supported slot types (internal use). +type slotType string + +const ( + slotTypeDefault slotType = "default" + slotTypeDurable slotType = "durable" +) + // NewWorker creates a worker that can execute workflows. 
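+// A minimal usage sketch (assuming `wf` is a workflow declared elsewhere):
+//
+//	w, err := client.NewWorker("my-worker",
+//		hatchet.WithWorkflows(wf),
+//		hatchet.WithSlots(10),
+//	)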
func (c *Client) NewWorker(name string, options ...WorkerOption) (*Worker, error) { config := &workerConfig{ @@ -65,12 +76,37 @@ func (c *Client) NewWorker(name string, options ...WorkerOption) (*Worker, error opt(config) } + dumps := gatherWorkflowDumps(config.workflows) + + // Check engine version to decide between new and legacy worker architecture + isLegacy, err := c.isLegacyEngine() + if err != nil { + return nil, err + } + if isLegacy { + return newLegacyWorker(c, name, config, dumps) + } + + initialSlotConfig := map[slotType]int{} + if config.slotsSet { + initialSlotConfig[slotTypeDefault] = config.slots + } + if config.durableSlotsSet { + initialSlotConfig[slotTypeDurable] = config.durableSlots + } + slotConfig := resolveWorkerSlotConfig(initialSlotConfig, dumps) + workerOpts := []worker.WorkerOpt{ worker.WithClient(c.legacyClient), worker.WithName(name), - worker.WithMaxRuns(config.slots), } + slotConfigMap := make(map[string]int32, len(slotConfig)) + for key, value := range slotConfig { + slotConfigMap[string(key)] = int32(value) + } + workerOpts = append(workerOpts, worker.WithSlotConfig(slotConfigMap)) + if config.logger != nil { workerOpts = append(workerOpts, worker.WithLogger(config.logger)) } @@ -79,67 +115,40 @@ func (c *Client) NewWorker(name string, options ...WorkerOption) (*Worker, error workerOpts = append(workerOpts, worker.WithLabels(config.labels)) } - nonDurableWorker, err := worker.NewWorker(workerOpts...) + mainWorker, err := worker.NewWorker(workerOpts...) if err != nil { return nil, err } if config.panicHandler != nil { - nonDurableWorker.SetPanicHandler(config.panicHandler) + mainWorker.SetPanicHandler(config.panicHandler) } - var durableWorker *worker.Worker - - for _, workflow := range config.workflows { - req, regularActions, durableActions, onFailureFn := workflow.Dump() - hasDurableTasks := len(durableActions) > 0 - - if hasDurableTasks { - if durableWorker == nil { - durableWorkerOpts := workerOpts - durableWorkerOpts = append(durableWorkerOpts, worker.WithName(name+"-durable")) - durableWorkerOpts = append(durableWorkerOpts, worker.WithMaxRuns(config.durableSlots)) - - durableWorker, err = worker.NewWorker(durableWorkerOpts...) 
- if err != nil { - return nil, err - } - - if config.panicHandler != nil { - durableWorker.SetPanicHandler(config.panicHandler) - } - } - - err := durableWorker.RegisterWorkflowV1(req) - if err != nil { - return nil, err - } - } else { - err := nonDurableWorker.RegisterWorkflowV1(req) - if err != nil { - return nil, err - } + for _, dump := range dumps { + err := mainWorker.RegisterWorkflowV1(dump.req) + if err != nil { + return nil, err } - for _, namedFn := range durableActions { - err = durableWorker.RegisterAction(namedFn.ActionID, namedFn.Fn) + for _, namedFn := range dump.durableActions { + err = mainWorker.RegisterAction(namedFn.ActionID, namedFn.Fn) if err != nil { return nil, err } } - for _, namedFn := range regularActions { - err = nonDurableWorker.RegisterAction(namedFn.ActionID, namedFn.Fn) + for _, namedFn := range dump.regularActions { + err = mainWorker.RegisterAction(namedFn.ActionID, namedFn.Fn) if err != nil { return nil, err } } // Register on failure function if exists - if req.OnFailureTask != nil && onFailureFn != nil { - actionId := req.OnFailureTask.Action - err = nonDurableWorker.RegisterAction(actionId, func(ctx worker.HatchetContext) (any, error) { - return onFailureFn(ctx) + if dump.req.OnFailureTask != nil && dump.onFailureFn != nil { + actionId := dump.req.OnFailureTask.Action + err = mainWorker.RegisterAction(actionId, func(ctx worker.HatchetContext) (any, error) { + return dump.onFailureFn(ctx) }) if err != nil { return nil, err @@ -148,22 +157,97 @@ func (c *Client) NewWorker(name string, options ...WorkerOption) (*Worker, error } return &Worker{ - nonDurable: nonDurableWorker, - durable: durableWorker, - name: name, + worker: mainWorker, + name: name, }, nil } +type workflowDump struct { + req *v1.CreateWorkflowVersionRequest + regularActions []internal.NamedFunction + durableActions []internal.NamedFunction + onFailureFn internal.WrappedTaskFn +} + +func gatherWorkflowDumps(workflows []WorkflowBase) []workflowDump { + dumps := make([]workflowDump, 0, len(workflows)) + for _, workflow := range workflows { + req, regularActions, durableActions, onFailureFn := workflow.Dump() + dumps = append(dumps, workflowDump{ + req: req, + regularActions: regularActions, + durableActions: durableActions, + onFailureFn: onFailureFn, + }) + } + return dumps +} + +func resolveWorkerSlotConfig( + slotConfig map[slotType]int, + dumps []workflowDump, +) map[slotType]int { + requiredSlotTypes := map[slotType]bool{} + addFromRequests := func(requests map[string]int32) { + if requests == nil { + return + } + if _, ok := requests[string(slotTypeDefault)]; ok { + requiredSlotTypes[slotTypeDefault] = true + } + if _, ok := requests[string(slotTypeDurable)]; ok { + requiredSlotTypes[slotTypeDurable] = true + } + } + + for _, dump := range dumps { + for _, task := range dump.req.Tasks { + addFromRequests(task.SlotRequests) + } + if dump.req.OnFailureTask != nil { + addFromRequests(dump.req.OnFailureTask.SlotRequests) + } + } + + if len(dumps) > 0 { + for _, dump := range dumps { + for _, task := range dump.req.Tasks { + if task.IsDurable { + requiredSlotTypes[slotTypeDurable] = true + break + } + } + } + } + + if requiredSlotTypes[slotTypeDefault] { + if _, ok := slotConfig[slotTypeDefault]; !ok { + slotConfig[slotTypeDefault] = 100 + } + } + if requiredSlotTypes[slotTypeDurable] { + if _, ok := slotConfig[slotTypeDurable]; !ok { + slotConfig[slotTypeDurable] = 1000 + } + } + + if len(slotConfig) == 0 { + slotConfig[slotTypeDefault] = 100 + } + + return slotConfig +} + // Starts the 
worker instance and returns a cleanup function. func (w *Worker) Start() (func() error, error) { var workers []*worker.Worker - if w.nonDurable != nil { - workers = append(workers, w.nonDurable) + if w.worker != nil { + workers = append(workers, w.worker) } - if w.durable != nil { - workers = append(workers, w.durable) + if w.legacyDurable != nil { + workers = append(workers, w.legacyDurable) } // Track cleanup functions with a mutex to safely access from multiple goroutines diff --git a/sdks/go/deprecated_worker.go b/sdks/go/deprecated_worker.go new file mode 100644 index 0000000000..39625c5665 --- /dev/null +++ b/sdks/go/deprecated_worker.go @@ -0,0 +1,165 @@ +package hatchet + +// Legacy dual-worker creation for pre-slot-config engines. +// When connected to an older Hatchet engine that does not support multiple slot types, +// this module provides the old NewWorker flow which creates separate durable and +// non-durable workers, each registered with the legacy `slots` proto field. + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/hatchet-dev/hatchet/pkg/worker" +) + +// minSlotConfigVersion is the minimum engine version that supports multiple slot types. +const minSlotConfigVersion = "v0.78.23" + +// legacyEngineDeprecationStart is the date when slot_config support was released. +var legacyEngineDeprecationStart = time.Date(2026, 2, 12, 0, 0, 0, 0, time.UTC) + +const legacyEngineMessage = "Connected to an older Hatchet engine that does not support multiple slot types. " + + "Falling back to legacy worker registration. " + + "Please upgrade your Hatchet engine to the latest version." + +// isLegacyEngine checks whether the engine supports the new slot_config registration +// by comparing the engine's semantic version against the minimum required version. +// Returns true if the engine is legacy (does not implement GetVersion or reports +// a version older than minSlotConfigVersion). +func (c *Client) isLegacyEngine() (bool, error) { + ctx := context.Background() + version, err := c.legacyClient.Dispatcher().GetVersion(ctx) + if err != nil { + if status.Code(err) == codes.Unimplemented { + l := c.legacyClient.Logger() + if depErr := EmitDeprecationNotice("legacy-engine", legacyEngineMessage, legacyEngineDeprecationStart, l, &DeprecationOpts{ + ErrorWindow: 180 * 24 * time.Hour, + }); depErr != nil { + return false, fmt.Errorf("legacy engine deprecated: %w", depErr) + } + return true, nil + } + // For other errors (e.g., connectivity), assume new engine and let registration fail naturally + return false, nil + } + + // If the version is empty or older than the minimum, treat as legacy + if version == "" || SemverLessThan(version, minSlotConfigVersion) { + l := c.legacyClient.Logger() + if depErr := EmitDeprecationNotice("legacy-engine", legacyEngineMessage, legacyEngineDeprecationStart, l, &DeprecationOpts{ + ErrorWindow: 180 * 24 * time.Hour, + }); depErr != nil { + return false, fmt.Errorf("legacy engine deprecated: %w", depErr) + } + return true, nil + } + + return false, nil +} + +// newLegacyWorker creates workers using the old dual-worker pattern for pre-slot-config engines. +// Uses WithLegacySlots so that registration sends the deprecated `slots` proto field +// instead of `slot_config`. 
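+//
+// Invocation sketch (mirrors the engine-version branch in Client.NewWorker):
+//
+//	isLegacy, err := c.isLegacyEngine()
+//	if err != nil {
+//		return nil, err
+//	}
+//	if isLegacy {
+//		return newLegacyWorker(c, name, config, dumps)
+//	}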
+func newLegacyWorker(c *Client, name string, config *workerConfig, dumps []workflowDump) (*Worker, error) { + workerOpts := []worker.WorkerOpt{ + worker.WithClient(c.legacyClient), + worker.WithName(name), + worker.WithLegacySlots(int32(config.slots)), // nolint:gosec + } + + if config.logger != nil { + workerOpts = append(workerOpts, worker.WithLogger(config.logger)) + } + + if config.labels != nil { + workerOpts = append(workerOpts, worker.WithLabels(config.labels)) + } + + nonDurableWorker, err := worker.NewWorker(workerOpts...) + if err != nil { + return nil, err + } + + if config.panicHandler != nil { + nonDurableWorker.SetPanicHandler(config.panicHandler) + } + + var durableWorker *worker.Worker + + for _, dump := range dumps { + hasDurableTasks := len(dump.durableActions) > 0 + + if hasDurableTasks { + if durableWorker == nil { + durableWorkerOpts := []worker.WorkerOpt{ + worker.WithClient(c.legacyClient), + worker.WithName(name + "-durable"), + worker.WithLegacySlots(int32(config.durableSlots)), // nolint:gosec + } + + if config.logger != nil { + durableWorkerOpts = append(durableWorkerOpts, worker.WithLogger(config.logger)) + } + + if config.labels != nil { + durableWorkerOpts = append(durableWorkerOpts, worker.WithLabels(config.labels)) + } + + durableWorker, err = worker.NewWorker(durableWorkerOpts...) + if err != nil { + return nil, err + } + + if config.panicHandler != nil { + durableWorker.SetPanicHandler(config.panicHandler) + } + } + + err := durableWorker.RegisterWorkflowV1(dump.req) + if err != nil { + return nil, err + } + } else { + err := nonDurableWorker.RegisterWorkflowV1(dump.req) + if err != nil { + return nil, err + } + } + + for _, namedFn := range dump.durableActions { + err = durableWorker.RegisterAction(namedFn.ActionID, namedFn.Fn) + if err != nil { + return nil, err + } + } + + for _, namedFn := range dump.regularActions { + err = nonDurableWorker.RegisterAction(namedFn.ActionID, namedFn.Fn) + if err != nil { + return nil, err + } + } + + // Register on failure function if exists + if dump.req.OnFailureTask != nil && dump.onFailureFn != nil { + actionId := dump.req.OnFailureTask.Action + onFailure := dump.onFailureFn // capture for closure + err = nonDurableWorker.RegisterAction(actionId, func(ctx worker.HatchetContext) (any, error) { + return onFailure(ctx) + }) + if err != nil { + return nil, err + } + } + } + + return &Worker{ + worker: nonDurableWorker, + legacyDurable: durableWorker, + name: name, + }, nil +} diff --git a/sdks/go/deprecation.go b/sdks/go/deprecation.go new file mode 100644 index 0000000000..9c970874c5 --- /dev/null +++ b/sdks/go/deprecation.go @@ -0,0 +1,140 @@ +package hatchet + +// Generic time-aware deprecation helper. +// +// Timeline (from a given start time, with configurable windows): +// 0 to WarnWindow: WARNING logged once per feature +// WarnWindow to ErrorWindow: ERROR logged once per feature +// after ErrorWindow: returns an error 1-in-5 calls (20% chance) +// +// Defaults: WarnWindow=90d, ErrorWindow=0 (error phase disabled unless set). + +import ( + "fmt" + "math/rand" + "strconv" + "strings" + "sync" + "time" + + "github.com/rs/zerolog" +) + +const ( + defaultWarnWindow = 90 * 24 * time.Hour +) + +var ( + deprecationMu sync.Mutex + deprecationLogged = map[string]bool{} +) + +// DeprecationError is returned when a deprecation grace period has expired. 
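+//
+// One way callers might detect it (a minimal sketch, assuming a returned err):
+//
+//	var depErr *DeprecationError
+//	if errors.As(err, &depErr) {
+//		// the deprecation window has fully elapsed
+//	}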
+type DeprecationError struct { + Feature string + Message string +} + +func (e *DeprecationError) Error() string { + return fmt.Sprintf("%s: %s", e.Feature, e.Message) +} + +// DeprecationOpts provides optional configuration for EmitDeprecationNotice. +type DeprecationOpts struct { + // WarnWindow is how long after start the notice is a warning. + // Defaults to 90 days if zero. + WarnWindow time.Duration + + // ErrorWindow is how long after start the notice is an error log. + // After this window, calls have a 20% chance of returning an error. + // If zero (default), the error/raise phase is never reached — the notice + // stays at error-level logging indefinitely. + ErrorWindow time.Duration +} + +// EmitDeprecationNotice emits a time-aware deprecation notice. +// +// - feature: a short identifier for deduplication (each feature logs once). +// - message: the human-readable deprecation message. +// - start: the UTC time when the deprecation window began. +// - logger: the zerolog logger to write to. +// - opts: optional configuration; pass nil for defaults. +// +// Returns a non-nil *DeprecationError only in phase 3 (~20% chance). +func EmitDeprecationNotice(feature, message string, start time.Time, logger *zerolog.Logger, opts *DeprecationOpts) error { + warnWindow := defaultWarnWindow + var errorWindow time.Duration // zero means "never" + + if opts != nil { + if opts.WarnWindow > 0 { + warnWindow = opts.WarnWindow + } + errorWindow = opts.ErrorWindow + } + + elapsed := time.Since(start) + + deprecationMu.Lock() + alreadyLogged := deprecationLogged[feature] + if !alreadyLogged { + deprecationLogged[feature] = true + } + deprecationMu.Unlock() + + switch { + case elapsed < warnWindow: + // Phase 1: warning + if !alreadyLogged { + logger.Warn().Msg(message) + } + + case errorWindow <= 0 || elapsed < errorWindow: + // Phase 2: error-level log (indefinite when errorWindow is 0) + if !alreadyLogged { + logger.Error().Msgf("%s This fallback will be removed soon. Upgrade immediately.", message) + } + + default: + // Phase 3: raise 1-in-5 times + if !alreadyLogged { + logger.Error().Msgf("%s This fallback is no longer supported and will fail intermittently.", message) + } + + if rand.Float64() < 0.2 { //nolint:gosec + return &DeprecationError{Feature: feature, Message: message} + } + } + + return nil +} + +// ParseSemver extracts major, minor, patch from a version string like "v0.78.23". +// Returns (0,0,0) if parsing fails. +func ParseSemver(v string) (int, int, int) { + v = strings.TrimPrefix(v, "v") + // Strip any pre-release suffix (e.g. "-alpha.0") + if idx := strings.Index(v, "-"); idx != -1 { + v = v[:idx] + } + parts := strings.Split(v, ".") + if len(parts) != 3 { + return 0, 0, 0 + } + major, _ := strconv.Atoi(parts[0]) + minor, _ := strconv.Atoi(parts[1]) + patch, _ := strconv.Atoi(parts[2]) + return major, minor, patch +} + +// SemverLessThan returns true if version a is strictly less than version b. 
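+//
+// Examples, taken from the cases in deprecation_test.go:
+//
+//	SemverLessThan("v0.78.22", "v0.78.23")       // true
+//	SemverLessThan("v1.0.0", "v0.99.99")         // false
+//	SemverLessThan("v0.1.0-alpha.0", "v0.78.23") // true (pre-release suffix ignored)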
+func SemverLessThan(a, b string) bool { + aMaj, aMin, aPat := ParseSemver(a) + bMaj, bMin, bPat := ParseSemver(b) + if aMaj != bMaj { + return aMaj < bMaj + } + if aMin != bMin { + return aMin < bMin + } + return aPat < bPat +} diff --git a/sdks/go/deprecation_test.go b/sdks/go/deprecation_test.go new file mode 100644 index 0000000000..d94040af85 --- /dev/null +++ b/sdks/go/deprecation_test.go @@ -0,0 +1,52 @@ +package hatchet + +import "testing" + +func TestParseSemver(t *testing.T) { + tests := []struct { + input string + major, minor, patch int + }{ + {"v0.78.23", 0, 78, 23}, + {"v1.2.3", 1, 2, 3}, + {"0.78.23", 0, 78, 23}, + {"v0.1.0-alpha.0", 0, 1, 0}, + {"v10.20.30-rc.1", 10, 20, 30}, + {"", 0, 0, 0}, + {"v1.2", 0, 0, 0}, + {"not-a-version", 0, 0, 0}, + } + + for _, tt := range tests { + major, minor, patch := ParseSemver(tt.input) + if major != tt.major || minor != tt.minor || patch != tt.patch { + t.Errorf("ParseSemver(%q) = (%d, %d, %d), want (%d, %d, %d)", + tt.input, major, minor, patch, tt.major, tt.minor, tt.patch) + } + } +} + +func TestSemverLessThan(t *testing.T) { + tests := []struct { + a, b string + want bool + }{ + {"v0.78.22", "v0.78.23", true}, + {"v0.78.23", "v0.78.23", false}, + {"v0.78.24", "v0.78.23", false}, + {"v0.77.99", "v0.78.0", true}, + {"v0.79.0", "v0.78.99", false}, + {"v0.78.23", "v1.0.0", true}, + {"v1.0.0", "v0.99.99", false}, + {"v0.1.0-alpha.0", "v0.78.23", true}, + {"", "v0.78.23", true}, + {"v0.78.23", "", false}, + } + + for _, tt := range tests { + got := SemverLessThan(tt.a, tt.b) + if got != tt.want { + t.Errorf("SemverLessThan(%q, %q) = %v, want %v", tt.a, tt.b, got, tt.want) + } + } +} diff --git a/sdks/go/internal/task/task.go b/sdks/go/internal/task/task.go index 0a469dd48f..508d130ab3 100644 --- a/sdks/go/internal/task/task.go +++ b/sdks/go/internal/task/task.go @@ -19,6 +19,11 @@ type TaskBase interface { Dump(workflowName string, taskDefaults *create.TaskDefaults) *contracts.CreateTaskOpts } +const ( + slotTypeDefault = "default" + slotTypeDurable = "durable" +) + type TaskShared struct { // ExecutionTimeout specifies the maximum duration a task can run before being terminated ExecutionTimeout *time.Duration @@ -231,6 +236,10 @@ func (t *TaskDeclaration[I]) Dump(workflowName string, taskDefaults *create.Task base := makeContractTaskOpts(&t.TaskShared, taskDefaults) base.ReadableId = t.Name base.Action = getActionID(workflowName, t.Name) + base.IsDurable = false + if base.SlotRequests == nil { + base.SlotRequests = map[string]int32{slotTypeDefault: 1} + } base.Parents = make([]string, len(t.Parents)) copy(base.Parents, t.Parents) @@ -283,6 +292,10 @@ func (t *DurableTaskDeclaration[I]) Dump(workflowName string, taskDefaults *crea base := makeContractTaskOpts(&t.TaskShared, taskDefaults) base.ReadableId = t.Name base.Action = getActionID(workflowName, t.Name) + base.IsDurable = true + if base.SlotRequests == nil { + base.SlotRequests = map[string]int32{slotTypeDurable: 1} + } base.Parents = make([]string, len(t.Parents)) copy(base.Parents, t.Parents) return base @@ -294,6 +307,10 @@ func (t *OnFailureTaskDeclaration[I]) Dump(workflowName string, taskDefaults *cr base.ReadableId = "on-failure" base.Action = getActionID(workflowName, "on-failure") + base.IsDurable = false + if base.SlotRequests == nil { + base.SlotRequests = map[string]int32{slotTypeDefault: 1} + } return base } diff --git a/sdks/go/slot_capacities_test.go b/sdks/go/slot_capacities_test.go new file mode 100644 index 0000000000..65fb143772 --- /dev/null +++ 
b/sdks/go/slot_capacities_test.go @@ -0,0 +1,148 @@ +package hatchet + +import ( + "testing" + + v1 "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" +) + +func TestResolveWorkerSlotConfig_NoDurable(t *testing.T) { + dumps := []workflowDump{ + { + req: &v1.CreateWorkflowVersionRequest{ + Tasks: []*v1.CreateTaskOpts{ + { + IsDurable: false, + SlotRequests: map[string]int32{"default": 1}, + }, + }, + }, + }, + } + + resolved := resolveWorkerSlotConfig(map[slotType]int{}, dumps) + + if resolved[slotTypeDefault] != 100 { + t.Fatalf("expected default slots to be 100, got %d", resolved[slotTypeDefault]) + } + if _, ok := resolved[slotTypeDurable]; ok { + t.Fatalf("expected durable slots to be unset, got %d", resolved[slotTypeDurable]) + } +} + +func TestResolveWorkerSlotConfig_OnlyDurable(t *testing.T) { + dumps := []workflowDump{ + { + req: &v1.CreateWorkflowVersionRequest{ + Tasks: []*v1.CreateTaskOpts{ + { + IsDurable: true, + SlotRequests: map[string]int32{"durable": 1}, + }, + }, + }, + }, + } + + resolved := resolveWorkerSlotConfig(map[slotType]int{}, dumps) + + if resolved[slotTypeDurable] != 1000 { + t.Fatalf("expected durable slots to be 1000, got %d", resolved[slotTypeDurable]) + } + if _, ok := resolved[slotTypeDefault]; ok { + t.Fatalf("expected default slots to be unset, got %d", resolved[slotTypeDefault]) + } +} + +func TestResolveWorkerSlotConfig_Mixed(t *testing.T) { + dumps := []workflowDump{ + { + req: &v1.CreateWorkflowVersionRequest{ + Tasks: []*v1.CreateTaskOpts{ + { + IsDurable: false, + SlotRequests: map[string]int32{"default": 1}, + }, + { + IsDurable: true, + SlotRequests: map[string]int32{"durable": 1}, + }, + }, + }, + }, + } + + resolved := resolveWorkerSlotConfig(map[slotType]int{}, dumps) + + if resolved[slotTypeDefault] != 100 { + t.Fatalf("expected default slots to be 100, got %d", resolved[slotTypeDefault]) + } + if resolved[slotTypeDurable] != 1000 { + t.Fatalf("expected durable slots to be 1000, got %d", resolved[slotTypeDurable]) + } +} + +func TestResolveWorkerSlotConfig_UserConfiguredSlots(t *testing.T) { + dumps := []workflowDump{ + { + req: &v1.CreateWorkflowVersionRequest{ + Tasks: []*v1.CreateTaskOpts{ + { + IsDurable: false, + SlotRequests: map[string]int32{"default": 1}, + }, + { + IsDurable: true, + SlotRequests: map[string]int32{"durable": 1}, + }, + }, + }, + }, + } + + // Simulate user calling WithSlots(10) and WithDurableSlots(50) + initial := map[slotType]int{ + slotTypeDefault: 10, + slotTypeDurable: 50, + } + resolved := resolveWorkerSlotConfig(initial, dumps) + + if resolved[slotTypeDefault] != 10 { + t.Fatalf("expected user-configured default slots to be 10, got %d", resolved[slotTypeDefault]) + } + if resolved[slotTypeDurable] != 50 { + t.Fatalf("expected user-configured durable slots to be 50, got %d", resolved[slotTypeDurable]) + } +} + +func TestResolveWorkerSlotConfig_UserConfiguredPartial(t *testing.T) { + dumps := []workflowDump{ + { + req: &v1.CreateWorkflowVersionRequest{ + Tasks: []*v1.CreateTaskOpts{ + { + IsDurable: false, + SlotRequests: map[string]int32{"default": 1}, + }, + { + IsDurable: true, + SlotRequests: map[string]int32{"durable": 1}, + }, + }, + }, + }, + } + + // Simulate user calling WithSlots(10) but NOT WithDurableSlots + initial := map[slotType]int{ + slotTypeDefault: 10, + } + resolved := resolveWorkerSlotConfig(initial, dumps) + + if resolved[slotTypeDefault] != 10 { + t.Fatalf("expected user-configured default slots to be 10, got %d", resolved[slotTypeDefault]) + } + if resolved[slotTypeDurable] 
!= 1000 { + t.Fatalf("expected durable slots to use default 1000, got %d", resolved[slotTypeDurable]) + } +} diff --git a/sdks/go/worker.go b/sdks/go/worker.go index 43fc455d0e..11869a4fad 100644 --- a/sdks/go/worker.go +++ b/sdks/go/worker.go @@ -11,12 +11,14 @@ import ( type WorkerOption func(*workerConfig) type workerConfig struct { - workflows []WorkflowBase - slots int - durableSlots int - labels map[string]any - logger *zerolog.Logger - panicHandler func(ctx Context, recovered any) + workflows []WorkflowBase + slots int + slotsSet bool + durableSlots int + durableSlotsSet bool + labels map[string]any + logger *zerolog.Logger + panicHandler func(ctx Context, recovered any) } type WorkflowBase interface { @@ -39,6 +41,7 @@ func WithWorkflows(workflows ...WorkflowBase) WorkerOption { func WithSlots(slots int) WorkerOption { return func(config *workerConfig) { config.slots = slots + config.slotsSet = true } } @@ -60,6 +63,7 @@ func WithLogger(logger *zerolog.Logger) WorkerOption { func WithDurableSlots(durableSlots int) WorkerOption { return func(config *workerConfig) { config.durableSlots = durableSlots + config.durableSlotsSet = true } } diff --git a/sdks/python/examples/durable/test_durable.py b/sdks/python/examples/durable/test_durable.py index 562bb169ed..1eac42388d 100644 --- a/sdks/python/examples/durable/test_durable.py +++ b/sdks/python/examples/durable/test_durable.py @@ -27,15 +27,10 @@ async def test_durable(hatchet: Hatchet) -> None: active_workers = [w for w in workers.rows if w.status == "ACTIVE"] - assert len(active_workers) >= 2 assert any( w.name == hatchet.config.apply_namespace("e2e-test-worker") for w in active_workers ) - assert any( - w.name == hatchet.config.apply_namespace("e2e-test-worker_durable") - for w in active_workers - ) assert result["durable_task"]["status"] == "success" diff --git a/sdks/python/examples/simple/chaos_test.py b/sdks/python/examples/simple/chaos_test.py new file mode 100644 index 0000000000..df9d14d7e4 --- /dev/null +++ b/sdks/python/examples/simple/chaos_test.py @@ -0,0 +1,156 @@ +# > Simple +import argparse +import asyncio +import signal +import threading +import time +import traceback +from typing import Any + +from datetime import datetime, timezone +from pathlib import Path + +from hatchet_sdk import Context, EmptyModel, Hatchet + +hatchet = Hatchet(debug=True) + +FAILURE_LOG = Path(__file__).parent / "failures.log" + +# Track the current worker so we can clean up on Ctrl+C +_current_worker = None +_current_thread = None +# poetry run python ./simple/worker_test.py --suffix new + + +def log_failure(phase: str, error: Exception) -> None: + """Log a failure loudly to stderr and append to the failures log file.""" + timestamp = datetime.now(timezone.utc).isoformat() + tb = traceback.format_exception(type(error), error, error.__traceback__) + tb_str = "".join(tb) + + msg = f"[{timestamp}] FAILURE during {phase}: {error}\n{tb_str}" + + # Loud stderr output + print(f"\n{'!' * 60}", flush=True) + print(f"!!! FAILURE: {phase} !!!", flush=True) + print(msg, flush=True) + print(f"{'!' 
* 60}\n", flush=True) + + # Append to log file + with open(FAILURE_LOG, "a") as f: + f.write(msg) + f.write("-" * 60 + "\n") + + +@hatchet.task() +def simple(input: EmptyModel, ctx: Context) -> dict[str, str]: + print("Executing simple task!") + return {"result": "Hello, world!"} + + +@hatchet.durable_task() +def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]: + print("Executing durable task!") + return {"result": "Hello from durable!"} + + +def _force_stop_worker(worker: Any, thread: threading.Thread) -> None: + """Forcefully terminate the worker and its child processes.""" + worker.killing = True + worker._terminate_processes() + worker._close_queues() + if worker.loop and worker.loop.is_running(): + worker.loop.call_soon_threadsafe(worker.loop.stop) + thread.join(timeout=5) + + +def start_worker(suffix: str = "") -> tuple[Any, threading.Thread]: + """Create and start a worker in a background thread.""" + name = f"test-worker-{suffix}" if suffix else "test-worker" + worker = hatchet.worker( + name, + workflows=[simple, simple_durable], + slots=10, + ) + worker.handle_kill = False # Prevent sys.exit on shutdown + + # Restore default signal handlers so Ctrl+C raises KeyboardInterrupt + signal.signal(signal.SIGINT, signal.default_int_handler) + signal.signal(signal.SIGTERM, signal.SIG_DFL) + + thread = threading.Thread(target=worker.start, daemon=True) + thread.start() + + # Give the worker a moment to initialize + time.sleep(2) + print("Worker connected.") + return worker, thread + + +def stop_worker(worker: Any, thread: threading.Thread) -> None: + """Stop the worker gracefully.""" + try: + if worker.loop and worker.loop.is_running(): + asyncio.run_coroutine_threadsafe(worker.exit_gracefully(), worker.loop) + thread.join(timeout=10) + if thread.is_alive(): + _force_stop_worker(worker, thread) + print("Worker disconnected.") + except Exception as e: + log_failure("worker disconnect", e) + + +def main() -> None: + global _current_worker, _current_thread + + parser = argparse.ArgumentParser() + parser.add_argument( + "--suffix", + default="", + help="Suffix to append to the worker name (e.g. 'old' or 'new')", + ) + args = parser.parse_args() + + try: + while True: + # --- Connect the worker --- + print("\n=== Connecting worker ===") + try: + worker, thread = start_worker(args.suffix) + _current_worker, _current_thread = worker, thread + except Exception as e: + log_failure("worker connect", e) + time.sleep(5) + continue + + # --- Trigger tasks every 1 second for 5 seconds --- + for tick in range(5): + time.sleep(1) + print(f"\n--- Triggering tasks (tick {tick + 1}/5) ---") + try: + ref = simple.run_no_wait() + print(f"Task triggered: {ref}") + except Exception as e: + log_failure(f"task trigger (tick {tick + 1}/5)", e) + try: + ref = simple_durable.run_no_wait() + print(f"Durable task triggered: {ref}") + except Exception as e: + log_failure(f"durable task trigger (tick {tick + 1}/5)", e) + + # --- Disconnect the worker --- + print("\n=== Disconnecting worker ===") + stop_worker(worker, thread) + _current_worker, _current_thread = None, None + + except KeyboardInterrupt: + print("\n\nCtrl+C received, shutting down...") + if _current_worker and _current_thread: + _force_stop_worker(_current_worker, _current_thread) + print("Bye!") + + +# !! 
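+# Entry point: loops connect -> trigger -> disconnect until interrupted,
+# logging each failure through log_failure above.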
+ +if __name__ == "__main__": + main() diff --git a/sdks/python/examples/simple/worker.py b/sdks/python/examples/simple/worker.py index 7986b21fd5..686742c4fb 100644 --- a/sdks/python/examples/simple/worker.py +++ b/sdks/python/examples/simple/worker.py @@ -1,5 +1,4 @@ # > Simple - from hatchet_sdk import Context, EmptyModel, Hatchet hatchet = Hatchet(debug=True) @@ -16,7 +15,10 @@ def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]: def main() -> None: - worker = hatchet.worker("test-worker", workflows=[simple, simple_durable]) + worker = hatchet.worker( + "test-worker", + workflows=[simple, simple_durable], + ) worker.start() diff --git a/sdks/python/hatchet_sdk/__init__.py b/sdks/python/hatchet_sdk/__init__.py index 32475544d9..6fe497718a 100644 --- a/sdks/python/hatchet_sdk/__init__.py +++ b/sdks/python/hatchet_sdk/__init__.py @@ -177,6 +177,7 @@ from hatchet_sdk.serde import is_in_hatchet_serialization_context from hatchet_sdk.utils.opentelemetry import OTelAttribute from hatchet_sdk.utils.serde import remove_null_unicode_character +from hatchet_sdk.worker.slot_types import SlotType from hatchet_sdk.worker.worker import Worker, WorkerStartOptions, WorkerStatus from hatchet_sdk.workflow_run import WorkflowRunRef @@ -254,6 +255,7 @@ "RunStatus", "ScheduleTriggerWorkflowOptions", "SleepCondition", + "SlotType", "StepRun", "StepRunDiff", "StepRunEventType", diff --git a/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py b/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py index 83df0464e1..4607991071 100644 --- a/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py +++ b/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py @@ -48,7 +48,7 @@ class GetActionListenerRequest(BaseModel): worker_name: str services: list[str] actions: list[str] - slots: int + slot_config: dict[str, int] raw_labels: dict[str, str | int] = Field(default_factory=dict) labels: dict[str, WorkerLabels] = Field(default_factory=dict) diff --git a/sdks/python/hatchet_sdk/clients/dispatcher/dispatcher.py b/sdks/python/hatchet_sdk/clients/dispatcher/dispatcher.py index 2f845ec402..d490816284 100644 --- a/sdks/python/hatchet_sdk/clients/dispatcher/dispatcher.py +++ b/sdks/python/hatchet_sdk/clients/dispatcher/dispatcher.py @@ -18,6 +18,8 @@ STEP_EVENT_TYPE_COMPLETED, STEP_EVENT_TYPE_FAILED, ActionEventResponse, + GetVersionRequest, + GetVersionResponse, OverridesData, RefreshTimeoutRequest, ReleaseSlotRequest, @@ -74,8 +76,8 @@ async def get_action_listener( worker_name=req.worker_name, actions=req.actions, services=req.services, - slots=req.slots, labels=req.labels, + slot_config=req.slot_config, runtime_info=RuntimeInfo( sdk_version=version("hatchet_sdk"), language=SDKS.PYTHON, @@ -90,6 +92,26 @@ async def get_action_listener( return ActionListener(self.config, response.worker_id) + async def get_version(self) -> str: + """Call GetVersion RPC. Returns the engine semantic version string. + + Raises grpc.RpcError with UNIMPLEMENTED on older engines. 
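+
+        Callers may catch UNIMPLEMENTED to detect legacy engines that only
+        support the deprecated `slots` registration.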
+ """ + if not self.aio_client: + aio_conn = new_conn(self.config, True) + self.aio_client = DispatcherStub(aio_conn) + + response = cast( + GetVersionResponse, + await self.aio_client.GetVersion( # type: ignore[misc] + GetVersionRequest(), + timeout=DEFAULT_REGISTER_TIMEOUT, + metadata=get_metadata(self.token), + ), + ) + + return response.version + async def send_step_action_event( self, action: Action, diff --git a/sdks/python/hatchet_sdk/clients/rest/__init__.py b/sdks/python/hatchet_sdk/clients/rest/__init__.py index 2a937e9e36..0ed1562583 100644 --- a/sdks/python/hatchet_sdk/clients/rest/__init__.py +++ b/sdks/python/hatchet_sdk/clients/rest/__init__.py @@ -341,6 +341,7 @@ V1WebhookHMACEncoding, ) from hatchet_sdk.clients.rest.models.v1_webhook_list import V1WebhookList +from hatchet_sdk.clients.rest.models.v1_webhook_response import V1WebhookResponse from hatchet_sdk.clients.rest.models.v1_webhook_source_name import V1WebhookSourceName from hatchet_sdk.clients.rest.models.v1_workflow_run import V1WorkflowRun from hatchet_sdk.clients.rest.models.v1_workflow_run_details import V1WorkflowRunDetails @@ -374,6 +375,7 @@ from hatchet_sdk.clients.rest.models.worker_list import WorkerList from hatchet_sdk.clients.rest.models.worker_runtime_info import WorkerRuntimeInfo from hatchet_sdk.clients.rest.models.worker_runtime_sdks import WorkerRuntimeSDKs +from hatchet_sdk.clients.rest.models.worker_slot_config import WorkerSlotConfig from hatchet_sdk.clients.rest.models.worker_type import WorkerType from hatchet_sdk.clients.rest.models.workflow import Workflow from hatchet_sdk.clients.rest.models.workflow_concurrency import WorkflowConcurrency diff --git a/sdks/python/hatchet_sdk/clients/rest/api/webhook_api.py b/sdks/python/hatchet_sdk/clients/rest/api/webhook_api.py index c54ab27df0..89fc7448dd 100644 --- a/sdks/python/hatchet_sdk/clients/rest/api/webhook_api.py +++ b/sdks/python/hatchet_sdk/clients/rest/api/webhook_api.py @@ -17,7 +17,7 @@ from typing_extensions import Annotated from pydantic import Field, StrictInt, StrictStr -from typing import Any, Dict, List, Optional +from typing import List, Optional from typing_extensions import Annotated from hatchet_sdk.clients.rest.models.v1_create_webhook_request import ( V1CreateWebhookRequest, @@ -27,6 +27,7 @@ ) from hatchet_sdk.clients.rest.models.v1_webhook import V1Webhook from hatchet_sdk.clients.rest.models.v1_webhook_list import V1WebhookList +from hatchet_sdk.clients.rest.models.v1_webhook_response import V1WebhookResponse from hatchet_sdk.clients.rest.models.v1_webhook_source_name import V1WebhookSourceName from hatchet_sdk.clients.rest.api_client import ApiClient, RequestSerialized @@ -1296,7 +1297,7 @@ def v1_webhook_receive( _content_type: Optional[StrictStr] = None, _headers: Optional[Dict[StrictStr, Any]] = None, _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> Dict[str, object]: + ) -> V1WebhookResponse: """Post a webhook message Post an incoming webhook message @@ -1337,7 +1338,7 @@ def v1_webhook_receive( ) _response_types_map: Dict[str, Optional[str]] = { - "200": "Dict[str, object]", + "200": "V1WebhookResponse", "400": "APIErrors", "403": "APIErrors", } @@ -1371,7 +1372,7 @@ def v1_webhook_receive_with_http_info( _content_type: Optional[StrictStr] = None, _headers: Optional[Dict[StrictStr, Any]] = None, _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[Dict[str, object]]: + ) -> ApiResponse[V1WebhookResponse]: """Post a webhook message Post an incoming webhook message @@ -1412,7 
+1413,7 @@ def v1_webhook_receive_with_http_info( ) _response_types_map: Dict[str, Optional[str]] = { - "200": "Dict[str, object]", + "200": "V1WebhookResponse", "400": "APIErrors", "403": "APIErrors", } @@ -1487,7 +1488,7 @@ def v1_webhook_receive_without_preload_content( ) _response_types_map: Dict[str, Optional[str]] = { - "200": "Dict[str, object]", + "200": "V1WebhookResponse", "400": "APIErrors", "403": "APIErrors", } diff --git a/sdks/python/hatchet_sdk/clients/rest/models/__init__.py b/sdks/python/hatchet_sdk/clients/rest/models/__init__.py index a0ffb63174..57b29bee5d 100644 --- a/sdks/python/hatchet_sdk/clients/rest/models/__init__.py +++ b/sdks/python/hatchet_sdk/clients/rest/models/__init__.py @@ -304,6 +304,7 @@ V1WebhookHMACEncoding, ) from hatchet_sdk.clients.rest.models.v1_webhook_list import V1WebhookList +from hatchet_sdk.clients.rest.models.v1_webhook_response import V1WebhookResponse from hatchet_sdk.clients.rest.models.v1_webhook_source_name import V1WebhookSourceName from hatchet_sdk.clients.rest.models.v1_workflow_run import V1WorkflowRun from hatchet_sdk.clients.rest.models.v1_workflow_run_details import V1WorkflowRunDetails @@ -337,6 +338,7 @@ from hatchet_sdk.clients.rest.models.worker_list import WorkerList from hatchet_sdk.clients.rest.models.worker_runtime_info import WorkerRuntimeInfo from hatchet_sdk.clients.rest.models.worker_runtime_sdks import WorkerRuntimeSDKs +from hatchet_sdk.clients.rest.models.worker_slot_config import WorkerSlotConfig from hatchet_sdk.clients.rest.models.worker_type import WorkerType from hatchet_sdk.clients.rest.models.workflow import Workflow from hatchet_sdk.clients.rest.models.workflow_concurrency import WorkflowConcurrency diff --git a/sdks/python/hatchet_sdk/clients/rest/models/step.py b/sdks/python/hatchet_sdk/clients/rest/models/step.py index 00d32b5e54..0b0e2c647f 100644 --- a/sdks/python/hatchet_sdk/clients/rest/models/step.py +++ b/sdks/python/hatchet_sdk/clients/rest/models/step.py @@ -16,7 +16,7 @@ import re # noqa: F401 import json -from pydantic import BaseModel, ConfigDict, Field, StrictStr +from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictInt, StrictStr from typing import Any, ClassVar, Dict, List, Optional from hatchet_sdk.clients.rest.models.api_resource_meta import APIResourceMeta from typing import Optional, Set @@ -38,6 +38,14 @@ class Step(BaseModel): timeout: Optional[StrictStr] = Field( default=None, description="The timeout of the step." 
) + is_durable: Optional[StrictBool] = Field( + default=None, description="Whether the step is durable.", alias="isDurable" + ) + slot_requests: Optional[Dict[str, StrictInt]] = Field( + default=None, + description="Slot requests for the step (slot_type -> units).", + alias="slotRequests", + ) children: Optional[List[StrictStr]] = None parents: Optional[List[StrictStr]] = None __properties: ClassVar[List[str]] = [ @@ -47,6 +55,8 @@ class Step(BaseModel): "jobId", "action", "timeout", + "isDurable", + "slotRequests", "children", "parents", ] @@ -114,6 +124,8 @@ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: "jobId": obj.get("jobId"), "action": obj.get("action"), "timeout": obj.get("timeout"), + "isDurable": obj.get("isDurable"), + "slotRequests": obj.get("slotRequests"), "children": obj.get("children"), "parents": obj.get("parents"), } diff --git a/sdks/python/hatchet_sdk/clients/rest/models/v1_webhook_response.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_webhook_response.py new file mode 100644 index 0000000000..772b3ecf34 --- /dev/null +++ b/sdks/python/hatchet_sdk/clients/rest/models/v1_webhook_response.py @@ -0,0 +1,100 @@ +# coding: utf-8 + +""" +Hatchet API + +The Hatchet API + +The version of the OpenAPI document: 1.0.0 +Generated by OpenAPI Generator (https://openapi-generator.tech) + +Do not edit the class manually. +""" # noqa: E501 + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from hatchet_sdk.clients.rest.models.v1_event import V1Event +from typing import Optional, Set +from typing_extensions import Self + + +class V1WebhookResponse(BaseModel): + """ + V1WebhookResponse + """ # noqa: E501 + + message: Optional[StrictStr] = Field( + default=None, description="The message for the webhook response" + ) + event: Optional[V1Event] = None + challenge: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["message", "event", "challenge"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of V1WebhookResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of event + if self.event: + _dict["event"] = self.event.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of V1WebhookResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "message": obj.get("message"), + "event": ( + V1Event.from_dict(obj["event"]) + if obj.get("event") is not None + else None + ), + "challenge": obj.get("challenge"), + } + ) + return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/worker.py b/sdks/python/hatchet_sdk/clients/rest/models/worker.py index 446917b722..e150818795 100644 --- a/sdks/python/hatchet_sdk/clients/rest/models/worker.py +++ b/sdks/python/hatchet_sdk/clients/rest/models/worker.py @@ -17,7 +17,7 @@ import json from datetime import datetime -from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr, field_validator +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator from typing import Any, ClassVar, Dict, List, Optional from typing_extensions import Annotated from hatchet_sdk.clients.rest.models.api_resource_meta import APIResourceMeta @@ -26,6 +26,7 @@ from hatchet_sdk.clients.rest.models.semaphore_slots import SemaphoreSlots from hatchet_sdk.clients.rest.models.worker_label import WorkerLabel from hatchet_sdk.clients.rest.models.worker_runtime_info import WorkerRuntimeInfo +from hatchet_sdk.clients.rest.models.worker_slot_config import WorkerSlotConfig from hatchet_sdk.clients.rest.models.worker_type import WorkerType from typing import Optional, Set from typing_extensions import Self @@ -68,15 +69,10 @@ class Worker(BaseModel): status: Optional[StrictStr] = Field( default=None, description="The status of the worker." 
) - max_runs: Optional[StrictInt] = Field( + slot_config: Optional[Dict[str, WorkerSlotConfig]] = Field( default=None, - description="The maximum number of runs this worker can execute concurrently.", - alias="maxRuns", - ) - available_runs: Optional[StrictInt] = Field( - default=None, - description="The number of runs this worker can execute concurrently.", - alias="availableRuns", + description="Slot availability and limits for this worker (slot_type -> { available, limit }).", + alias="slotConfig", ) dispatcher_id: Optional[ Annotated[str, Field(min_length=36, strict=True, max_length=36)] @@ -106,8 +102,7 @@ class Worker(BaseModel): "slots", "recentStepRuns", "status", - "maxRuns", - "availableRuns", + "slotConfig", "dispatcherId", "labels", "webhookUrl", @@ -188,6 +183,15 @@ def to_dict(self) -> Dict[str, Any]: if _item_recent_step_runs: _items.append(_item_recent_step_runs.to_dict()) _dict["recentStepRuns"] = _items + # override the default output from pydantic by calling `to_dict()` of each value in slot_config (dict) + _field_dict = {} + if self.slot_config: + for _key_slot_config in self.slot_config: + if self.slot_config[_key_slot_config]: + _field_dict[_key_slot_config] = self.slot_config[ + _key_slot_config + ].to_dict() + _dict["slotConfig"] = _field_dict # override the default output from pydantic by calling `to_dict()` of each item in labels (list) _items = [] if self.labels: @@ -240,8 +244,14 @@ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: else None ), "status": obj.get("status"), - "maxRuns": obj.get("maxRuns"), - "availableRuns": obj.get("availableRuns"), + "slotConfig": ( + dict( + (_k, WorkerSlotConfig.from_dict(_v)) + for _k, _v in obj["slotConfig"].items() + ) + if obj.get("slotConfig") is not None + else None + ), "dispatcherId": obj.get("dispatcherId"), "labels": ( [WorkerLabel.from_dict(_item) for _item in obj["labels"]] diff --git a/sdks/python/hatchet_sdk/clients/rest/models/worker_slot_config.py b/sdks/python/hatchet_sdk/clients/rest/models/worker_slot_config.py new file mode 100644 index 0000000000..3196ef3022 --- /dev/null +++ b/sdks/python/hatchet_sdk/clients/rest/models/worker_slot_config.py @@ -0,0 +1,89 @@ +# coding: utf-8 + +""" +Hatchet API + +The Hatchet API + +The version of the OpenAPI document: 1.0.0 +Generated by OpenAPI Generator (https://openapi-generator.tech) + +Do not edit the class manually. +""" # noqa: E501 + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictInt +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class WorkerSlotConfig(BaseModel): + """ + Slot availability and limits for a slot type. + """ # noqa: E501 + + available: Optional[StrictInt] = Field( + default=None, description="The number of available units for this slot type." + ) + limit: StrictInt = Field( + description="The maximum number of units for this slot type." 
+ ) + __properties: ClassVar[List[str]] = ["available", "limit"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of WorkerSlotConfig from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of WorkerSlotConfig from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + {"available": obj.get("available"), "limit": obj.get("limit")} + ) + return _obj diff --git a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.py b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.py index 07e4784454..7005606285 100644 --- a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.py +++ b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.py @@ -25,7 +25,7 @@ from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x64ispatcher.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"Z\n\x0cWorkerLabels\x12\x16\n\tstr_value\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tint_value\x18\x02 \x01(\x05H\x01\x88\x01\x01\x42\x0c\n\n_str_valueB\x0c\n\n_int_value\"\xcc\x01\n\x0bRuntimeInfo\x12\x18\n\x0bsdk_version\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x08language\x18\x02 \x01(\x0e\x32\x05.SDKSH\x01\x88\x01\x01\x12\x1d\n\x10language_version\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0f\n\x02os\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x12\n\x05\x65xtra\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_sdk_versionB\x0b\n\t_languageB\x13\n\x11_language_versionB\x05\n\x03_osB\x08\n\x06_extra\"\xc1\x02\n\x15WorkerRegisterRequest\x12\x13\n\x0bworker_name\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63tions\x18\x02 \x03(\t\x12\x10\n\x08services\x18\x03 \x03(\t\x12\x12\n\x05slots\x18\x04 \x01(\x05H\x00\x88\x01\x01\x12\x32\n\x06labels\x18\x05 \x03(\x0b\x32\".WorkerRegisterRequest.LabelsEntry\x12\x17\n\nwebhook_id\x18\x06 \x01(\tH\x01\x88\x01\x01\x12\'\n\x0cruntime_info\x18\x07 \x01(\x0b\x32\x0c.RuntimeInfoH\x02\x88\x01\x01\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\x42\x08\n\x06_slotsB\r\n\x0b_webhook_idB\x0f\n\r_runtime_info\"S\n\x16WorkerRegisterResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12\x13\n\x0bworker_name\x18\x03 \x01(\t\"\xa4\x01\n\x19UpsertWorkerLabelsRequest\x12\x11\n\tworker_id\x18\x01 
\x01(\t\x12\x36\n\x06labels\x18\x02 \x03(\x0b\x32&.UpsertWorkerLabelsRequest.LabelsEntry\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\"B\n\x1aUpsertWorkerLabelsResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\x98\x05\n\x0e\x41ssignedAction\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x17\n\x0fworkflow_run_id\x18\x02 \x01(\t\x12\x1c\n\x14get_group_key_run_id\x18\x03 \x01(\t\x12\x0e\n\x06job_id\x18\x04 \x01(\t\x12\x10\n\x08job_name\x18\x05 \x01(\t\x12\x12\n\njob_run_id\x18\x06 \x01(\t\x12\x0f\n\x07task_id\x18\x07 \x01(\t\x12\x1c\n\x14task_run_external_id\x18\x08 \x01(\t\x12\x11\n\taction_id\x18\t \x01(\t\x12 \n\x0b\x61\x63tion_type\x18\n \x01(\x0e\x32\x0b.ActionType\x12\x16\n\x0e\x61\x63tion_payload\x18\x0b \x01(\t\x12\x11\n\ttask_name\x18\x0c \x01(\t\x12\x13\n\x0bretry_count\x18\r \x01(\x05\x12 \n\x13\x61\x64\x64itional_metadata\x18\x0e \x01(\tH\x00\x88\x01\x01\x12!\n\x14\x63hild_workflow_index\x18\x0f \x01(\x05H\x01\x88\x01\x01\x12\x1f\n\x12\x63hild_workflow_key\x18\x10 \x01(\tH\x02\x88\x01\x01\x12#\n\x16parent_workflow_run_id\x18\x11 \x01(\tH\x03\x88\x01\x01\x12\x10\n\x08priority\x18\x12 \x01(\x05\x12\x18\n\x0bworkflow_id\x18\x13 \x01(\tH\x04\x88\x01\x01\x12 \n\x13workflow_version_id\x18\x14 \x01(\tH\x05\x88\x01\x01\x42\x16\n\x14_additional_metadataB\x17\n\x15_child_workflow_indexB\x15\n\x13_child_workflow_keyB\x19\n\x17_parent_workflow_run_idB\x0e\n\x0c_workflow_idB\x16\n\x14_workflow_version_id\"(\n\x13WorkerListenRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\"-\n\x18WorkerUnsubscribeRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\"A\n\x19WorkerUnsubscribeResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xec\x01\n\x13GroupKeyActionEvent\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x17\n\x0fworkflow_run_id\x18\x02 \x01(\t\x12\x1c\n\x14get_group_key_run_id\x18\x03 \x01(\t\x12\x11\n\taction_id\x18\x04 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\nevent_type\x18\x06 \x01(\x0e\x32\x18.GroupKeyActionEventType\x12\x15\n\revent_payload\x18\x07 \x01(\t\"\xde\x02\n\x0fStepActionEvent\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x0e\n\x06job_id\x18\x02 \x01(\t\x12\x12\n\njob_run_id\x18\x03 \x01(\t\x12\x0f\n\x07task_id\x18\x04 \x01(\t\x12\x1c\n\x14task_run_external_id\x18\x05 \x01(\t\x12\x11\n\taction_id\x18\x06 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12(\n\nevent_type\x18\x08 \x01(\x0e\x32\x14.StepActionEventType\x12\x15\n\revent_payload\x18\t \x01(\t\x12\x18\n\x0bretry_count\x18\n \x01(\x05H\x00\x88\x01\x01\x12\x1d\n\x10should_not_retry\x18\x0b \x01(\x08H\x01\x88\x01\x01\x42\x0e\n\x0c_retry_countB\x13\n\x11_should_not_retry\";\n\x13\x41\x63tionEventResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xcc\x01\n SubscribeToWorkflowEventsRequest\x12\x1c\n\x0fworkflow_run_id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\x13\x61\x64\x64itional_meta_key\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\"\n\x15\x61\x64\x64itional_meta_value\x18\x03 \x01(\tH\x02\x88\x01\x01\x42\x12\n\x10_workflow_run_idB\x16\n\x14_additional_meta_keyB\x18\n\x16_additional_meta_value\"9\n\x1eSubscribeToWorkflowRunsRequest\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\"\xe7\x02\n\rWorkflowEvent\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\x12$\n\rresource_type\x18\x02 \x01(\x0e\x32\r.ResourceType\x12&\n\nevent_type\x18\x03 
\x01(\x0e\x32\x12.ResourceEventType\x12\x13\n\x0bresource_id\x18\x04 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x15\n\revent_payload\x18\x06 \x01(\t\x12\x0e\n\x06hangup\x18\x07 \x01(\x08\x12\x19\n\x0ctask_retries\x18\x08 \x01(\x05H\x00\x88\x01\x01\x12\x18\n\x0bretry_count\x18\t \x01(\x05H\x01\x88\x01\x01\x12\x18\n\x0b\x65vent_index\x18\n \x01(\x03H\x02\x88\x01\x01\x42\x0f\n\r_task_retriesB\x0e\n\x0c_retry_countB\x0e\n\x0c_event_index\"\xac\x01\n\x10WorkflowRunEvent\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\x12)\n\nevent_type\x18\x02 \x01(\x0e\x32\x15.WorkflowRunEventType\x12\x33\n\x0f\x65vent_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x07results\x18\x04 \x03(\x0b\x32\x0e.StepRunResult\"\x92\x01\n\rStepRunResult\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x12\n\njob_run_id\x18\x03 \x01(\t\x12\x12\n\x05\x65rror\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x05 \x01(\tH\x01\x88\x01\x01\x42\x08\n\x06_errorB\t\n\x07_output\"c\n\rOverridesData\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x17\n\x0f\x63\x61ller_filename\x18\x04 \x01(\t\"\x17\n\x15OverridesDataResponse\"W\n\x10HeartbeatRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x30\n\x0cheartbeat_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x13\n\x11HeartbeatResponse\"S\n\x15RefreshTimeoutRequest\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x1c\n\x14increment_timeout_by\x18\x02 \x01(\t\"H\n\x16RefreshTimeoutResponse\x12.\n\ntimeout_at\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"2\n\x12ReleaseSlotRequest\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\"\x15\n\x13ReleaseSlotResponse*A\n\x04SDKS\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02GO\x10\x01\x12\n\n\x06PYTHON\x10\x02\x12\x0e\n\nTYPESCRIPT\x10\x03\x12\x08\n\x04RUBY\x10\x04*N\n\nActionType\x12\x12\n\x0eSTART_STEP_RUN\x10\x00\x12\x13\n\x0f\x43\x41NCEL_STEP_RUN\x10\x01\x12\x17\n\x13START_GET_GROUP_KEY\x10\x02*\xa2\x01\n\x17GroupKeyActionEventType\x12 \n\x1cGROUP_KEY_EVENT_TYPE_UNKNOWN\x10\x00\x12 \n\x1cGROUP_KEY_EVENT_TYPE_STARTED\x10\x01\x12\"\n\x1eGROUP_KEY_EVENT_TYPE_COMPLETED\x10\x02\x12\x1f\n\x1bGROUP_KEY_EVENT_TYPE_FAILED\x10\x03*\xac\x01\n\x13StepActionEventType\x12\x1b\n\x17STEP_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1b\n\x17STEP_EVENT_TYPE_STARTED\x10\x01\x12\x1d\n\x19STEP_EVENT_TYPE_COMPLETED\x10\x02\x12\x1a\n\x16STEP_EVENT_TYPE_FAILED\x10\x03\x12 \n\x1cSTEP_EVENT_TYPE_ACKNOWLEDGED\x10\x04*e\n\x0cResourceType\x12\x19\n\x15RESOURCE_TYPE_UNKNOWN\x10\x00\x12\x1a\n\x16RESOURCE_TYPE_STEP_RUN\x10\x01\x12\x1e\n\x1aRESOURCE_TYPE_WORKFLOW_RUN\x10\x02*\xfe\x01\n\x11ResourceEventType\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_STARTED\x10\x01\x12!\n\x1dRESOURCE_EVENT_TYPE_COMPLETED\x10\x02\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_FAILED\x10\x03\x12!\n\x1dRESOURCE_EVENT_TYPE_CANCELLED\x10\x04\x12!\n\x1dRESOURCE_EVENT_TYPE_TIMED_OUT\x10\x05\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_STREAM\x10\x06*<\n\x14WorkflowRunEventType\x12$\n 
WORKFLOW_RUN_EVENT_TYPE_FINISHED\x10\x00\x32\xf8\x06\n\nDispatcher\x12=\n\x08Register\x12\x16.WorkerRegisterRequest\x1a\x17.WorkerRegisterResponse\"\x00\x12\x33\n\x06Listen\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x35\n\x08ListenV2\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x34\n\tHeartbeat\x12\x11.HeartbeatRequest\x1a\x12.HeartbeatResponse\"\x00\x12R\n\x19SubscribeToWorkflowEvents\x12!.SubscribeToWorkflowEventsRequest\x1a\x0e.WorkflowEvent\"\x00\x30\x01\x12S\n\x17SubscribeToWorkflowRuns\x12\x1f.SubscribeToWorkflowRunsRequest\x1a\x11.WorkflowRunEvent\"\x00(\x01\x30\x01\x12?\n\x13SendStepActionEvent\x12\x10.StepActionEvent\x1a\x14.ActionEventResponse\"\x00\x12G\n\x17SendGroupKeyActionEvent\x12\x14.GroupKeyActionEvent\x1a\x14.ActionEventResponse\"\x00\x12<\n\x10PutOverridesData\x12\x0e.OverridesData\x1a\x16.OverridesDataResponse\"\x00\x12\x46\n\x0bUnsubscribe\x12\x19.WorkerUnsubscribeRequest\x1a\x1a.WorkerUnsubscribeResponse\"\x00\x12\x43\n\x0eRefreshTimeout\x12\x16.RefreshTimeoutRequest\x1a\x17.RefreshTimeoutResponse\"\x00\x12:\n\x0bReleaseSlot\x12\x13.ReleaseSlotRequest\x1a\x14.ReleaseSlotResponse\"\x00\x12O\n\x12UpsertWorkerLabels\x12\x1a.UpsertWorkerLabelsRequest\x1a\x1b.UpsertWorkerLabelsResponse\"\x00\x42GZEgithub.com/hatchet-dev/hatchet/internal/services/dispatcher/contractsb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x64ispatcher.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"Z\n\x0cWorkerLabels\x12\x16\n\tstr_value\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tint_value\x18\x02 \x01(\x05H\x01\x88\x01\x01\x42\x0c\n\n_str_valueB\x0c\n\n_int_value\"\xcc\x01\n\x0bRuntimeInfo\x12\x18\n\x0bsdk_version\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x08language\x18\x02 \x01(\x0e\x32\x05.SDKSH\x01\x88\x01\x01\x12\x1d\n\x10language_version\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0f\n\x02os\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x12\n\x05\x65xtra\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_sdk_versionB\x0b\n\t_languageB\x13\n\x11_language_versionB\x05\n\x03_osB\x08\n\x06_extra\"\xb1\x03\n\x15WorkerRegisterRequest\x12\x13\n\x0bworker_name\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63tions\x18\x02 \x03(\t\x12\x10\n\x08services\x18\x03 \x03(\t\x12\x12\n\x05slots\x18\x04 \x01(\x05H\x00\x88\x01\x01\x12\x32\n\x06labels\x18\x05 \x03(\x0b\x32\".WorkerRegisterRequest.LabelsEntry\x12\x17\n\nwebhook_id\x18\x06 \x01(\tH\x01\x88\x01\x01\x12\'\n\x0cruntime_info\x18\x07 \x01(\x0b\x32\x0c.RuntimeInfoH\x02\x88\x01\x01\x12;\n\x0bslot_config\x18\t \x03(\x0b\x32&.WorkerRegisterRequest.SlotConfigEntry\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\x1a\x31\n\x0fSlotConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_slotsB\r\n\x0b_webhook_idB\x0f\n\r_runtime_info\"S\n\x16WorkerRegisterResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12\x13\n\x0bworker_name\x18\x03 \x01(\t\"\xa4\x01\n\x19UpsertWorkerLabelsRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x36\n\x06labels\x18\x02 \x03(\x0b\x32&.UpsertWorkerLabelsRequest.LabelsEntry\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\"B\n\x1aUpsertWorkerLabelsResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\x98\x05\n\x0e\x41ssignedAction\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x17\n\x0fworkflow_run_id\x18\x02 
\x01(\t\x12\x1c\n\x14get_group_key_run_id\x18\x03 \x01(\t\x12\x0e\n\x06job_id\x18\x04 \x01(\t\x12\x10\n\x08job_name\x18\x05 \x01(\t\x12\x12\n\njob_run_id\x18\x06 \x01(\t\x12\x0f\n\x07task_id\x18\x07 \x01(\t\x12\x1c\n\x14task_run_external_id\x18\x08 \x01(\t\x12\x11\n\taction_id\x18\t \x01(\t\x12 \n\x0b\x61\x63tion_type\x18\n \x01(\x0e\x32\x0b.ActionType\x12\x16\n\x0e\x61\x63tion_payload\x18\x0b \x01(\t\x12\x11\n\ttask_name\x18\x0c \x01(\t\x12\x13\n\x0bretry_count\x18\r \x01(\x05\x12 \n\x13\x61\x64\x64itional_metadata\x18\x0e \x01(\tH\x00\x88\x01\x01\x12!\n\x14\x63hild_workflow_index\x18\x0f \x01(\x05H\x01\x88\x01\x01\x12\x1f\n\x12\x63hild_workflow_key\x18\x10 \x01(\tH\x02\x88\x01\x01\x12#\n\x16parent_workflow_run_id\x18\x11 \x01(\tH\x03\x88\x01\x01\x12\x10\n\x08priority\x18\x12 \x01(\x05\x12\x18\n\x0bworkflow_id\x18\x13 \x01(\tH\x04\x88\x01\x01\x12 \n\x13workflow_version_id\x18\x14 \x01(\tH\x05\x88\x01\x01\x42\x16\n\x14_additional_metadataB\x17\n\x15_child_workflow_indexB\x15\n\x13_child_workflow_keyB\x19\n\x17_parent_workflow_run_idB\x0e\n\x0c_workflow_idB\x16\n\x14_workflow_version_id\"(\n\x13WorkerListenRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\"-\n\x18WorkerUnsubscribeRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\"A\n\x19WorkerUnsubscribeResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xec\x01\n\x13GroupKeyActionEvent\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x17\n\x0fworkflow_run_id\x18\x02 \x01(\t\x12\x1c\n\x14get_group_key_run_id\x18\x03 \x01(\t\x12\x11\n\taction_id\x18\x04 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\nevent_type\x18\x06 \x01(\x0e\x32\x18.GroupKeyActionEventType\x12\x15\n\revent_payload\x18\x07 \x01(\t\"\xde\x02\n\x0fStepActionEvent\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x0e\n\x06job_id\x18\x02 \x01(\t\x12\x12\n\njob_run_id\x18\x03 \x01(\t\x12\x0f\n\x07task_id\x18\x04 \x01(\t\x12\x1c\n\x14task_run_external_id\x18\x05 \x01(\t\x12\x11\n\taction_id\x18\x06 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12(\n\nevent_type\x18\x08 \x01(\x0e\x32\x14.StepActionEventType\x12\x15\n\revent_payload\x18\t \x01(\t\x12\x18\n\x0bretry_count\x18\n \x01(\x05H\x00\x88\x01\x01\x12\x1d\n\x10should_not_retry\x18\x0b \x01(\x08H\x01\x88\x01\x01\x42\x0e\n\x0c_retry_countB\x13\n\x11_should_not_retry\";\n\x13\x41\x63tionEventResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xcc\x01\n SubscribeToWorkflowEventsRequest\x12\x1c\n\x0fworkflow_run_id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\x13\x61\x64\x64itional_meta_key\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\"\n\x15\x61\x64\x64itional_meta_value\x18\x03 \x01(\tH\x02\x88\x01\x01\x42\x12\n\x10_workflow_run_idB\x16\n\x14_additional_meta_keyB\x18\n\x16_additional_meta_value\"9\n\x1eSubscribeToWorkflowRunsRequest\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\"\xe7\x02\n\rWorkflowEvent\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\x12$\n\rresource_type\x18\x02 \x01(\x0e\x32\r.ResourceType\x12&\n\nevent_type\x18\x03 \x01(\x0e\x32\x12.ResourceEventType\x12\x13\n\x0bresource_id\x18\x04 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x15\n\revent_payload\x18\x06 \x01(\t\x12\x0e\n\x06hangup\x18\x07 \x01(\x08\x12\x19\n\x0ctask_retries\x18\x08 \x01(\x05H\x00\x88\x01\x01\x12\x18\n\x0bretry_count\x18\t \x01(\x05H\x01\x88\x01\x01\x12\x18\n\x0b\x65vent_index\x18\n 
\x01(\x03H\x02\x88\x01\x01\x42\x0f\n\r_task_retriesB\x0e\n\x0c_retry_countB\x0e\n\x0c_event_index\"\xac\x01\n\x10WorkflowRunEvent\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\x12)\n\nevent_type\x18\x02 \x01(\x0e\x32\x15.WorkflowRunEventType\x12\x33\n\x0f\x65vent_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x07results\x18\x04 \x03(\x0b\x32\x0e.StepRunResult\"\x92\x01\n\rStepRunResult\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x12\n\njob_run_id\x18\x03 \x01(\t\x12\x12\n\x05\x65rror\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x05 \x01(\tH\x01\x88\x01\x01\x42\x08\n\x06_errorB\t\n\x07_output\"c\n\rOverridesData\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x17\n\x0f\x63\x61ller_filename\x18\x04 \x01(\t\"\x17\n\x15OverridesDataResponse\"W\n\x10HeartbeatRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x30\n\x0cheartbeat_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x13\n\x11HeartbeatResponse\"S\n\x15RefreshTimeoutRequest\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x1c\n\x14increment_timeout_by\x18\x02 \x01(\t\"H\n\x16RefreshTimeoutResponse\x12.\n\ntimeout_at\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"2\n\x12ReleaseSlotRequest\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\"\x15\n\x13ReleaseSlotResponse\"\x13\n\x11GetVersionRequest\"%\n\x12GetVersionResponse\x12\x0f\n\x07version\x18\x01 \x01(\t*A\n\x04SDKS\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02GO\x10\x01\x12\n\n\x06PYTHON\x10\x02\x12\x0e\n\nTYPESCRIPT\x10\x03\x12\x08\n\x04RUBY\x10\x04*N\n\nActionType\x12\x12\n\x0eSTART_STEP_RUN\x10\x00\x12\x13\n\x0f\x43\x41NCEL_STEP_RUN\x10\x01\x12\x17\n\x13START_GET_GROUP_KEY\x10\x02*\xa2\x01\n\x17GroupKeyActionEventType\x12 \n\x1cGROUP_KEY_EVENT_TYPE_UNKNOWN\x10\x00\x12 \n\x1cGROUP_KEY_EVENT_TYPE_STARTED\x10\x01\x12\"\n\x1eGROUP_KEY_EVENT_TYPE_COMPLETED\x10\x02\x12\x1f\n\x1bGROUP_KEY_EVENT_TYPE_FAILED\x10\x03*\xac\x01\n\x13StepActionEventType\x12\x1b\n\x17STEP_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1b\n\x17STEP_EVENT_TYPE_STARTED\x10\x01\x12\x1d\n\x19STEP_EVENT_TYPE_COMPLETED\x10\x02\x12\x1a\n\x16STEP_EVENT_TYPE_FAILED\x10\x03\x12 \n\x1cSTEP_EVENT_TYPE_ACKNOWLEDGED\x10\x04*e\n\x0cResourceType\x12\x19\n\x15RESOURCE_TYPE_UNKNOWN\x10\x00\x12\x1a\n\x16RESOURCE_TYPE_STEP_RUN\x10\x01\x12\x1e\n\x1aRESOURCE_TYPE_WORKFLOW_RUN\x10\x02*\xfe\x01\n\x11ResourceEventType\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_STARTED\x10\x01\x12!\n\x1dRESOURCE_EVENT_TYPE_COMPLETED\x10\x02\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_FAILED\x10\x03\x12!\n\x1dRESOURCE_EVENT_TYPE_CANCELLED\x10\x04\x12!\n\x1dRESOURCE_EVENT_TYPE_TIMED_OUT\x10\x05\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_STREAM\x10\x06*<\n\x14WorkflowRunEventType\x12$\n 
WORKFLOW_RUN_EVENT_TYPE_FINISHED\x10\x00\x32\xb1\x07\n\nDispatcher\x12=\n\x08Register\x12\x16.WorkerRegisterRequest\x1a\x17.WorkerRegisterResponse\"\x00\x12\x33\n\x06Listen\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x35\n\x08ListenV2\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x34\n\tHeartbeat\x12\x11.HeartbeatRequest\x1a\x12.HeartbeatResponse\"\x00\x12R\n\x19SubscribeToWorkflowEvents\x12!.SubscribeToWorkflowEventsRequest\x1a\x0e.WorkflowEvent\"\x00\x30\x01\x12S\n\x17SubscribeToWorkflowRuns\x12\x1f.SubscribeToWorkflowRunsRequest\x1a\x11.WorkflowRunEvent\"\x00(\x01\x30\x01\x12?\n\x13SendStepActionEvent\x12\x10.StepActionEvent\x1a\x14.ActionEventResponse\"\x00\x12G\n\x17SendGroupKeyActionEvent\x12\x14.GroupKeyActionEvent\x1a\x14.ActionEventResponse\"\x00\x12<\n\x10PutOverridesData\x12\x0e.OverridesData\x1a\x16.OverridesDataResponse\"\x00\x12\x46\n\x0bUnsubscribe\x12\x19.WorkerUnsubscribeRequest\x1a\x1a.WorkerUnsubscribeResponse\"\x00\x12\x43\n\x0eRefreshTimeout\x12\x16.RefreshTimeoutRequest\x1a\x17.RefreshTimeoutResponse\"\x00\x12:\n\x0bReleaseSlot\x12\x13.ReleaseSlotRequest\x1a\x14.ReleaseSlotResponse\"\x00\x12O\n\x12UpsertWorkerLabels\x12\x1a.UpsertWorkerLabelsRequest\x1a\x1b.UpsertWorkerLabelsResponse\"\x00\x12\x37\n\nGetVersion\x12\x12.GetVersionRequest\x1a\x13.GetVersionResponse\"\x00\x42GZEgithub.com/hatchet-dev/hatchet/internal/services/dispatcher/contractsb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -35,78 +35,86 @@ _globals['DESCRIPTOR']._serialized_options = b'ZEgithub.com/hatchet-dev/hatchet/internal/services/dispatcher/contracts' _globals['_WORKERREGISTERREQUEST_LABELSENTRY']._loaded_options = None _globals['_WORKERREGISTERREQUEST_LABELSENTRY']._serialized_options = b'8\001' + _globals['_WORKERREGISTERREQUEST_SLOTCONFIGENTRY']._loaded_options = None + _globals['_WORKERREGISTERREQUEST_SLOTCONFIGENTRY']._serialized_options = b'8\001' _globals['_UPSERTWORKERLABELSREQUEST_LABELSENTRY']._loaded_options = None _globals['_UPSERTWORKERLABELSREQUEST_LABELSENTRY']._serialized_options = b'8\001' - _globals['_SDKS']._serialized_start=3894 - _globals['_SDKS']._serialized_end=3959 - _globals['_ACTIONTYPE']._serialized_start=3961 - _globals['_ACTIONTYPE']._serialized_end=4039 - _globals['_GROUPKEYACTIONEVENTTYPE']._serialized_start=4042 - _globals['_GROUPKEYACTIONEVENTTYPE']._serialized_end=4204 - _globals['_STEPACTIONEVENTTYPE']._serialized_start=4207 - _globals['_STEPACTIONEVENTTYPE']._serialized_end=4379 - _globals['_RESOURCETYPE']._serialized_start=4381 - _globals['_RESOURCETYPE']._serialized_end=4482 - _globals['_RESOURCEEVENTTYPE']._serialized_start=4485 - _globals['_RESOURCEEVENTTYPE']._serialized_end=4739 - _globals['_WORKFLOWRUNEVENTTYPE']._serialized_start=4741 - _globals['_WORKFLOWRUNEVENTTYPE']._serialized_end=4801 + _globals['_SDKS']._serialized_start=4066 + _globals['_SDKS']._serialized_end=4131 + _globals['_ACTIONTYPE']._serialized_start=4133 + _globals['_ACTIONTYPE']._serialized_end=4211 + _globals['_GROUPKEYACTIONEVENTTYPE']._serialized_start=4214 + _globals['_GROUPKEYACTIONEVENTTYPE']._serialized_end=4376 + _globals['_STEPACTIONEVENTTYPE']._serialized_start=4379 + _globals['_STEPACTIONEVENTTYPE']._serialized_end=4551 + _globals['_RESOURCETYPE']._serialized_start=4553 + _globals['_RESOURCETYPE']._serialized_end=4654 + _globals['_RESOURCEEVENTTYPE']._serialized_start=4657 + _globals['_RESOURCEEVENTTYPE']._serialized_end=4911 + 
_globals['_WORKFLOWRUNEVENTTYPE']._serialized_start=4913 + _globals['_WORKFLOWRUNEVENTTYPE']._serialized_end=4973 _globals['_WORKERLABELS']._serialized_start=53 _globals['_WORKERLABELS']._serialized_end=143 _globals['_RUNTIMEINFO']._serialized_start=146 _globals['_RUNTIMEINFO']._serialized_end=350 _globals['_WORKERREGISTERREQUEST']._serialized_start=353 - _globals['_WORKERREGISTERREQUEST']._serialized_end=674 - _globals['_WORKERREGISTERREQUEST_LABELSENTRY']._serialized_start=572 - _globals['_WORKERREGISTERREQUEST_LABELSENTRY']._serialized_end=632 - _globals['_WORKERREGISTERRESPONSE']._serialized_start=676 - _globals['_WORKERREGISTERRESPONSE']._serialized_end=759 - _globals['_UPSERTWORKERLABELSREQUEST']._serialized_start=762 - _globals['_UPSERTWORKERLABELSREQUEST']._serialized_end=926 - _globals['_UPSERTWORKERLABELSREQUEST_LABELSENTRY']._serialized_start=572 - _globals['_UPSERTWORKERLABELSREQUEST_LABELSENTRY']._serialized_end=632 - _globals['_UPSERTWORKERLABELSRESPONSE']._serialized_start=928 - _globals['_UPSERTWORKERLABELSRESPONSE']._serialized_end=994 - _globals['_ASSIGNEDACTION']._serialized_start=997 - _globals['_ASSIGNEDACTION']._serialized_end=1661 - _globals['_WORKERLISTENREQUEST']._serialized_start=1663 - _globals['_WORKERLISTENREQUEST']._serialized_end=1703 - _globals['_WORKERUNSUBSCRIBEREQUEST']._serialized_start=1705 - _globals['_WORKERUNSUBSCRIBEREQUEST']._serialized_end=1750 - _globals['_WORKERUNSUBSCRIBERESPONSE']._serialized_start=1752 - _globals['_WORKERUNSUBSCRIBERESPONSE']._serialized_end=1817 - _globals['_GROUPKEYACTIONEVENT']._serialized_start=1820 - _globals['_GROUPKEYACTIONEVENT']._serialized_end=2056 - _globals['_STEPACTIONEVENT']._serialized_start=2059 - _globals['_STEPACTIONEVENT']._serialized_end=2409 - _globals['_ACTIONEVENTRESPONSE']._serialized_start=2411 - _globals['_ACTIONEVENTRESPONSE']._serialized_end=2470 - _globals['_SUBSCRIBETOWORKFLOWEVENTSREQUEST']._serialized_start=2473 - _globals['_SUBSCRIBETOWORKFLOWEVENTSREQUEST']._serialized_end=2677 - _globals['_SUBSCRIBETOWORKFLOWRUNSREQUEST']._serialized_start=2679 - _globals['_SUBSCRIBETOWORKFLOWRUNSREQUEST']._serialized_end=2736 - _globals['_WORKFLOWEVENT']._serialized_start=2739 - _globals['_WORKFLOWEVENT']._serialized_end=3098 - _globals['_WORKFLOWRUNEVENT']._serialized_start=3101 - _globals['_WORKFLOWRUNEVENT']._serialized_end=3273 - _globals['_STEPRUNRESULT']._serialized_start=3276 - _globals['_STEPRUNRESULT']._serialized_end=3422 - _globals['_OVERRIDESDATA']._serialized_start=3424 - _globals['_OVERRIDESDATA']._serialized_end=3523 - _globals['_OVERRIDESDATARESPONSE']._serialized_start=3525 - _globals['_OVERRIDESDATARESPONSE']._serialized_end=3548 - _globals['_HEARTBEATREQUEST']._serialized_start=3550 - _globals['_HEARTBEATREQUEST']._serialized_end=3637 - _globals['_HEARTBEATRESPONSE']._serialized_start=3639 - _globals['_HEARTBEATRESPONSE']._serialized_end=3658 - _globals['_REFRESHTIMEOUTREQUEST']._serialized_start=3660 - _globals['_REFRESHTIMEOUTREQUEST']._serialized_end=3743 - _globals['_REFRESHTIMEOUTRESPONSE']._serialized_start=3745 - _globals['_REFRESHTIMEOUTRESPONSE']._serialized_end=3817 - _globals['_RELEASESLOTREQUEST']._serialized_start=3819 - _globals['_RELEASESLOTREQUEST']._serialized_end=3869 - _globals['_RELEASESLOTRESPONSE']._serialized_start=3871 - _globals['_RELEASESLOTRESPONSE']._serialized_end=3892 - _globals['_DISPATCHER']._serialized_start=4804 - _globals['_DISPATCHER']._serialized_end=5692 + _globals['_WORKERREGISTERREQUEST']._serialized_end=786 + 
_globals['_WORKERREGISTERREQUEST_LABELSENTRY']._serialized_start=633 + _globals['_WORKERREGISTERREQUEST_LABELSENTRY']._serialized_end=693 + _globals['_WORKERREGISTERREQUEST_SLOTCONFIGENTRY']._serialized_start=695 + _globals['_WORKERREGISTERREQUEST_SLOTCONFIGENTRY']._serialized_end=744 + _globals['_WORKERREGISTERRESPONSE']._serialized_start=788 + _globals['_WORKERREGISTERRESPONSE']._serialized_end=871 + _globals['_UPSERTWORKERLABELSREQUEST']._serialized_start=874 + _globals['_UPSERTWORKERLABELSREQUEST']._serialized_end=1038 + _globals['_UPSERTWORKERLABELSREQUEST_LABELSENTRY']._serialized_start=633 + _globals['_UPSERTWORKERLABELSREQUEST_LABELSENTRY']._serialized_end=693 + _globals['_UPSERTWORKERLABELSRESPONSE']._serialized_start=1040 + _globals['_UPSERTWORKERLABELSRESPONSE']._serialized_end=1106 + _globals['_ASSIGNEDACTION']._serialized_start=1109 + _globals['_ASSIGNEDACTION']._serialized_end=1773 + _globals['_WORKERLISTENREQUEST']._serialized_start=1775 + _globals['_WORKERLISTENREQUEST']._serialized_end=1815 + _globals['_WORKERUNSUBSCRIBEREQUEST']._serialized_start=1817 + _globals['_WORKERUNSUBSCRIBEREQUEST']._serialized_end=1862 + _globals['_WORKERUNSUBSCRIBERESPONSE']._serialized_start=1864 + _globals['_WORKERUNSUBSCRIBERESPONSE']._serialized_end=1929 + _globals['_GROUPKEYACTIONEVENT']._serialized_start=1932 + _globals['_GROUPKEYACTIONEVENT']._serialized_end=2168 + _globals['_STEPACTIONEVENT']._serialized_start=2171 + _globals['_STEPACTIONEVENT']._serialized_end=2521 + _globals['_ACTIONEVENTRESPONSE']._serialized_start=2523 + _globals['_ACTIONEVENTRESPONSE']._serialized_end=2582 + _globals['_SUBSCRIBETOWORKFLOWEVENTSREQUEST']._serialized_start=2585 + _globals['_SUBSCRIBETOWORKFLOWEVENTSREQUEST']._serialized_end=2789 + _globals['_SUBSCRIBETOWORKFLOWRUNSREQUEST']._serialized_start=2791 + _globals['_SUBSCRIBETOWORKFLOWRUNSREQUEST']._serialized_end=2848 + _globals['_WORKFLOWEVENT']._serialized_start=2851 + _globals['_WORKFLOWEVENT']._serialized_end=3210 + _globals['_WORKFLOWRUNEVENT']._serialized_start=3213 + _globals['_WORKFLOWRUNEVENT']._serialized_end=3385 + _globals['_STEPRUNRESULT']._serialized_start=3388 + _globals['_STEPRUNRESULT']._serialized_end=3534 + _globals['_OVERRIDESDATA']._serialized_start=3536 + _globals['_OVERRIDESDATA']._serialized_end=3635 + _globals['_OVERRIDESDATARESPONSE']._serialized_start=3637 + _globals['_OVERRIDESDATARESPONSE']._serialized_end=3660 + _globals['_HEARTBEATREQUEST']._serialized_start=3662 + _globals['_HEARTBEATREQUEST']._serialized_end=3749 + _globals['_HEARTBEATRESPONSE']._serialized_start=3751 + _globals['_HEARTBEATRESPONSE']._serialized_end=3770 + _globals['_REFRESHTIMEOUTREQUEST']._serialized_start=3772 + _globals['_REFRESHTIMEOUTREQUEST']._serialized_end=3855 + _globals['_REFRESHTIMEOUTRESPONSE']._serialized_start=3857 + _globals['_REFRESHTIMEOUTRESPONSE']._serialized_end=3929 + _globals['_RELEASESLOTREQUEST']._serialized_start=3931 + _globals['_RELEASESLOTREQUEST']._serialized_end=3981 + _globals['_RELEASESLOTRESPONSE']._serialized_start=3983 + _globals['_RELEASESLOTRESPONSE']._serialized_end=4004 + _globals['_GETVERSIONREQUEST']._serialized_start=4006 + _globals['_GETVERSIONREQUEST']._serialized_end=4025 + _globals['_GETVERSIONRESPONSE']._serialized_start=4027 + _globals['_GETVERSIONRESPONSE']._serialized_end=4064 + _globals['_DISPATCHER']._serialized_start=4976 + _globals['_DISPATCHER']._serialized_end=5921 # @@protoc_insertion_point(module_scope) diff --git a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.pyi 
b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.pyi index 74d27a1654..393db66c5c 100644 --- a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.pyi +++ b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.pyi @@ -110,7 +110,7 @@ class RuntimeInfo(_message.Message): def __init__(self, sdk_version: _Optional[str] = ..., language: _Optional[_Union[SDKS, str]] = ..., language_version: _Optional[str] = ..., os: _Optional[str] = ..., extra: _Optional[str] = ...) -> None: ... class WorkerRegisterRequest(_message.Message): - __slots__ = ("worker_name", "actions", "services", "slots", "labels", "webhook_id", "runtime_info") + __slots__ = ("worker_name", "actions", "services", "slots", "labels", "webhook_id", "runtime_info", "slot_config") class LabelsEntry(_message.Message): __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] @@ -118,6 +118,13 @@ class WorkerRegisterRequest(_message.Message): key: str value: WorkerLabels def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[WorkerLabels, _Mapping]] = ...) -> None: ... + class SlotConfigEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: int + def __init__(self, key: _Optional[str] = ..., value: _Optional[int] = ...) -> None: ... WORKER_NAME_FIELD_NUMBER: _ClassVar[int] ACTIONS_FIELD_NUMBER: _ClassVar[int] SERVICES_FIELD_NUMBER: _ClassVar[int] @@ -125,6 +132,7 @@ class WorkerRegisterRequest(_message.Message): LABELS_FIELD_NUMBER: _ClassVar[int] WEBHOOK_ID_FIELD_NUMBER: _ClassVar[int] RUNTIME_INFO_FIELD_NUMBER: _ClassVar[int] + SLOT_CONFIG_FIELD_NUMBER: _ClassVar[int] worker_name: str actions: _containers.RepeatedScalarFieldContainer[str] services: _containers.RepeatedScalarFieldContainer[str] @@ -132,7 +140,8 @@ class WorkerRegisterRequest(_message.Message): labels: _containers.MessageMap[str, WorkerLabels] webhook_id: str runtime_info: RuntimeInfo - def __init__(self, worker_name: _Optional[str] = ..., actions: _Optional[_Iterable[str]] = ..., services: _Optional[_Iterable[str]] = ..., slots: _Optional[int] = ..., labels: _Optional[_Mapping[str, WorkerLabels]] = ..., webhook_id: _Optional[str] = ..., runtime_info: _Optional[_Union[RuntimeInfo, _Mapping]] = ...) -> None: ... + slot_config: _containers.ScalarMap[str, int] + def __init__(self, worker_name: _Optional[str] = ..., actions: _Optional[_Iterable[str]] = ..., services: _Optional[_Iterable[str]] = ..., slots: _Optional[int] = ..., labels: _Optional[_Mapping[str, WorkerLabels]] = ..., webhook_id: _Optional[str] = ..., runtime_info: _Optional[_Union[RuntimeInfo, _Mapping]] = ..., slot_config: _Optional[_Mapping[str, int]] = ...) -> None: ... class WorkerRegisterResponse(_message.Message): __slots__ = ("tenant_id", "worker_id", "worker_name") @@ -400,3 +409,13 @@ class ReleaseSlotRequest(_message.Message): class ReleaseSlotResponse(_message.Message): __slots__ = () def __init__(self) -> None: ... + +class GetVersionRequest(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class GetVersionResponse(_message.Message): + __slots__ = ("version",) + VERSION_FIELD_NUMBER: _ClassVar[int] + version: str + def __init__(self, version: _Optional[str] = ...) -> None: ... 
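The GetVersionRequest/GetVersionResponse stubs above, together with the Dispatcher/GetVersion wiring in the next file, give SDKs a cheap capability probe: per the servicer docstring, engines that predate this RPC answer UNIMPLEMENTED (note the response actually carries a string version field, not the "simple integer" the docstring mentions). A minimal sketch of the client-side probe this enables; the helper name and channel handling are illustrative assumptions, not part of this diff:

import grpc

from hatchet_sdk.contracts import dispatcher_pb2, dispatcher_pb2_grpc


def engine_supports_slot_config(channel: grpc.Channel) -> bool:
    # Old engines register no handler for /Dispatcher/GetVersion, so the
    # server answers UNIMPLEMENTED; that is the legacy-fallback signal.
    stub = dispatcher_pb2_grpc.DispatcherStub(channel)
    try:
        stub.GetVersion(dispatcher_pb2.GetVersionRequest())
    except grpc.RpcError as e:
        if e.code() == grpc.StatusCode.UNIMPLEMENTED:
            return False  # use the legacy slots-int registration path
        raise
    return True
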
diff --git a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2_grpc.py b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2_grpc.py index 30fb814d6b..dd51b9f62a 100644 --- a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2_grpc.py +++ b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2_grpc.py @@ -99,6 +99,11 @@ def __init__(self, channel: grpc.Channel | grpc.aio.Channel) -> None: request_serializer=dispatcher__pb2.UpsertWorkerLabelsRequest.SerializeToString, response_deserializer=dispatcher__pb2.UpsertWorkerLabelsResponse.FromString, _registered_method=True) + self.GetVersion = channel.unary_unary( + '/Dispatcher/GetVersion', + request_serializer=dispatcher__pb2.GetVersionRequest.SerializeToString, + response_deserializer=dispatcher__pb2.GetVersionResponse.FromString, + _registered_method=True) class DispatcherServicer(object): @@ -185,6 +190,15 @@ def UpsertWorkerLabels(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def GetVersion(self, request, context): + """GetVersion returns the dispatcher protocol version as a simple integer. + SDKs use this to determine feature support (e.g. slot_config registration). + Old engines that do not implement this RPC will return UNIMPLEMENTED. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_DispatcherServicer_to_server(servicer, server): rpc_method_handlers = { @@ -253,6 +267,11 @@ def add_DispatcherServicer_to_server(servicer, server): request_deserializer=dispatcher__pb2.UpsertWorkerLabelsRequest.FromString, response_serializer=dispatcher__pb2.UpsertWorkerLabelsResponse.SerializeToString, ), + 'GetVersion': grpc.unary_unary_rpc_method_handler( + servicer.GetVersion, + request_deserializer=dispatcher__pb2.GetVersionRequest.FromString, + response_serializer=dispatcher__pb2.GetVersionResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'Dispatcher', rpc_method_handlers) @@ -614,3 +633,30 @@ def UpsertWorkerLabels(request, timeout, metadata, _registered_method=True) + + @staticmethod + def GetVersion(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/Dispatcher/GetVersion', + dispatcher__pb2.GetVersionRequest.SerializeToString, + dispatcher__pb2.GetVersionResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.py b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.py index 0b655e0780..0aec82e8c4 100644 --- a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.py +++ b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.py @@ -26,7 +26,7 @@ from hatchet_sdk.contracts.v1.shared import condition_pb2 as v1_dot_shared_dot_condition__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12v1/workflows.proto\x12\x02v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19v1/shared/condition.proto\"[\n\x12\x43\x61ncelTasksRequest\x12\x14\n\x0c\x65xternal_ids\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"[\n\x12ReplayTasksRequest\x12\x14\n\x0c\x65xternal_ids\x18\x01 
\x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"\xb7\x01\n\x0bTasksFilter\x12\x10\n\x08statuses\x18\x01 \x03(\t\x12)\n\x05since\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\x05until\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x88\x01\x01\x12\x14\n\x0cworkflow_ids\x18\x04 \x03(\t\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x03(\tB\x08\n\x06_until\".\n\x13\x43\x61ncelTasksResponse\x12\x17\n\x0f\x63\x61ncelled_tasks\x18\x01 \x03(\t\"-\n\x13ReplayTasksResponse\x12\x16\n\x0ereplayed_tasks\x18\x01 \x03(\t\"\x82\x01\n\x19TriggerWorkflowRunRequest\x12\x15\n\rworkflow_name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x02 \x01(\x0c\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x03 \x01(\x0c\x12\x15\n\x08priority\x18\x04 \x01(\x05H\x00\x88\x01\x01\x42\x0b\n\t_priority\"1\n\x1aTriggerWorkflowRunResponse\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"\xac\x04\n\x1c\x43reateWorkflowVersionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x16\n\x0e\x65vent_triggers\x18\x04 \x03(\t\x12\x15\n\rcron_triggers\x18\x05 \x03(\t\x12!\n\x05tasks\x18\x06 \x03(\x0b\x32\x12.v1.CreateTaskOpts\x12$\n\x0b\x63oncurrency\x18\x07 \x01(\x0b\x32\x0f.v1.Concurrency\x12\x17\n\ncron_input\x18\x08 \x01(\tH\x00\x88\x01\x01\x12\x30\n\x0fon_failure_task\x18\t \x01(\x0b\x32\x12.v1.CreateTaskOptsH\x01\x88\x01\x01\x12\'\n\x06sticky\x18\n \x01(\x0e\x32\x12.v1.StickyStrategyH\x02\x88\x01\x01\x12\x1d\n\x10\x64\x65\x66\x61ult_priority\x18\x0b \x01(\x05H\x03\x88\x01\x01\x12(\n\x0f\x63oncurrency_arr\x18\x0c \x03(\x0b\x32\x0f.v1.Concurrency\x12*\n\x0f\x64\x65\x66\x61ult_filters\x18\r \x03(\x0b\x32\x11.v1.DefaultFilter\x12\x1e\n\x11input_json_schema\x18\x0e \x01(\x0cH\x04\x88\x01\x01\x42\r\n\x0b_cron_inputB\x12\n\x10_on_failure_taskB\t\n\x07_stickyB\x13\n\x11_default_priorityB\x14\n\x12_input_json_schema\"T\n\rDefaultFilter\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\r\n\x05scope\x18\x02 \x01(\t\x12\x14\n\x07payload\x18\x03 \x01(\x0cH\x00\x88\x01\x01\x42\n\n\x08_payload\"\x93\x01\n\x0b\x43oncurrency\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\x15\n\x08max_runs\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x39\n\x0elimit_strategy\x18\x03 \x01(\x0e\x32\x1c.v1.ConcurrencyLimitStrategyH\x01\x88\x01\x01\x42\x0b\n\t_max_runsB\x11\n\x0f_limit_strategy\"\xe8\x01\n\x13\x44\x65siredWorkerLabels\x12\x16\n\tstr_value\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tint_value\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x15\n\x08required\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x32\n\ncomparator\x18\x04 \x01(\x0e\x32\x19.v1.WorkerLabelComparatorH\x03\x88\x01\x01\x12\x13\n\x06weight\x18\x05 \x01(\x05H\x04\x88\x01\x01\x42\x0c\n\n_str_valueB\x0c\n\n_int_valueB\x0b\n\t_requiredB\r\n\x0b_comparatorB\t\n\x07_weight\"\xb1\x04\n\x0e\x43reateTaskOpts\x12\x13\n\x0breadable_id\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x63tion\x18\x02 \x01(\t\x12\x0f\n\x07timeout\x18\x03 \x01(\t\x12\x0e\n\x06inputs\x18\x04 \x01(\t\x12\x0f\n\x07parents\x18\x05 \x03(\t\x12\x0f\n\x07retries\x18\x06 \x01(\x05\x12,\n\x0brate_limits\x18\x07 \x03(\x0b\x32\x17.v1.CreateTaskRateLimit\x12;\n\rworker_labels\x18\x08 \x03(\x0b\x32$.v1.CreateTaskOpts.WorkerLabelsEntry\x12\x1b\n\x0e\x62\x61\x63koff_factor\x18\t \x01(\x02H\x00\x88\x01\x01\x12 \n\x13\x62\x61\x63koff_max_seconds\x18\n \x01(\x05H\x01\x88\x01\x01\x12$\n\x0b\x63oncurrency\x18\x0b \x03(\x0b\x32\x0f.v1.Concurrency\x12+\n\nconditions\x18\x0c 
\x01(\x0b\x32\x12.v1.TaskConditionsH\x02\x88\x01\x01\x12\x1d\n\x10schedule_timeout\x18\r \x01(\tH\x03\x88\x01\x01\x1aL\n\x11WorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.v1.DesiredWorkerLabels:\x02\x38\x01\x42\x11\n\x0f_backoff_factorB\x16\n\x14_backoff_max_secondsB\r\n\x0b_conditionsB\x13\n\x11_schedule_timeout\"\xfd\x01\n\x13\x43reateTaskRateLimit\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\x05units\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x15\n\x08key_expr\x18\x03 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nunits_expr\x18\x04 \x01(\tH\x02\x88\x01\x01\x12\x1e\n\x11limit_values_expr\x18\x05 \x01(\tH\x03\x88\x01\x01\x12,\n\x08\x64uration\x18\x06 \x01(\x0e\x32\x15.v1.RateLimitDurationH\x04\x88\x01\x01\x42\x08\n\x06_unitsB\x0b\n\t_key_exprB\r\n\x0b_units_exprB\x14\n\x12_limit_values_exprB\x0b\n\t_duration\"@\n\x1d\x43reateWorkflowVersionResponse\x12\n\n\x02id\x18\x01 \x01(\t\x12\x13\n\x0bworkflow_id\x18\x02 \x01(\t\"+\n\x14GetRunDetailsRequest\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"\x96\x01\n\rTaskRunDetail\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\x12\x1d\n\x06status\x18\x02 \x01(\x0e\x32\r.v1.RunStatus\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x04 \x01(\x0cH\x01\x88\x01\x01\x12\x13\n\x0breadable_id\x18\x05 \x01(\tB\x08\n\x06_errorB\t\n\x07_output\"\xf0\x01\n\x15GetRunDetailsResponse\x12\r\n\x05input\x18\x01 \x01(\x0c\x12\x1d\n\x06status\x18\x02 \x01(\x0e\x32\r.v1.RunStatus\x12:\n\ttask_runs\x18\x03 \x03(\x0b\x32\'.v1.GetRunDetailsResponse.TaskRunsEntry\x12\x0c\n\x04\x64one\x18\x04 \x01(\x08\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x01(\x0c\x1a\x42\n\rTaskRunsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.v1.TaskRunDetail:\x02\x38\x01*$\n\x0eStickyStrategy\x12\x08\n\x04SOFT\x10\x00\x12\x08\n\x04HARD\x10\x01*]\n\x11RateLimitDuration\x12\n\n\x06SECOND\x10\x00\x12\n\n\x06MINUTE\x10\x01\x12\x08\n\x04HOUR\x10\x02\x12\x07\n\x03\x44\x41Y\x10\x03\x12\x08\n\x04WEEK\x10\x04\x12\t\n\x05MONTH\x10\x05\x12\x08\n\x04YEAR\x10\x06*N\n\tRunStatus\x12\n\n\x06QUEUED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tCOMPLETED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tCANCELLED\x10\x04*\x7f\n\x18\x43oncurrencyLimitStrategy\x12\x16\n\x12\x43\x41NCEL_IN_PROGRESS\x10\x00\x12\x0f\n\x0b\x44ROP_NEWEST\x10\x01\x12\x10\n\x0cQUEUE_NEWEST\x10\x02\x12\x15\n\x11GROUP_ROUND_ROBIN\x10\x03\x12\x11\n\rCANCEL_NEWEST\x10\x04*\x85\x01\n\x15WorkerLabelComparator\x12\t\n\x05\x45QUAL\x10\x00\x12\r\n\tNOT_EQUAL\x10\x01\x12\x10\n\x0cGREATER_THAN\x10\x02\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x03\x12\r\n\tLESS_THAN\x10\x04\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x05\x32\xfd\x02\n\x0c\x41\x64minService\x12R\n\x0bPutWorkflow\x12 .v1.CreateWorkflowVersionRequest\x1a!.v1.CreateWorkflowVersionResponse\x12>\n\x0b\x43\x61ncelTasks\x12\x16.v1.CancelTasksRequest\x1a\x17.v1.CancelTasksResponse\x12>\n\x0bReplayTasks\x12\x16.v1.ReplayTasksRequest\x1a\x17.v1.ReplayTasksResponse\x12S\n\x12TriggerWorkflowRun\x12\x1d.v1.TriggerWorkflowRunRequest\x1a\x1e.v1.TriggerWorkflowRunResponse\x12\x44\n\rGetRunDetails\x12\x18.v1.GetRunDetailsRequest\x1a\x19.v1.GetRunDetailsResponseBBZ@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1b\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12v1/workflows.proto\x12\x02v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19v1/shared/condition.proto\"[\n\x12\x43\x61ncelTasksRequest\x12\x14\n\x0c\x65xternal_ids\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 
\x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"[\n\x12ReplayTasksRequest\x12\x14\n\x0c\x65xternal_ids\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"\xb7\x01\n\x0bTasksFilter\x12\x10\n\x08statuses\x18\x01 \x03(\t\x12)\n\x05since\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\x05until\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x88\x01\x01\x12\x14\n\x0cworkflow_ids\x18\x04 \x03(\t\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x03(\tB\x08\n\x06_until\".\n\x13\x43\x61ncelTasksResponse\x12\x17\n\x0f\x63\x61ncelled_tasks\x18\x01 \x03(\t\"-\n\x13ReplayTasksResponse\x12\x16\n\x0ereplayed_tasks\x18\x01 \x03(\t\"\x82\x01\n\x19TriggerWorkflowRunRequest\x12\x15\n\rworkflow_name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x02 \x01(\x0c\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x03 \x01(\x0c\x12\x15\n\x08priority\x18\x04 \x01(\x05H\x00\x88\x01\x01\x42\x0b\n\t_priority\"1\n\x1aTriggerWorkflowRunResponse\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"\xac\x04\n\x1c\x43reateWorkflowVersionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x16\n\x0e\x65vent_triggers\x18\x04 \x03(\t\x12\x15\n\rcron_triggers\x18\x05 \x03(\t\x12!\n\x05tasks\x18\x06 \x03(\x0b\x32\x12.v1.CreateTaskOpts\x12$\n\x0b\x63oncurrency\x18\x07 \x01(\x0b\x32\x0f.v1.Concurrency\x12\x17\n\ncron_input\x18\x08 \x01(\tH\x00\x88\x01\x01\x12\x30\n\x0fon_failure_task\x18\t \x01(\x0b\x32\x12.v1.CreateTaskOptsH\x01\x88\x01\x01\x12\'\n\x06sticky\x18\n \x01(\x0e\x32\x12.v1.StickyStrategyH\x02\x88\x01\x01\x12\x1d\n\x10\x64\x65\x66\x61ult_priority\x18\x0b \x01(\x05H\x03\x88\x01\x01\x12(\n\x0f\x63oncurrency_arr\x18\x0c \x03(\x0b\x32\x0f.v1.Concurrency\x12*\n\x0f\x64\x65\x66\x61ult_filters\x18\r \x03(\x0b\x32\x11.v1.DefaultFilter\x12\x1e\n\x11input_json_schema\x18\x0e \x01(\x0cH\x04\x88\x01\x01\x42\r\n\x0b_cron_inputB\x12\n\x10_on_failure_taskB\t\n\x07_stickyB\x13\n\x11_default_priorityB\x14\n\x12_input_json_schema\"T\n\rDefaultFilter\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\r\n\x05scope\x18\x02 \x01(\t\x12\x14\n\x07payload\x18\x03 \x01(\x0cH\x00\x88\x01\x01\x42\n\n\x08_payload\"\x93\x01\n\x0b\x43oncurrency\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\x15\n\x08max_runs\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x39\n\x0elimit_strategy\x18\x03 \x01(\x0e\x32\x1c.v1.ConcurrencyLimitStrategyH\x01\x88\x01\x01\x42\x0b\n\t_max_runsB\x11\n\x0f_limit_strategy\"\xe8\x01\n\x13\x44\x65siredWorkerLabels\x12\x16\n\tstr_value\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tint_value\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x15\n\x08required\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x32\n\ncomparator\x18\x04 \x01(\x0e\x32\x19.v1.WorkerLabelComparatorH\x03\x88\x01\x01\x12\x13\n\x06weight\x18\x05 \x01(\x05H\x04\x88\x01\x01\x42\x0c\n\n_str_valueB\x0c\n\n_int_valueB\x0b\n\t_requiredB\r\n\x0b_comparatorB\t\n\x07_weight\"\xb7\x05\n\x0e\x43reateTaskOpts\x12\x13\n\x0breadable_id\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x63tion\x18\x02 \x01(\t\x12\x0f\n\x07timeout\x18\x03 \x01(\t\x12\x0e\n\x06inputs\x18\x04 \x01(\t\x12\x0f\n\x07parents\x18\x05 \x03(\t\x12\x0f\n\x07retries\x18\x06 \x01(\x05\x12,\n\x0brate_limits\x18\x07 \x03(\x0b\x32\x17.v1.CreateTaskRateLimit\x12;\n\rworker_labels\x18\x08 \x03(\x0b\x32$.v1.CreateTaskOpts.WorkerLabelsEntry\x12\x1b\n\x0e\x62\x61\x63koff_factor\x18\t \x01(\x02H\x00\x88\x01\x01\x12 \n\x13\x62\x61\x63koff_max_seconds\x18\n 
\x01(\x05H\x01\x88\x01\x01\x12$\n\x0b\x63oncurrency\x18\x0b \x03(\x0b\x32\x0f.v1.Concurrency\x12+\n\nconditions\x18\x0c \x01(\x0b\x32\x12.v1.TaskConditionsH\x02\x88\x01\x01\x12\x1d\n\x10schedule_timeout\x18\r \x01(\tH\x03\x88\x01\x01\x12\x12\n\nis_durable\x18\x0e \x01(\x08\x12;\n\rslot_requests\x18\x0f \x03(\x0b\x32$.v1.CreateTaskOpts.SlotRequestsEntry\x1aL\n\x11WorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.v1.DesiredWorkerLabels:\x02\x38\x01\x1a\x33\n\x11SlotRequestsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x11\n\x0f_backoff_factorB\x16\n\x14_backoff_max_secondsB\r\n\x0b_conditionsB\x13\n\x11_schedule_timeout\"\xfd\x01\n\x13\x43reateTaskRateLimit\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\x05units\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x15\n\x08key_expr\x18\x03 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nunits_expr\x18\x04 \x01(\tH\x02\x88\x01\x01\x12\x1e\n\x11limit_values_expr\x18\x05 \x01(\tH\x03\x88\x01\x01\x12,\n\x08\x64uration\x18\x06 \x01(\x0e\x32\x15.v1.RateLimitDurationH\x04\x88\x01\x01\x42\x08\n\x06_unitsB\x0b\n\t_key_exprB\r\n\x0b_units_exprB\x14\n\x12_limit_values_exprB\x0b\n\t_duration\"@\n\x1d\x43reateWorkflowVersionResponse\x12\n\n\x02id\x18\x01 \x01(\t\x12\x13\n\x0bworkflow_id\x18\x02 \x01(\t\"+\n\x14GetRunDetailsRequest\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"\x96\x01\n\rTaskRunDetail\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\x12\x1d\n\x06status\x18\x02 \x01(\x0e\x32\r.v1.RunStatus\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x04 \x01(\x0cH\x01\x88\x01\x01\x12\x13\n\x0breadable_id\x18\x05 \x01(\tB\x08\n\x06_errorB\t\n\x07_output\"\xf0\x01\n\x15GetRunDetailsResponse\x12\r\n\x05input\x18\x01 \x01(\x0c\x12\x1d\n\x06status\x18\x02 \x01(\x0e\x32\r.v1.RunStatus\x12:\n\ttask_runs\x18\x03 \x03(\x0b\x32\'.v1.GetRunDetailsResponse.TaskRunsEntry\x12\x0c\n\x04\x64one\x18\x04 \x01(\x08\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x01(\x0c\x1a\x42\n\rTaskRunsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.v1.TaskRunDetail:\x02\x38\x01*$\n\x0eStickyStrategy\x12\x08\n\x04SOFT\x10\x00\x12\x08\n\x04HARD\x10\x01*]\n\x11RateLimitDuration\x12\n\n\x06SECOND\x10\x00\x12\n\n\x06MINUTE\x10\x01\x12\x08\n\x04HOUR\x10\x02\x12\x07\n\x03\x44\x41Y\x10\x03\x12\x08\n\x04WEEK\x10\x04\x12\t\n\x05MONTH\x10\x05\x12\x08\n\x04YEAR\x10\x06*N\n\tRunStatus\x12\n\n\x06QUEUED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tCOMPLETED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tCANCELLED\x10\x04*\x7f\n\x18\x43oncurrencyLimitStrategy\x12\x16\n\x12\x43\x41NCEL_IN_PROGRESS\x10\x00\x12\x0f\n\x0b\x44ROP_NEWEST\x10\x01\x12\x10\n\x0cQUEUE_NEWEST\x10\x02\x12\x15\n\x11GROUP_ROUND_ROBIN\x10\x03\x12\x11\n\rCANCEL_NEWEST\x10\x04*\x85\x01\n\x15WorkerLabelComparator\x12\t\n\x05\x45QUAL\x10\x00\x12\r\n\tNOT_EQUAL\x10\x01\x12\x10\n\x0cGREATER_THAN\x10\x02\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x03\x12\r\n\tLESS_THAN\x10\x04\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x05\x32\xfd\x02\n\x0c\x41\x64minService\x12R\n\x0bPutWorkflow\x12 
.v1.CreateWorkflowVersionRequest\x1a!.v1.CreateWorkflowVersionResponse\x12>\n\x0b\x43\x61ncelTasks\x12\x16.v1.CancelTasksRequest\x1a\x17.v1.CancelTasksResponse\x12>\n\x0bReplayTasks\x12\x16.v1.ReplayTasksRequest\x1a\x17.v1.ReplayTasksResponse\x12S\n\x12TriggerWorkflowRun\x12\x1d.v1.TriggerWorkflowRunRequest\x1a\x1e.v1.TriggerWorkflowRunResponse\x12\x44\n\rGetRunDetails\x12\x18.v1.GetRunDetailsRequest\x1a\x19.v1.GetRunDetailsResponseBBZ@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1b\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -36,18 +36,20 @@ _globals['DESCRIPTOR']._serialized_options = b'Z@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1' _globals['_CREATETASKOPTS_WORKERLABELSENTRY']._loaded_options = None _globals['_CREATETASKOPTS_WORKERLABELSENTRY']._serialized_options = b'8\001' + _globals['_CREATETASKOPTS_SLOTREQUESTSENTRY']._loaded_options = None + _globals['_CREATETASKOPTS_SLOTREQUESTSENTRY']._serialized_options = b'8\001' _globals['_GETRUNDETAILSRESPONSE_TASKRUNSENTRY']._loaded_options = None _globals['_GETRUNDETAILSRESPONSE_TASKRUNSENTRY']._serialized_options = b'8\001' - _globals['_STICKYSTRATEGY']._serialized_start=3094 - _globals['_STICKYSTRATEGY']._serialized_end=3130 - _globals['_RATELIMITDURATION']._serialized_start=3132 - _globals['_RATELIMITDURATION']._serialized_end=3225 - _globals['_RUNSTATUS']._serialized_start=3227 - _globals['_RUNSTATUS']._serialized_end=3305 - _globals['_CONCURRENCYLIMITSTRATEGY']._serialized_start=3307 - _globals['_CONCURRENCYLIMITSTRATEGY']._serialized_end=3434 - _globals['_WORKERLABELCOMPARATOR']._serialized_start=3437 - _globals['_WORKERLABELCOMPARATOR']._serialized_end=3570 + _globals['_STICKYSTRATEGY']._serialized_start=3228 + _globals['_STICKYSTRATEGY']._serialized_end=3264 + _globals['_RATELIMITDURATION']._serialized_start=3266 + _globals['_RATELIMITDURATION']._serialized_end=3359 + _globals['_RUNSTATUS']._serialized_start=3361 + _globals['_RUNSTATUS']._serialized_end=3439 + _globals['_CONCURRENCYLIMITSTRATEGY']._serialized_start=3441 + _globals['_CONCURRENCYLIMITSTRATEGY']._serialized_end=3568 + _globals['_WORKERLABELCOMPARATOR']._serialized_start=3571 + _globals['_WORKERLABELCOMPARATOR']._serialized_end=3704 _globals['_CANCELTASKSREQUEST']._serialized_start=86 _globals['_CANCELTASKSREQUEST']._serialized_end=177 _globals['_REPLAYTASKSREQUEST']._serialized_start=179 @@ -71,21 +73,23 @@ _globals['_DESIREDWORKERLABELS']._serialized_start=1533 _globals['_DESIREDWORKERLABELS']._serialized_end=1765 _globals['_CREATETASKOPTS']._serialized_start=1768 - _globals['_CREATETASKOPTS']._serialized_end=2329 - _globals['_CREATETASKOPTS_WORKERLABELSENTRY']._serialized_start=2174 - _globals['_CREATETASKOPTS_WORKERLABELSENTRY']._serialized_end=2250 - _globals['_CREATETASKRATELIMIT']._serialized_start=2332 - _globals['_CREATETASKRATELIMIT']._serialized_end=2585 - _globals['_CREATEWORKFLOWVERSIONRESPONSE']._serialized_start=2587 - _globals['_CREATEWORKFLOWVERSIONRESPONSE']._serialized_end=2651 - _globals['_GETRUNDETAILSREQUEST']._serialized_start=2653 - _globals['_GETRUNDETAILSREQUEST']._serialized_end=2696 - _globals['_TASKRUNDETAIL']._serialized_start=2699 - _globals['_TASKRUNDETAIL']._serialized_end=2849 - _globals['_GETRUNDETAILSRESPONSE']._serialized_start=2852 - _globals['_GETRUNDETAILSRESPONSE']._serialized_end=3092 - _globals['_GETRUNDETAILSRESPONSE_TASKRUNSENTRY']._serialized_start=3026 - _globals['_GETRUNDETAILSRESPONSE_TASKRUNSENTRY']._serialized_end=3092 - 
_globals['_ADMINSERVICE']._serialized_start=3573 - _globals['_ADMINSERVICE']._serialized_end=3954 + _globals['_CREATETASKOPTS']._serialized_end=2463 + _globals['_CREATETASKOPTS_WORKERLABELSENTRY']._serialized_start=2255 + _globals['_CREATETASKOPTS_WORKERLABELSENTRY']._serialized_end=2331 + _globals['_CREATETASKOPTS_SLOTREQUESTSENTRY']._serialized_start=2333 + _globals['_CREATETASKOPTS_SLOTREQUESTSENTRY']._serialized_end=2384 + _globals['_CREATETASKRATELIMIT']._serialized_start=2466 + _globals['_CREATETASKRATELIMIT']._serialized_end=2719 + _globals['_CREATEWORKFLOWVERSIONRESPONSE']._serialized_start=2721 + _globals['_CREATEWORKFLOWVERSIONRESPONSE']._serialized_end=2785 + _globals['_GETRUNDETAILSREQUEST']._serialized_start=2787 + _globals['_GETRUNDETAILSREQUEST']._serialized_end=2830 + _globals['_TASKRUNDETAIL']._serialized_start=2833 + _globals['_TASKRUNDETAIL']._serialized_end=2983 + _globals['_GETRUNDETAILSRESPONSE']._serialized_start=2986 + _globals['_GETRUNDETAILSRESPONSE']._serialized_end=3226 + _globals['_GETRUNDETAILSRESPONSE_TASKRUNSENTRY']._serialized_start=3160 + _globals['_GETRUNDETAILSRESPONSE_TASKRUNSENTRY']._serialized_end=3226 + _globals['_ADMINSERVICE']._serialized_start=3707 + _globals['_ADMINSERVICE']._serialized_end=4088 # @@protoc_insertion_point(module_scope) diff --git a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.pyi b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.pyi index 412e0e05ff..8a86ad4837 100644 --- a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.pyi +++ b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.pyi @@ -203,7 +203,7 @@ class DesiredWorkerLabels(_message.Message): def __init__(self, str_value: _Optional[str] = ..., int_value: _Optional[int] = ..., required: bool = ..., comparator: _Optional[_Union[WorkerLabelComparator, str]] = ..., weight: _Optional[int] = ...) -> None: ... class CreateTaskOpts(_message.Message): - __slots__ = ("readable_id", "action", "timeout", "inputs", "parents", "retries", "rate_limits", "worker_labels", "backoff_factor", "backoff_max_seconds", "concurrency", "conditions", "schedule_timeout") + __slots__ = ("readable_id", "action", "timeout", "inputs", "parents", "retries", "rate_limits", "worker_labels", "backoff_factor", "backoff_max_seconds", "concurrency", "conditions", "schedule_timeout", "is_durable", "slot_requests") class WorkerLabelsEntry(_message.Message): __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] @@ -211,6 +211,13 @@ class CreateTaskOpts(_message.Message): key: str value: DesiredWorkerLabels def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[DesiredWorkerLabels, _Mapping]] = ...) -> None: ... + class SlotRequestsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: int + def __init__(self, key: _Optional[str] = ..., value: _Optional[int] = ...) -> None: ... 
READABLE_ID_FIELD_NUMBER: _ClassVar[int] ACTION_FIELD_NUMBER: _ClassVar[int] TIMEOUT_FIELD_NUMBER: _ClassVar[int] @@ -224,6 +231,8 @@ class CreateTaskOpts(_message.Message): CONCURRENCY_FIELD_NUMBER: _ClassVar[int] CONDITIONS_FIELD_NUMBER: _ClassVar[int] SCHEDULE_TIMEOUT_FIELD_NUMBER: _ClassVar[int] + IS_DURABLE_FIELD_NUMBER: _ClassVar[int] + SLOT_REQUESTS_FIELD_NUMBER: _ClassVar[int] readable_id: str action: str timeout: str @@ -237,7 +246,9 @@ class CreateTaskOpts(_message.Message): concurrency: _containers.RepeatedCompositeFieldContainer[Concurrency] conditions: _condition_pb2.TaskConditions schedule_timeout: str - def __init__(self, readable_id: _Optional[str] = ..., action: _Optional[str] = ..., timeout: _Optional[str] = ..., inputs: _Optional[str] = ..., parents: _Optional[_Iterable[str]] = ..., retries: _Optional[int] = ..., rate_limits: _Optional[_Iterable[_Union[CreateTaskRateLimit, _Mapping]]] = ..., worker_labels: _Optional[_Mapping[str, DesiredWorkerLabels]] = ..., backoff_factor: _Optional[float] = ..., backoff_max_seconds: _Optional[int] = ..., concurrency: _Optional[_Iterable[_Union[Concurrency, _Mapping]]] = ..., conditions: _Optional[_Union[_condition_pb2.TaskConditions, _Mapping]] = ..., schedule_timeout: _Optional[str] = ...) -> None: ... + is_durable: bool + slot_requests: _containers.ScalarMap[str, int] + def __init__(self, readable_id: _Optional[str] = ..., action: _Optional[str] = ..., timeout: _Optional[str] = ..., inputs: _Optional[str] = ..., parents: _Optional[_Iterable[str]] = ..., retries: _Optional[int] = ..., rate_limits: _Optional[_Iterable[_Union[CreateTaskRateLimit, _Mapping]]] = ..., worker_labels: _Optional[_Mapping[str, DesiredWorkerLabels]] = ..., backoff_factor: _Optional[float] = ..., backoff_max_seconds: _Optional[int] = ..., concurrency: _Optional[_Iterable[_Union[Concurrency, _Mapping]]] = ..., conditions: _Optional[_Union[_condition_pb2.TaskConditions, _Mapping]] = ..., schedule_timeout: _Optional[str] = ..., is_durable: bool = ..., slot_requests: _Optional[_Mapping[str, int]] = ...) -> None: ... class CreateTaskRateLimit(_message.Message): __slots__ = ("key", "units", "key_expr", "units_expr", "limit_values_expr", "duration") diff --git a/sdks/python/hatchet_sdk/deprecated/__init__.py b/sdks/python/hatchet_sdk/deprecated/__init__.py new file mode 100644 index 0000000000..09d7c92333 --- /dev/null +++ b/sdks/python/hatchet_sdk/deprecated/__init__.py @@ -0,0 +1,2 @@ +# Legacy worker registration code for backward compatibility with older Hatchet engines +# that do not support the slot_config-based registration (pre v0.76.0). 
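The compatibility split this package exists for — the legacy integer slots field versus the new slot_config map on WorkerRegisterRequest — is easiest to see side by side. A hypothetical sketch, not part of this diff; the pool names and counts are illustrative assumptions (the contract only defines a string-to-int map):

from hatchet_sdk.contracts.dispatcher_pb2 import WorkerRegisterRequest

# Pre-v0.76.0 engines understand only a single undifferentiated slot count.
legacy = WorkerRegisterRequest(
    worker_name="my-worker",
    actions=["default:step-one"],
    slots=10,
)

# slot_config-aware engines accept named pools, e.g. separate capacity for
# durable tasks (cf. the is_durable / slot_requests fields added to
# CreateTaskOpts earlier in this diff).
current = WorkerRegisterRequest(
    worker_name="my-worker",
    actions=["default:step-one"],
    slot_config={"default": 10, "durable": 100},
)
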
diff --git a/sdks/python/hatchet_sdk/deprecated/action_listener.py b/sdks/python/hatchet_sdk/deprecated/action_listener.py new file mode 100644 index 0000000000..5353e1f4e1 --- /dev/null +++ b/sdks/python/hatchet_sdk/deprecated/action_listener.py @@ -0,0 +1,29 @@ +"""Legacy GetActionListenerRequest using slots: int (pre-slot-config engines).""" + +from pydantic import BaseModel, ConfigDict, Field, model_validator + +from hatchet_sdk.contracts.dispatcher_pb2 import WorkerLabels + + +class LegacyGetActionListenerRequest(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + worker_name: str + services: list[str] + actions: list[str] + slots: int + raw_labels: dict[str, str | int] = Field(default_factory=dict) + + labels: dict[str, WorkerLabels] = Field(default_factory=dict) + + @model_validator(mode="after") + def validate_labels(self) -> "LegacyGetActionListenerRequest": + self.labels = {} + + for key, value in self.raw_labels.items(): + if isinstance(value, int): + self.labels[key] = WorkerLabels(int_value=value) + else: + self.labels[key] = WorkerLabels(str_value=str(value)) + + return self diff --git a/sdks/python/hatchet_sdk/deprecated/action_listener_process.py b/sdks/python/hatchet_sdk/deprecated/action_listener_process.py new file mode 100644 index 0000000000..d599ec00ce --- /dev/null +++ b/sdks/python/hatchet_sdk/deprecated/action_listener_process.py @@ -0,0 +1,510 @@ +"""Legacy action listener process using slots: int (pre-slot-config engines).""" + +import asyncio +import contextlib +import logging +import signal +import time +from datetime import timedelta +from multiprocessing import Queue + +import grpc +from aiohttp import web +from aiohttp.web_request import Request +from aiohttp.web_response import Response +from grpc.aio import UnaryUnaryCall +from prometheus_client import Gauge, generate_latest + +from hatchet_sdk.client import Client +from hatchet_sdk.clients.dispatcher.action_listener import ActionListener +from hatchet_sdk.clients.rest.models.update_worker_request import UpdateWorkerRequest +from hatchet_sdk.config import ClientConfig +from hatchet_sdk.contracts.dispatcher_pb2 import ( + STEP_EVENT_TYPE_STARTED, + ActionEventResponse, + StepActionEvent, +) +from hatchet_sdk.deprecated.action_listener import LegacyGetActionListenerRequest +from hatchet_sdk.deprecated.dispatcher import legacy_get_action_listener +from hatchet_sdk.logger import logger +from hatchet_sdk.runnables.action import Action, ActionType +from hatchet_sdk.runnables.contextvars import ( + ctx_action_key, + ctx_step_run_id, + ctx_task_retry_count, + ctx_worker_id, + ctx_workflow_run_id, +) +from hatchet_sdk.utils.backoff import exp_backoff_sleep +from hatchet_sdk.utils.typing import STOP_LOOP, STOP_LOOP_TYPE +from hatchet_sdk.worker.action_listener_process import ( + BLOCKED_THREAD_WARNING, + STARTING_UNHEALTHY_AFTER_SECONDS, + ActionEvent, + HealthStatus, +) + +ACTION_EVENT_RETRY_COUNT = 5 + + +class LegacyWorkerActionListenerProcess: + """Worker action listener process that uses the legacy slots: int registration.""" + + def __init__( + self, + name: str, + actions: list[str], + slots: int, + config: ClientConfig, + action_queue: "Queue[Action]", + event_queue: "Queue[ActionEvent | STOP_LOOP_TYPE]", + handle_kill: bool, + debug: bool, + labels: dict[str, str | int], + ) -> None: + self.name = name + self.actions = actions + self.slots = slots + self.config = config + self.action_queue = action_queue + self.event_queue = event_queue + self.debug = debug + self.labels = labels + 
self.handle_kill = handle_kill + + self._health_runner: web.AppRunner | None = None + self._listener_health_gauge: Gauge | None = None + self._event_loop_lag_gauge: Gauge | None = None + self._event_loop_monitor_task: asyncio.Task[None] | None = None + self._event_loop_last_lag_seconds: float = 0.0 + self._event_loop_blocked_since: float | None = None + self._waiting_steps_blocked_since: float | None = None + self._starting_since: float = time.time() + + self.listener: ActionListener | None = None + self.killing = False + self.action_loop_task: asyncio.Task[None] | None = None + self.event_send_loop_task: asyncio.Task[None] | None = None + self.running_step_runs: dict[str, float] = {} + self.step_action_events: set[ + asyncio.Task[UnaryUnaryCall[StepActionEvent, ActionEventResponse] | None] + ] = set() + + if self.debug: + logger.setLevel(logging.DEBUG) + + self.client = Client(config=self.config, debug=self.debug) + + loop = asyncio.get_event_loop() + loop.add_signal_handler( + signal.SIGINT, lambda: asyncio.create_task(self.pause_task_assignment()) + ) + loop.add_signal_handler( + signal.SIGTERM, lambda: asyncio.create_task(self.pause_task_assignment()) + ) + loop.add_signal_handler( + signal.SIGQUIT, lambda: asyncio.create_task(self.exit_gracefully()) + ) + + if self.config.healthcheck.enabled: + self._listener_health_gauge = Gauge( + "hatchet_worker_listener_health", + "Listener health (1 healthy, 0 unhealthy)", + ) + self._event_loop_lag_gauge = Gauge( + "hatchet_worker_event_loop_lag_seconds", + "Event loop lag in seconds (listener process)", + ) + + async def _monitor_event_loop(self) -> None: + interval = 0.5 + while not self.killing: + start = time.time() + await asyncio.sleep(interval) + elapsed = time.time() - start + lag = max(0.0, elapsed - interval) + if ( + timedelta(seconds=lag) + >= self.config.healthcheck.event_loop_block_threshold_seconds + ): + if self._event_loop_blocked_since is None: + self._event_loop_blocked_since = start + interval + self._event_loop_last_lag_seconds = max( + lag, time.time() - self._event_loop_blocked_since + ) + else: + self._event_loop_last_lag_seconds = lag + + if ( + timedelta(seconds=lag) + < self.config.healthcheck.event_loop_block_threshold_seconds + ): + self._event_loop_blocked_since = None + + def _starting_timed_out(self) -> bool: + return (time.time() - self._starting_since) > STARTING_UNHEALTHY_AFTER_SECONDS + + def _compute_health(self) -> HealthStatus: + if self.killing: + return HealthStatus.UNHEALTHY + + if ( + self._event_loop_blocked_since is not None + and timedelta(seconds=(time.time() - self._event_loop_blocked_since)) + > self.config.healthcheck.event_loop_block_threshold_seconds + ): + return HealthStatus.UNHEALTHY + + if ( + self._waiting_steps_blocked_since is not None + and timedelta(seconds=(time.time() - self._waiting_steps_blocked_since)) + > self.config.healthcheck.event_loop_block_threshold_seconds + ): + return HealthStatus.UNHEALTHY + + if self.listener is None: + if self._starting_timed_out(): + return HealthStatus.UNHEALTHY + return HealthStatus.STARTING + + listener = self.listener + + last_attempt = listener.last_connection_attempt or 0.0 + if last_attempt <= 0: + if self._starting_timed_out(): + return HealthStatus.UNHEALTHY + return HealthStatus.STARTING + + if listener.listen_strategy == "v2": + now = time.time() + time_last_hb = listener.time_last_hb_succeeded or 0.0 + has_hb_success = 0.0 < time_last_hb <= now + ok = bool( + listener.heartbeat_task is not None + and listener.last_heartbeat_succeeded + 
and has_hb_success + ) + else: + ok = bool(listener.retries == 0) + + return HealthStatus.HEALTHY if ok else HealthStatus.UNHEALTHY + + async def _health_handler(self, request: Request) -> Response: + status = self._compute_health() + ok = status == HealthStatus.HEALTHY + response = {"status": status.value} + return web.json_response(response, status=200 if ok else 503) + + async def _metrics_handler(self, request: Request) -> Response: + status = self._compute_health() + ok = status == HealthStatus.HEALTHY + + if self._listener_health_gauge is not None: + self._listener_health_gauge.set(1 if ok else 0) + + if self._event_loop_lag_gauge is not None: + self._event_loop_lag_gauge.set(self._event_loop_last_lag_seconds) + + return web.Response(body=generate_latest(), content_type="text/plain") + + async def start_health_server(self) -> None: + if not self.config.healthcheck.enabled: + return + + if self._health_runner is not None: + return + + app = web.Application() + app.add_routes( + [ + web.get("/health", self._health_handler), + web.get("/metrics", self._metrics_handler), + ] + ) + + runner = web.AppRunner(app) + + try: + await runner.setup() + await web.TCPSite( + runner, + host=self.config.healthcheck.bind_address, + port=self.config.healthcheck.port, + ).start() + except Exception: + logger.exception("failed to start healthcheck server (listener process)") + return + + self._health_runner = runner + logger.info( + f"healthcheck server (listener process) running on {self.config.healthcheck.bind_address}:{self.config.healthcheck.port}" + ) + + if self._event_loop_monitor_task is None: + self._event_loop_monitor_task = asyncio.create_task( + self._monitor_event_loop() + ) + + async def stop_health_server(self) -> None: + if self._event_loop_monitor_task is not None: + task = self._event_loop_monitor_task + self._event_loop_monitor_task = None + task.cancel() + with contextlib.suppress(asyncio.CancelledError): + await task + + if self._health_runner is None: + return + + try: + await self._health_runner.cleanup() + except Exception: + logger.exception("failed to stop healthcheck server (listener process)") + finally: + self._health_runner = None + + async def pause_task_assignment(self) -> None: + if self.listener is None: + raise ValueError("listener not started") + + await self.client.workers.aio_update( + worker_id=self.listener.worker_id, + opts=UpdateWorkerRequest(isPaused=True), + ) + + async def start(self, retry_attempt: int = 0) -> None: + if retry_attempt > 5: + logger.error("could not start action listener") + return + + logger.debug(f"starting action listener: {self.name}") + + try: + from hatchet_sdk.clients.dispatcher.dispatcher import DispatcherClient + + self.dispatcher_client = DispatcherClient(self.config) + + self.listener = await legacy_get_action_listener( + self.config, + LegacyGetActionListenerRequest( + worker_name=self.name, + services=["default"], + actions=self.actions, + slots=self.slots, + raw_labels=self.labels, + ), + ) + + logger.debug(f"acquired action listener: {self.listener.worker_id}") + except grpc.RpcError: + logger.exception("could not start action listener") + return + + self.action_loop_task = asyncio.create_task(self.start_action_loop()) + self.event_send_loop_task = asyncio.create_task(self.start_event_send_loop()) + self.blocked_main_loop = asyncio.create_task(self.start_blocked_main_loop()) + + async def _get_event(self) -> ActionEvent | STOP_LOOP_TYPE: + loop = asyncio.get_running_loop() + return await loop.run_in_executor(None, 
self.event_queue.get) + + async def start_event_send_loop(self) -> None: + while True: + event = await self._get_event() + if event == STOP_LOOP: + logger.debug("stopping event send loop...") + break + + logger.debug(f"tx: event: {event.action.action_id}/{event.type}") + t = asyncio.create_task(self.send_event(event)) + self.step_action_events.add(t) + t.add_done_callback(lambda t: self.step_action_events.discard(t)) + + async def start_blocked_main_loop(self) -> None: + threshold = 1 + while not self.killing: + count = 0 + for start_time in self.running_step_runs.values(): + diff = self.now() - start_time + if diff > threshold: + count += 1 + + if count > 0: + if self._waiting_steps_blocked_since is None: + self._waiting_steps_blocked_since = time.time() + blocked_for = time.time() - self._waiting_steps_blocked_since + logger.warning( + f"{BLOCKED_THREAD_WARNING} Waiting Steps {count} blocked_for={blocked_for:.1f}s" + ) + else: + self._waiting_steps_blocked_since = None + await asyncio.sleep(1) + + async def send_event(self, event: ActionEvent, retry_attempt: int = 1) -> None: + try: + match event.action.action_type: + case ActionType.START_STEP_RUN: + if event.type == STEP_EVENT_TYPE_STARTED: + if event.action.step_run_id in self.running_step_runs: + diff = ( + self.now() + - self.running_step_runs[event.action.step_run_id] + ) + if diff > 0.1: + logger.warning( + f"{BLOCKED_THREAD_WARNING} time to start: {diff}s" + ) + else: + logger.debug(f"start time: {diff}") + del self.running_step_runs[event.action.step_run_id] + else: + self.running_step_runs[event.action.step_run_id] = ( + self.now() + ) + + send_started_event_task = asyncio.create_task( + self.dispatcher_client.send_step_action_event( + event.action, + event.type, + event.payload, + event.should_not_retry, + ) + ) + + self.step_action_events.add(send_started_event_task) + send_started_event_task.add_done_callback( + lambda t: self.step_action_events.discard(t) + ) + case ActionType.CANCEL_STEP_RUN: + logger.debug("unimplemented event send") + case _: + logger.error("unknown action type for event send") + except Exception: + logger.exception( + f"could not send action event ({retry_attempt}/{ACTION_EVENT_RETRY_COUNT})" + ) + if retry_attempt <= ACTION_EVENT_RETRY_COUNT: + await exp_backoff_sleep(retry_attempt, 1) + await self.send_event(event, retry_attempt + 1) + + def now(self) -> float: + return time.time() + + async def start_action_loop(self) -> None: + if self.listener is None: + raise ValueError("listener not started") + + try: + async for action in self.listener: + if action is None: + break + + ctx_step_run_id.set(action.step_run_id) + ctx_workflow_run_id.set(action.workflow_run_id) + ctx_worker_id.set(action.worker_id) + ctx_action_key.set(action.key) + ctx_task_retry_count.set(action.retry_count) + + match action.action_type: + case ActionType.START_STEP_RUN: + self.event_queue.put( + ActionEvent( + action=action, + type=STEP_EVENT_TYPE_STARTED, + payload=None, + should_not_retry=False, + ) + ) + logger.info( + f"rx: start step run: {action.step_run_id}/{action.action_id}" + ) + + if action.step_run_id in self.running_step_runs: + logger.warning( + f"step run already running: {action.step_run_id}" + ) + + case ActionType.CANCEL_STEP_RUN: + logger.info(f"rx: cancel step run: {action.step_run_id}") + case _: + logger.error( + f"rx: unknown action type ({action.action_type}): {action.action_type}" + ) + try: + self.action_queue.put(action) + except Exception: + logger.exception("error putting action") + + except 
Exception:
+            logger.exception("error in action loop")
+        finally:
+            logger.info("action loop closed")
+            if not self.killing:
+                await self.exit_gracefully()
+
+    async def cleanup(self) -> None:
+        self.killing = True
+
+        await self.stop_health_server()
+
+        if self.listener is not None:
+            self.listener.cleanup()
+
+        self.event_queue.put(STOP_LOOP)
+
+    async def exit_gracefully(self) -> None:
+        if self.listener:
+            self.listener.stop_signal = True
+
+        if self.listener is not None:
+            try:
+                await self.pause_task_assignment()
+            except Exception:
+                logger.debug("failed to pause task assignment during graceful exit")
+
+        if self.killing:
+            return
+
+        logger.debug("closing action listener...")
+
+        await self.cleanup()
+
+        while not self.event_queue.empty():
+            pass
+
+        logger.info("action listener closed")
+
+    def exit_forcefully(self) -> None:
+        asyncio.run(self.cleanup())
+        logger.debug("forcefully closing listener...")
+
+
+def legacy_worker_action_listener_process(
+    name: str,
+    actions: list[str],
+    slots: int,
+    config: ClientConfig,
+    action_queue: "Queue[Action]",
+    event_queue: "Queue[ActionEvent | STOP_LOOP_TYPE]",
+    handle_kill: bool,
+    debug: bool,
+    labels: dict[str, str | int],
+) -> None:
+    async def run() -> None:
+        process = LegacyWorkerActionListenerProcess(
+            name=name,
+            actions=actions,
+            slots=slots,
+            config=config,
+            action_queue=action_queue,
+            event_queue=event_queue,
+            handle_kill=handle_kill,
+            debug=debug,
+            labels=labels,
+        )
+        await process.start_health_server()
+        await process.start()
+        while not process.killing:  # noqa: ASYNC110
+            await asyncio.sleep(0.1)
+
+    asyncio.run(run())
diff --git a/sdks/python/hatchet_sdk/deprecated/deprecation.py b/sdks/python/hatchet_sdk/deprecated/deprecation.py
new file mode 100644
index 0000000000..a91071b140
--- /dev/null
+++ b/sdks/python/hatchet_sdk/deprecated/deprecation.py
@@ -0,0 +1,97 @@
+"""Generic time-aware deprecation helper.
+
+Timeline (from a given start date, with configurable windows):
+    0 to warn_days: WARNING logged once per feature
+    warn_days to error_days: ERROR logged once per feature
+    after error_days: raises DeprecationError 1-in-5 calls (20% chance)
+
+Defaults: warn_days=90, error_days=None (raise phase disabled unless explicitly set; error-level logging continues indefinitely).
+"""
+
+import random
+from datetime import datetime, timezone
+
+from hatchet_sdk.logger import logger
+
+_DEFAULT_WARN_DAYS = 90
+
+# Tracks which features have already been logged (keyed by feature name).
+_already_logged: set[str] = set()
+
+
+class DeprecationError(Exception):
+    """Raised when a deprecation grace period has expired."""
+
+
+def parse_semver(v: str) -> tuple[int, int, int]:
+    """Parse a semver string like ``"v0.78.23"`` into ``(major, minor, patch)``.
+
+    Returns ``(0, 0, 0)`` if parsing fails.
+    """
+    v = v.lstrip("v").split("-", 1)[0]
+    parts = v.split(".")
+    if len(parts) != 3:
+        return (0, 0, 0)
+    try:
+        return (int(parts[0]), int(parts[1]), int(parts[2]))
+    except ValueError:
+        return (0, 0, 0)
+
+
+def semver_less_than(a: str, b: str) -> bool:
+    """Return ``True`` if semver string *a* is strictly less than *b*."""
+    return parse_semver(a) < parse_semver(b)
+
+
+def emit_deprecation_notice(
+    feature: str,
+    message: str,
+    start: datetime,
+    *,
+    warn_days: int = _DEFAULT_WARN_DAYS,
+    error_days: int | None = None,
+) -> None:
+    """Emit a time-aware deprecation notice.
+
+    Args:
+        feature: A short identifier for the deprecated feature (used for
+            deduplication so each feature only logs once per process).
+ message: The human-readable deprecation message. + start: The UTC datetime when the deprecation window began. + warn_days: Days after *start* during which a warning is logged (default 90). + error_days: Days after *start* during which an error is logged. + After this window, calls have a 20% chance of raising. + If None (default), the error/raise phase is never reached — + the notice stays at error-level logging indefinitely. + + Raises: + DeprecationError: After the error_days window, raised ~20% of the time. + """ + now = datetime.now(tz=timezone.utc) + days_since = (now - start).days + + if days_since < warn_days: + # Phase 1: warning + if feature not in _already_logged: + logger.warning(message) + _already_logged.add(feature) + + elif error_days is None or days_since < error_days: + # Phase 2: error-level log (indefinite when error_days is None) + if feature not in _already_logged: + logger.error( + f"{message} " "This fallback will be removed soon. Upgrade immediately." + ) + _already_logged.add(feature) + + else: + # Phase 3: raise 1-in-5 times + if feature not in _already_logged: + logger.error( + f"{message} " + "This fallback is no longer supported and will fail intermittently." + ) + _already_logged.add(feature) + + if random.random() < 0.2: + raise DeprecationError(f"{feature}: {message}") diff --git a/sdks/python/hatchet_sdk/deprecated/dispatcher.py b/sdks/python/hatchet_sdk/deprecated/dispatcher.py new file mode 100644 index 0000000000..343871abaa --- /dev/null +++ b/sdks/python/hatchet_sdk/deprecated/dispatcher.py @@ -0,0 +1,60 @@ +"""Legacy dispatcher registration using slots: int (pre-slot-config engines).""" + +import platform +from importlib.metadata import version +from sys import version_info +from typing import cast + +from hatchet_sdk.clients.dispatcher.action_listener import ActionListener +from hatchet_sdk.config import ClientConfig +from hatchet_sdk.connection import new_conn +from hatchet_sdk.contracts.dispatcher_pb2 import ( + SDKS, + RuntimeInfo, + WorkerLabels, + WorkerRegisterRequest, + WorkerRegisterResponse, +) +from hatchet_sdk.contracts.dispatcher_pb2_grpc import DispatcherStub +from hatchet_sdk.deprecated.action_listener import LegacyGetActionListenerRequest +from hatchet_sdk.metadata import get_metadata + +DEFAULT_REGISTER_TIMEOUT = 30 + + +async def legacy_get_action_listener( + config: ClientConfig, + req: LegacyGetActionListenerRequest, +) -> ActionListener: + """Register a worker using the legacy slots field (for pre-slot-config engines).""" + aio_conn = new_conn(config, True) + aio_client = DispatcherStub(aio_conn) + + # Override labels with the preset labels + preset_labels = config.worker_preset_labels + + for key, value in preset_labels.items(): + req.labels[key] = WorkerLabels(str_value=str(value)) + + response = cast( + WorkerRegisterResponse, + await aio_client.Register( # type: ignore[misc] + WorkerRegisterRequest( + worker_name=req.worker_name, + actions=req.actions, + services=req.services, + slots=req.slots, + labels=req.labels, + runtime_info=RuntimeInfo( + sdk_version=version("hatchet_sdk"), + language=SDKS.PYTHON, + language_version=f"{version_info.major}.{version_info.minor}.{version_info.micro}", + os=platform.system().lower(), + ), + ), + timeout=DEFAULT_REGISTER_TIMEOUT, + metadata=get_metadata(config.token), + ), + ) + + return ActionListener(config, response.worker_id) diff --git a/sdks/python/hatchet_sdk/deprecated/worker.py b/sdks/python/hatchet_sdk/deprecated/worker.py new file mode 100644 index 0000000000..1a84148a54 --- 
/dev/null +++ b/sdks/python/hatchet_sdk/deprecated/worker.py @@ -0,0 +1,250 @@ +"""Legacy dual-worker orchestration for pre-slot-config engines. + +When connected to an older Hatchet engine that does not support multiple slot types, +this module provides the old worker start flow which: + - Splits tasks into durable and non-durable registries + - Spawns separate action listener processes for each + - Creates separate action runners for each + - Monitors health of both processes +""" + +from __future__ import annotations + +import asyncio +import multiprocessing.context +import os +import sys +from multiprocessing import Queue +from typing import TYPE_CHECKING, Any + +from hatchet_sdk.deprecated.action_listener_process import ( + legacy_worker_action_listener_process, +) +from hatchet_sdk.logger import logger +from hatchet_sdk.runnables.action import Action +from hatchet_sdk.runnables.contextvars import task_count +from hatchet_sdk.runnables.task import Task +from hatchet_sdk.utils.typing import STOP_LOOP_TYPE +from hatchet_sdk.worker.action_listener_process import ActionEvent +from hatchet_sdk.worker.runner.run_loop_manager import WorkerActionRunLoopManager + +if TYPE_CHECKING: + from hatchet_sdk.worker.worker import Worker + + +async def legacy_aio_start(worker: Worker) -> None: + """Start the worker using the legacy dual-worker architecture. + + This is the old _aio_start flow that splits durable and non-durable tasks + into separate processes, for engines that don't understand slot_config. + """ + from hatchet_sdk.exceptions import LifespanSetupError + from hatchet_sdk.worker.worker import WorkerStatus + + main_pid = os.getpid() + + logger.info("------------------------------------------") + logger.info("STARTING HATCHET (legacy mode)...") + logger.debug(f"worker runtime starting on PID: {main_pid}") + + worker._status = WorkerStatus.STARTING + + if len(worker.action_registry.keys()) == 0: + raise ValueError( + "no actions registered, register workflows or actions before starting worker" + ) + + # Split the unified action registry into durable/non-durable + durable_action_registry: dict[str, Task[Any, Any]] = {} + non_durable_action_registry: dict[str, Task[Any, Any]] = {} + + for action_name, task in worker.action_registry.items(): + if task.is_durable: + durable_action_registry[action_name] = task + else: + non_durable_action_registry[action_name] = task + + has_any_non_durable = len(non_durable_action_registry) > 0 + has_any_durable = len(durable_action_registry) > 0 + + # Create separate queues for durable workers + durable_action_queue: Queue[Action | STOP_LOOP_TYPE] = worker.ctx.Queue() + durable_event_queue: Queue[ActionEvent] = worker.ctx.Queue() + + lifespan_context = None + if worker.lifespan: + try: + lifespan_context = await worker._setup_lifespan() + except LifespanSetupError as e: + logger.exception("lifespan setup failed") + if worker.loop: + worker.loop.stop() + raise e + + # Slot conversion: use default and durable from slot_config + default_slots = worker.slot_config.get("default", 100) + durable_slots = worker.slot_config.get("durable", 1000) + + durable_action_listener_process = None + durable_action_runner = None + + if has_any_non_durable: + worker.action_listener_process = _legacy_start_action_listener( + worker, + is_durable=False, + actions=list(non_durable_action_registry.keys()), + slots=default_slots, + action_queue=worker.action_queue, + event_queue=worker.event_queue, + ) + worker.action_runner = _legacy_run_action_runner( + worker, + name_suffix="", + 
action_registry=non_durable_action_registry, + max_runs=default_slots, + action_queue=worker.action_queue, + event_queue=worker.event_queue, + lifespan_context=lifespan_context, + ) + + if has_any_durable: + durable_action_listener_process = _legacy_start_action_listener( + worker, + is_durable=True, + actions=list(durable_action_registry.keys()), + slots=durable_slots, + action_queue=durable_action_queue, + event_queue=durable_event_queue, + ) + durable_action_runner = _legacy_run_action_runner( + worker, + name_suffix="_durable", + action_registry=durable_action_registry, + max_runs=durable_slots, + action_queue=durable_action_queue, + event_queue=durable_event_queue, + lifespan_context=lifespan_context, + ) + + if worker.loop: + # Store references for cleanup BEFORE the health check blocks, + # so they are available when exit_gracefully() runs. + worker.durable_action_listener_process = durable_action_listener_process + worker.durable_action_queue = durable_action_queue + worker.durable_event_queue = durable_event_queue + worker._legacy_durable_action_runner = durable_action_runner + + worker._lifespan_cleanup_complete = asyncio.Event() + worker.action_listener_health_check = worker.loop.create_task( + _legacy_check_listener_health( + worker, + durable_action_listener_process, + ) + ) + + await worker.action_listener_health_check + + try: + await worker._cleanup_lifespan() + except Exception: + logger.exception("lifespan cleanup failed") + finally: + worker._lifespan_cleanup_complete.set() + + +def _legacy_start_action_listener( + worker: Worker, + is_durable: bool, + actions: list[str], + slots: int, + action_queue: Queue[Any], + event_queue: Queue[Any], +) -> multiprocessing.context.SpawnProcess: + try: + process = worker.ctx.Process( + target=legacy_worker_action_listener_process, + args=( + worker.name + ("_durable" if is_durable else ""), + actions, + slots, + worker.config, + action_queue, + event_queue, + worker.handle_kill, + worker.client.debug, + worker.labels, + ), + ) + process.start() + logger.debug( + f"legacy action listener ({'durable' if is_durable else 'non-durable'}) starting on PID: {process.pid}" + ) + return process + except Exception: + logger.exception("failed to start legacy action listener") + sys.exit(1) + + +def _legacy_run_action_runner( + worker: Worker, + name_suffix: str, + action_registry: dict[str, Task[Any, Any]], + max_runs: int, + action_queue: Queue[Any], + event_queue: Queue[Any], + lifespan_context: Any | None, +) -> WorkerActionRunLoopManager: + if worker.loop: + return WorkerActionRunLoopManager( + worker.name + name_suffix, + action_registry, + max_runs, + worker.config, + action_queue, + event_queue, + worker.loop, + worker.handle_kill, + worker.client.debug, + worker.labels, + lifespan_context, + ) + + raise RuntimeError("event loop not set, cannot start action runner") + + +async def _legacy_check_listener_health( + worker: Worker, + durable_action_listener_process: multiprocessing.context.SpawnProcess | None, +) -> None: + from hatchet_sdk.worker.worker import WorkerStatus + + logger.debug("starting legacy action listener health check...") + try: + while not worker.killing: + if ( + not worker.action_listener_process + and not durable_action_listener_process + ) or ( + worker.action_listener_process + and durable_action_listener_process + and not worker.action_listener_process.is_alive() + and not durable_action_listener_process.is_alive() + ): + logger.debug("child action listener process killed...") + worker._status = 
WorkerStatus.UNHEALTHY
+                if worker.loop:
+                    worker.loop.create_task(worker.exit_gracefully())
+                break
+
+            if (
+                worker.config.terminate_worker_after_num_tasks
+                and task_count.value >= worker.config.terminate_worker_after_num_tasks
+            ):
+                if worker.loop:
+                    worker.loop.create_task(worker.exit_gracefully())
+                break
+
+            worker._status = WorkerStatus.HEALTHY
+            await asyncio.sleep(1)
+    except Exception:
+        logger.exception("error checking listener health")
diff --git a/sdks/python/hatchet_sdk/hatchet.py b/sdks/python/hatchet_sdk/hatchet.py
index 1d76fac4c9..902d7ea9cb 100644
--- a/sdks/python/hatchet_sdk/hatchet.py
+++ b/sdks/python/hatchet_sdk/hatchet.py
@@ -39,6 +39,7 @@
     normalize_validator,
 )
 from hatchet_sdk.runnables.workflow import BaseWorkflow, Standalone, Workflow
+from hatchet_sdk.utils.slots import normalize_slot_config, resolve_worker_slot_config
 from hatchet_sdk.utils.timedelta_to_expression import Duration
 from hatchet_sdk.utils.typing import CoroutineLike, JSONSerializableMapping
 from hatchet_sdk.worker.worker import LifespanFn, Worker
@@ -186,8 +187,8 @@ def namespace(self) -> str:
     def worker(
         self,
         name: str,
-        slots: int = 100,
-        durable_slots: int = 1_000,
+        slots: int | None = None,
+        durable_slots: int | None = None,
         labels: dict[str, str | int] | None = None,
         workflows: list[BaseWorkflow[Any]] | None = None,
         lifespan: LifespanFn | None = None,
@@ -197,9 +198,9 @@ def worker(
 
         :param name: The name of the worker.
 
-        :param slots: The number of workflow slots on the worker. In other words, the number of concurrent tasks the worker can run at any point in time
+        :param slots: The number of slots for standard tasks, i.e. how many non-durable tasks the worker can run concurrently. If omitted, a default is resolved from the registered workflows.
 
-        :param durable_slots: The number of durable workflow slots on the worker. In other words, the number of concurrent tasks the worker can run at any point in time that are durable.
+        :param durable_slots: The number of slots for durable tasks, i.e. how many durable tasks the worker can run concurrently. If omitted, a default is resolved from the registered workflows.
 
         :param labels: A dictionary of labels to assign to the worker. For more details, view examples on affinity and worker labels.
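A minimal sketch of how the new `None` defaults above get resolved, using `resolve_worker_slot_config` and `normalize_slot_config` from `hatchet_sdk/utils/slots.py` added later in this diff (illustration only, not part of the patch):

```python
from hatchet_sdk.utils.slots import normalize_slot_config, resolve_worker_slot_config
from hatchet_sdk.worker.slot_types import SlotType

# No explicit counts and no workflows: fall back to 100 "default" slots.
assert resolve_worker_slot_config(None, None, None, None) == {SlotType.DEFAULT: 100}

# Legacy integer arguments are carried through, keyed by slot type.
resolved = resolve_worker_slot_config(None, 4, 8, None)
assert resolved == {SlotType.DEFAULT: 4, SlotType.DURABLE: 8}

# SlotType is a str Enum, so normalization yields plain string keys,
# which is the shape Worker's slot_config expects.
assert normalize_slot_config(resolved) == {"default": 4, "durable": 8}
```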
@@ -215,10 +216,16 @@ def worker( except RuntimeError: loop = None + resolved_config = resolve_worker_slot_config( + None, + slots, + durable_slots, + workflows, + ) + return Worker( name=name, - slots=slots, - durable_slots=durable_slots, + slot_config=normalize_slot_config(resolved_config), labels=labels, config=self._client.config, debug=self._client.debug, diff --git a/sdks/python/hatchet_sdk/runnables/task.py b/sdks/python/hatchet_sdk/runnables/task.py index c14d691747..34b7bd332c 100644 --- a/sdks/python/hatchet_sdk/runnables/task.py +++ b/sdks/python/hatchet_sdk/runnables/task.py @@ -149,8 +149,12 @@ def __init__( wait_for: list[Condition | OrGroup] | None, skip_if: list[Condition | OrGroup] | None, cancel_if: list[Condition | OrGroup] | None, + slot_requests: dict[str, int] | None = None, ) -> None: self.is_durable = is_durable + if slot_requests is None: + slot_requests = {"durable": 1} if is_durable else {"default": 1} + self.slot_requests = slot_requests self.fn = _fn self.is_async_function = is_async_fn(self.fn) # type: ignore diff --git a/sdks/python/hatchet_sdk/utils/slots.py b/sdks/python/hatchet_sdk/utils/slots.py new file mode 100644 index 0000000000..95815531ed --- /dev/null +++ b/sdks/python/hatchet_sdk/utils/slots.py @@ -0,0 +1,91 @@ +from typing import Any + +from hatchet_sdk.runnables.workflow import BaseWorkflow +from hatchet_sdk.worker.slot_types import SlotType + + +def normalize_slot_config( + slot_config: dict[SlotType | str, int], +) -> dict[str, int]: + return { + (key.value if isinstance(key, SlotType) else key): value + for key, value in slot_config.items() + } + + +def has_slot_config( + slot_config: dict[SlotType | str, int], slot_type: SlotType +) -> bool: + return slot_type in slot_config or slot_type.value in slot_config + + +def ensure_slot_config( + slot_config: dict[SlotType | str, int], slot_type: SlotType, default_value: int +) -> dict[SlotType | str, int]: + if has_slot_config(slot_config, slot_type): + return slot_config + return {**slot_config, slot_type: default_value} + + +def required_slot_types_from_workflows( + workflows: list[BaseWorkflow[Any]] | None, +) -> set[str]: + required: set[str] = set() + if not workflows: + return required + + for workflow in workflows: + for task in workflow.tasks: + if task.is_durable: + required.add(SlotType.DURABLE.value) + for key in task.slot_requests: + required.add(key.value if isinstance(key, SlotType) else key) + + return required + + +def resolve_worker_slot_config( + slot_config: dict[SlotType | str, int] | None, + slots: int | None, + durable_slots: int | None, + workflows: list[BaseWorkflow[Any]] | None, +) -> dict[SlotType | str, int]: + resolved_config: dict[SlotType | str, int] + + if slot_config is not None: + resolved_config = slot_config + else: + legacy_config: dict[SlotType | str, int] = { + key: value + for key, value in ( + (SlotType.DEFAULT, slots), + (SlotType.DURABLE, durable_slots), + ) + if value is not None + } + resolved_config = legacy_config if legacy_config else {} + + required_slot_types = required_slot_types_from_workflows(workflows) + + # Apply defaults for well-known slot types + if SlotType.DEFAULT.value in required_slot_types: + resolved_config = ensure_slot_config(resolved_config, SlotType.DEFAULT, 100) + if SlotType.DURABLE.value in required_slot_types: + resolved_config = ensure_slot_config(resolved_config, SlotType.DURABLE, 1000) + + # Raise for any required custom slot types that are missing from the config + configured_keys = { + key.value if isinstance(key, SlotType) 
else key for key in resolved_config + } + missing = required_slot_types - configured_keys + if missing: + formatted = ", ".join(sorted(missing)) + raise ValueError( + f"Worker is missing slot config for required slot type(s): {formatted}. " + "Please provide a slot_config entry for each custom slot type used by your workflows." + ) + + if not resolved_config: + resolved_config = {SlotType.DEFAULT: 100} + + return resolved_config diff --git a/sdks/python/hatchet_sdk/worker/action_listener_process.py b/sdks/python/hatchet_sdk/worker/action_listener_process.py index 5befd4bce6..6db65998b4 100644 --- a/sdks/python/hatchet_sdk/worker/action_listener_process.py +++ b/sdks/python/hatchet_sdk/worker/action_listener_process.py @@ -3,6 +3,7 @@ import logging import signal import time +import warnings from dataclasses import dataclass from datetime import timedelta from enum import Enum @@ -67,7 +68,7 @@ def __init__( self, name: str, actions: list[str], - slots: int, + slot_config: dict[str, int], config: ClientConfig, action_queue: "Queue[Action]", event_queue: "Queue[ActionEvent | STOP_LOOP_TYPE]", @@ -77,7 +78,9 @@ def __init__( ) -> None: self.name = name self.actions = actions - self.slots = slots + self.slot_config = slot_config + self._slots = slot_config.get("default", 0) + self._durable_slots = slot_config.get("durable", 0) self.config = config self.action_queue = action_queue self.event_queue = event_queue @@ -129,6 +132,24 @@ def __init__( "Event loop lag in seconds (listener process)", ) + @property + def slots(self) -> int: + warnings.warn( + "WorkerActionListenerProcess.slots is deprecated; use slot_config['default'] instead.", + DeprecationWarning, + stacklevel=2, + ) + return self._slots + + @property + def durable_slots(self) -> int: + warnings.warn( + "WorkerActionListenerProcess.durable_slots is deprecated; use slot_config['durable'] instead.", + DeprecationWarning, + stacklevel=2, + ) + return self._durable_slots + async def _monitor_event_loop(self) -> None: # If the loop is blocked, this coroutine itself can't run; when it resumes, # we detect the lag by comparing elapsed time vs expected sleep. 
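The `slots`/`durable_slots` properties added above keep old call sites working while steering them toward `slot_config`. A self-contained illustration of the pattern (hypothetical `SlotHolder` class, not part of the patch):

```python
import warnings


class SlotHolder:
    """Hypothetical stand-in for the worker classes patched here."""

    def __init__(self, slot_config: dict[str, int]) -> None:
        self.slot_config = slot_config  # the new single source of truth

    @property
    def slots(self) -> int:
        # Mirror the shim above: warn, then read through to slot_config.
        warnings.warn(
            "slots is deprecated; use slot_config['default'] instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.slot_config.get("default", 0)


holder = SlotHolder({"default": 100, "durable": 1000})
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert holder.slots == 100  # old attribute still readable...
assert caught and caught[0].category is DeprecationWarning  # ...but it warns
```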
@@ -320,7 +341,7 @@ async def start(self, retry_attempt: int = 0) -> None: worker_name=self.name, services=["default"], actions=self.actions, - slots=self.slots, + slot_config=self.slot_config, raw_labels=self.labels, ) ) @@ -518,7 +539,7 @@ def exit_forcefully(self) -> None: def worker_action_listener_process( name: str, actions: list[str], - slots: int, + slot_config: dict[str, int], config: ClientConfig, action_queue: "Queue[Action]", event_queue: "Queue[ActionEvent | STOP_LOOP_TYPE]", @@ -530,7 +551,7 @@ async def run() -> None: process = WorkerActionListenerProcess( name=name, actions=actions, - slots=slots, + slot_config=slot_config, config=config, action_queue=action_queue, event_queue=event_queue, diff --git a/sdks/python/hatchet_sdk/worker/slot_types.py b/sdks/python/hatchet_sdk/worker/slot_types.py new file mode 100644 index 0000000000..70702f7ee6 --- /dev/null +++ b/sdks/python/hatchet_sdk/worker/slot_types.py @@ -0,0 +1,6 @@ +from enum import Enum + + +class SlotType(str, Enum): + DEFAULT = "default" + DURABLE = "durable" diff --git a/sdks/python/hatchet_sdk/worker/worker.py b/sdks/python/hatchet_sdk/worker/worker.py index 331af7504b..54248b5f3e 100644 --- a/sdks/python/hatchet_sdk/worker/worker.py +++ b/sdks/python/hatchet_sdk/worker/worker.py @@ -14,9 +14,13 @@ from typing import Any, TypeVar from warnings import warn +import grpc + from hatchet_sdk.client import Client from hatchet_sdk.config import ClientConfig from hatchet_sdk.contracts.v1.workflows_pb2 import CreateWorkflowVersionRequest +from hatchet_sdk.deprecated.deprecation import semver_less_than +from hatchet_sdk.deprecated.worker import legacy_aio_start from hatchet_sdk.exceptions import LifespanSetupError, LoopAlreadyRunningError from hatchet_sdk.logger import logger from hatchet_sdk.runnables.action import Action @@ -65,8 +69,7 @@ def __init__( self, name: str, config: ClientConfig, - slots: int, - durable_slots: int, + slot_config: dict[str, int], labels: dict[str, str | int] | None = None, debug: bool = False, owned_loop: bool = True, @@ -76,15 +79,15 @@ def __init__( ) -> None: self.config = config self.name = self.config.apply_namespace(name) - self.slots = slots - self.durable_slots = durable_slots + self.slot_config = slot_config + self._slots = slot_config.get("default", 0) + self._durable_slots = slot_config.get("durable", 0) self.debug = debug self.labels = labels or {} self.handle_kill = handle_kill self.owned_loop = owned_loop self.action_registry: dict[str, Task[Any, Any]] = {} - self.durable_action_registry: dict[str, Task[Any, Any]] = {} self.killing: bool = False self._status: WorkerStatus = WorkerStatus.INITIALIZED @@ -95,15 +98,14 @@ def __init__( self.action_listener_health_check: asyncio.Task[None] self.action_runner: WorkerActionRunLoopManager | None = None - self.durable_action_runner: WorkerActionRunLoopManager | None = None + self._legacy_durable_action_runner: WorkerActionRunLoopManager | None = None self.ctx = multiprocessing.get_context("spawn") self.action_queue: Queue[Action | STOP_LOOP_TYPE] = self.ctx.Queue() self.event_queue: Queue[ActionEvent] = self.ctx.Queue() - - self.durable_action_queue: Queue[Action | STOP_LOOP_TYPE] = self.ctx.Queue() - self.durable_event_queue: Queue[ActionEvent] = self.ctx.Queue() + self.durable_action_queue: Queue[Action | STOP_LOOP_TYPE] | None = None + self.durable_event_queue: Queue[ActionEvent] | None = None self.loop: asyncio.AbstractEventLoop | None = None @@ -111,9 +113,6 @@ def __init__( self._setup_signal_handlers() - self.has_any_durable = False 
- self.has_any_non_durable = False - self.lifespan = lifespan self.lifespan_stack: AsyncExitStack | None = None self._lifespan_cleanup_complete: asyncio.Event | None = None @@ -141,12 +140,7 @@ def register_workflow(self, workflow: BaseWorkflow[Any]) -> None: for step in workflow.tasks: action_name = workflow._create_action_name(step) - if step.is_durable: - self.has_any_durable = True - self.durable_action_registry[action_name] = step - else: - self.has_any_non_durable = True - self.action_registry[action_name] = step + self.action_registry[action_name] = step def register_workflows(self, workflows: list[BaseWorkflow[Any]]) -> None: for workflow in workflows: @@ -156,6 +150,24 @@ def register_workflows(self, workflows: list[BaseWorkflow[Any]]) -> None: def status(self) -> WorkerStatus: return self._status + @property + def slots(self) -> int: + warn( + "Worker.slots is deprecated; use slot_config['default'] instead.", + DeprecationWarning, + stacklevel=2, + ) + return self._slots + + @property + def durable_slots(self) -> int: + warn( + "Worker.durable_slots is deprecated; use slot_config['durable'] instead.", + DeprecationWarning, + stacklevel=2, + ) + return self._durable_slots + def _setup_loop(self) -> None: try: asyncio.get_running_loop() @@ -171,7 +183,7 @@ def _setup_loop(self) -> None: asyncio.set_event_loop(self.loop) def start(self, options: WorkerStartOptions = WorkerStartOptions()) -> None: - if not (self.action_registry or self.durable_action_registry): + if not self.action_registry: raise ValueError( "no actions registered, register workflows before starting worker" ) @@ -196,6 +208,48 @@ def start(self, options: WorkerStartOptions = WorkerStartOptions()) -> None: if self.handle_kill: sys.exit(0) + # Minimum engine version that supports multiple slot types. + _MIN_SLOT_CONFIG_VERSION = "v0.78.23" + + def _emit_legacy_deprecation(self) -> None: + from datetime import datetime, timezone + + from hatchet_sdk.deprecated.deprecation import emit_deprecation_notice + + emit_deprecation_notice( + feature="legacy-engine", + message=( + "Connected to an older Hatchet engine that does not support " + "multiple slot types. Falling back to legacy worker registration. " + "Please upgrade your Hatchet engine to the latest version." + ), + start=datetime(2026, 2, 12, tzinfo=timezone.utc), + error_days=180, + ) + + async def _check_engine_version(self) -> str | None: + """Returns the engine version string, or None if engine is legacy (pre-slot-config). + + Compares the engine's semantic version against the minimum required + version for slot_config support. Returns the version string for modern + engines so callers can branch on specific versions. 
+ """ + + try: + version = await self.client.dispatcher.get_version() + + # Empty version or older than minimum → legacy + if not version or semver_less_than(version, self._MIN_SLOT_CONFIG_VERSION): + self._emit_legacy_deprecation() + return None + + return version # new engine + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.UNIMPLEMENTED: + self._emit_legacy_deprecation() + return None # old engine + raise + async def _aio_start(self) -> None: main_pid = os.getpid() @@ -205,14 +259,17 @@ async def _aio_start(self) -> None: self._status = WorkerStatus.STARTING - if ( - len(self.action_registry.keys()) == 0 - and len(self.durable_action_registry.keys()) == 0 - ): + if len(self.action_registry.keys()) == 0: raise ValueError( "no actions registered, register workflows or actions before starting worker" ) + # Check engine version and fall back to legacy dual-worker mode if needed + engine_version = await self._check_engine_version() + if engine_version is None: + await legacy_aio_start(self) + return + lifespan_context = None if self.lifespan: try: @@ -226,34 +283,13 @@ async def _aio_start(self) -> None: # Healthcheck server is started inside the spawned action-listener process # (non-durable preferred) to avoid being affected by the main worker loop. healthcheck_port = self.config.healthcheck.port - enable_health_server_non_durable = ( - self.config.healthcheck.enabled and self.has_any_non_durable - ) - enable_health_server_durable = ( - self.config.healthcheck.enabled - and (not self.has_any_non_durable) - and self.has_any_durable - ) + enable_health_server = self.config.healthcheck.enabled - if self.has_any_non_durable: - self.action_listener_process = self._start_action_listener( - is_durable=False, - enable_health_server=enable_health_server_non_durable, - healthcheck_port=healthcheck_port, - ) - self.action_runner = self._run_action_runner( - is_durable=False, lifespan_context=lifespan_context - ) - - if self.has_any_durable: - self.durable_action_listener_process = self._start_action_listener( - is_durable=True, - enable_health_server=enable_health_server_durable, - healthcheck_port=healthcheck_port, - ) - self.durable_action_runner = self._run_action_runner( - is_durable=True, lifespan_context=lifespan_context - ) + self.action_listener_process = self._start_action_listener( + enable_health_server=enable_health_server, + healthcheck_port=healthcheck_port, + ) + self.action_runner = self._run_action_runner(lifespan_context=lifespan_context) if self.loop: self._lifespan_cleanup_complete = asyncio.Event() @@ -271,17 +307,17 @@ async def _aio_start(self) -> None: self._lifespan_cleanup_complete.set() def _run_action_runner( - self, is_durable: bool, lifespan_context: Any | None + self, lifespan_context: Any | None ) -> WorkerActionRunLoopManager: # Retrieve the shared queue if self.loop: return WorkerActionRunLoopManager( - self.name + ("_durable" if is_durable else ""), - self.durable_action_registry if is_durable else self.action_registry, - self.durable_slots if is_durable else self.slots, + self.name, + self.action_registry, + sum(self.slot_config.values()), self.config, - self.durable_action_queue if is_durable else self.action_queue, - self.durable_event_queue if is_durable else self.event_queue, + self.action_queue, + self.event_queue, self.loop, self.handle_kill, self.client.debug, @@ -319,7 +355,6 @@ async def _cleanup_lifespan(self) -> None: def _start_action_listener( self, - is_durable: bool, *, enable_health_server: bool = False, healthcheck_port: int = 8001, @@ 
-328,16 +363,12 @@ def _start_action_listener( process = self.ctx.Process( target=worker_action_listener_process, args=( - self.name + ("_durable" if is_durable else ""), - ( - list(self.durable_action_registry.keys()) - if is_durable - else list(self.action_registry.keys()) - ), - self.durable_slots if is_durable else self.slots, + self.name, + list(self.action_registry.keys()), + self.slot_config, self.config, - self.durable_action_queue if is_durable else self.action_queue, - self.durable_event_queue if is_durable else self.event_queue, + self.action_queue, + self.event_queue, self.handle_kill, self.client.debug, self.labels, @@ -357,12 +388,7 @@ async def _check_listener_health(self) -> None: while not self.killing: if ( not self.action_listener_process - and not self.durable_action_listener_process - ) or ( - self.action_listener_process - and self.durable_action_listener_process - and not self.action_listener_process.is_alive() - and not self.durable_action_listener_process.is_alive() + or not self.action_listener_process.is_alive() ): logger.debug("child action listener process killed...") self._status = WorkerStatus.UNHEALTHY @@ -415,7 +441,7 @@ def _handle_force_quit_signal(self, signum: int, frame: FrameType | None) -> Non self.loop.create_task(self._exit_forcefully()) def _close_queues(self) -> None: - queues: list[Queue[Any]] = [ + queues: list[Queue[Any] | None] = [ self.action_queue, self.event_queue, self.durable_action_queue, @@ -423,10 +449,12 @@ def _close_queues(self) -> None: ] for queue in queues: + if queue is None: + continue try: queue.cancel_join_thread() queue.close() - except Exception: # noqa: PERF203 + except Exception: continue def _terminate_processes(self) -> None: @@ -454,8 +482,9 @@ async def _close(self) -> None: if self.action_runner is not None: self.action_runner.cleanup() - if self.durable_action_runner is not None: - self.durable_action_runner.cleanup() + # Also clean up the durable action runner (legacy mode) + if self._legacy_durable_action_runner is not None: + self._legacy_durable_action_runner.cleanup() await self.action_listener_health_check @@ -473,11 +502,24 @@ async def exit_gracefully(self) -> None: await self.action_runner.wait_for_tasks() await self.action_runner.exit_gracefully() - if self.durable_action_runner: - await self.durable_action_runner.wait_for_tasks() - await self.durable_action_runner.exit_gracefully() + # Also clean up the durable action runner (legacy mode) + if self._legacy_durable_action_runner: + await self._legacy_durable_action_runner.wait_for_tasks() + await self._legacy_durable_action_runner.exit_gracefully() - self._terminate_processes() + if self.action_listener_process and self.action_listener_process.is_alive(): + self.action_listener_process.kill() + + if ( + self.durable_action_listener_process + and self.durable_action_listener_process.is_alive() + ): + self.durable_action_listener_process.kill() + + try: + await self._cleanup_lifespan() + except LifespanSetupError: + logger.exception("lifespan cleanup failed") await self._close() diff --git a/sdks/python/lint.sh b/sdks/python/lint.sh index 943aae7c59..49bea2b4dd 100755 --- a/sdks/python/lint.sh +++ b/sdks/python/lint.sh @@ -8,7 +8,6 @@ poetry run ruff check . --fix echo "Formatting with black" poetry run black . 
--color - echo "\nType checking with mypy" poetry run mypy --config-file=pyproject.toml diff --git a/sdks/python/poetry.lock b/sdks/python/poetry.lock index 52e04fa206..6b8bc72a23 100644 --- a/sdks/python/poetry.lock +++ b/sdks/python/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.0.0 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -153,7 +153,7 @@ propcache = ">=0.2.0" yarl = ">=1.17.0,<2.0" [package.extras] -speedups = ["Brotli (>=1.2)", "aiodns (>=3.3.0)", "backports.zstd", "brotlicffi (>=1.2)"] +speedups = ["Brotli (>=1.2) ; platform_python_implementation == \"CPython\"", "aiodns (>=3.3.0)", "backports.zstd ; platform_python_implementation == \"CPython\" and python_version < \"3.14\"", "brotlicffi (>=1.2) ; platform_python_implementation != \"CPython\""] [[package]] name = "aiosignal" @@ -201,7 +201,7 @@ idna = ">=2.8" typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] -trio = ["trio (>=0.31.0)", "trio (>=0.32.0)"] +trio = ["trio (>=0.31.0) ; python_version < \"3.10\"", "trio (>=0.32.0) ; python_version >= \"3.10\""] [[package]] name = "async-timeout" @@ -210,7 +210,7 @@ description = "Timeout context manager for asyncio programs" optional = false python-versions = ">=3.8" groups = ["main"] -markers = "python_version < \"3.11\"" +markers = "python_version == \"3.10\"" files = [ {file = "async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"}, {file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"}, @@ -509,7 +509,7 @@ files = [ ] [package.extras] -dev = ["docstring-parser[docs]", "docstring-parser[test]", "pre-commit (>=2.16.0)"] +dev = ["docstring-parser[docs]", "docstring-parser[test]", "pre-commit (>=2.16.0) ; python_version >= \"3.9\""] docs = ["pydoctor (>=25.4.0)"] test = ["pytest"] @@ -520,7 +520,7 @@ description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" groups = ["docs", "test"] -markers = "python_version < \"3.11\"" +markers = "python_version == \"3.10\"" files = [ {file = "exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598"}, {file = "exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219"}, @@ -969,7 +969,7 @@ httpcore = "==1.*" idna = "*" [package.extras] -brotli = ["brotli", "brotlicffi"] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] @@ -1007,12 +1007,12 @@ files = [ zipp = ">=3.20" [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", 
"jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] type = ["pytest-mypy"] [[package]] @@ -1416,7 +1416,7 @@ watchdog = ">=2.0" [package.extras] i18n = ["babel (>=2.9.0)"] -min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.4)", "jinja2 (==2.11.1)", "markdown (==3.3.6)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "mkdocs-get-deps (==0.2.0)", "packaging (==20.5)", "pathspec (==0.11.1)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "watchdog (==2.0)"] +min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4) ; platform_system == \"Windows\"", "ghp-import (==1.0)", "importlib-metadata (==4.4) ; python_version < \"3.10\"", "jinja2 (==2.11.1)", "markdown (==3.3.6)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "mkdocs-get-deps (==0.2.0)", "packaging (==20.5)", "pathspec (==0.11.1)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "watchdog (==2.0)"] [[package]] name = "mkdocs-autorefs" @@ -2231,12 +2231,12 @@ typing-extensions = {version = ">=4.6", markers = "python_version < \"3.13\""} tzdata = {version = "*", markers = "sys_platform == \"win32\""} [package.extras] -binary = ["psycopg-binary (==3.3.2)"] -c = ["psycopg-c (==3.3.2)"] +binary = ["psycopg-binary (==3.3.2) ; implementation_name != \"pypy\""] +c = ["psycopg-c (==3.3.2) ; implementation_name != \"pypy\""] dev = ["ast-comments (>=1.1.2)", "black (>=24.1.0)", "codespell (>=2.2)", "cython-lint (>=0.16)", "dnspython (>=2.1)", "flake8 (>=4.0)", "isort-psycopg", "isort[colors] (>=6.0)", "mypy (>=1.19.0)", "pre-commit (>=4.0.1)", "types-setuptools (>=57.4)", "types-shapely (>=2.0)", "wheel (>=0.37)"] docs = ["Sphinx (>=5.0)", "furo (==2022.6.21)", "sphinx-autobuild (>=2021.3.14)", "sphinx-autodoc-typehints (>=1.12)"] pool = ["psycopg-pool"] -test = ["anyio (>=4.0)", "mypy (>=1.19.0)", "pproxy (>=2.7)", "pytest (>=6.2.5)", "pytest-cov (>=3.0)", "pytest-randomly (>=3.5)"] +test = ["anyio (>=4.0)", "mypy (>=1.19.0) ; implementation_name != \"pypy\"", "pproxy (>=2.7)", "pytest (>=6.2.5)", "pytest-cov (>=3.0)", "pytest-randomly (>=3.5)"] [[package]] name = "psycopg-pool" @@ -2276,7 +2276,7 @@ typing-inspection = ">=0.4.2" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" @@ -2838,13 +2838,13 @@ files = [ ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"] -core = ["importlib_metadata (>=6)", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""] +core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler 
= ["pytest-enabler (>=2.2)"] -test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] -type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] [[package]] name = "six" @@ -2923,7 +2923,7 @@ description = "A lil' TOML parser" optional = false python-versions = ">=3.8" groups = ["docs", "lint", "test"] -markers = "python_version < \"3.11\"" +markers = "python_version == \"3.10\"" files = [ {file = "tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45"}, {file = "tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba"}, @@ -3137,10 +3137,10 @@ files = [ ] [package.extras] -brotli = ["brotli (>=1.2.0)", "brotlicffi (>=1.2.0.0)"] +brotli = ["brotli (>=1.2.0) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=1.2.0.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["backports-zstd (>=1.0.0)"] +zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""] [[package]] name = "uvicorn" @@ -3160,7 +3160,7 @@ h11 = ">=0.8" typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} [package.extras] -standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] +standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] [[package]] name = "watchdog" @@ -3456,7 +3456,7 @@ files = [ ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] diff --git a/sdks/python/tests/test_client.py b/sdks/python/tests/test_client.py index 0021a46af2..4dc9c63b76 100644 --- a/sdks/python/tests/test_client.py +++ b/sdks/python/tests/test_client.py @@ -1,7 +1,13 @@ import os +from typing import Any, cast from unittest import mock +import 
pytest + from hatchet_sdk.config import ClientConfig +from hatchet_sdk.runnables.workflow import BaseWorkflow +from hatchet_sdk.utils.slots import resolve_worker_slot_config +from hatchet_sdk.worker.slot_types import SlotType def test_client_initialization_from_defaults() -> None: @@ -33,3 +39,89 @@ def test_client_server_url_override_when_env_var() -> None: assert config.server_url == "foobaz:8080" assert config.host_port == ClientConfig().host_port + + +def test_resolve_slot_config_no_durable() -> None: + resolved = resolve_worker_slot_config( + slot_config=None, + slots=None, + durable_slots=None, + workflows=None, + ) + + assert resolved == {SlotType.DEFAULT: 100} + + +def test_resolve_slot_config_only_durable() -> None: + class DummyTask: + is_durable = True + slot_requests: dict[str, int] = {"durable": 1} + + class DummyWorkflow: + tasks = [DummyTask()] + + resolved = resolve_worker_slot_config( + slot_config=None, + slots=None, + durable_slots=None, + workflows=cast(list[BaseWorkflow[Any]], [DummyWorkflow()]), + ) + + assert resolved == {SlotType.DURABLE: 1000} + + +def test_resolve_slot_config_mixed() -> None: + class DefaultTask: + is_durable = False + slot_requests: dict[str, int] = {"default": 1} + + class DurableTask: + is_durable = True + slot_requests: dict[str, int] = {"durable": 1} + + class DummyWorkflow: + tasks = [DefaultTask(), DurableTask()] + + resolved = resolve_worker_slot_config( + slot_config=None, + slots=None, + durable_slots=None, + workflows=cast(list[BaseWorkflow[Any]], [DummyWorkflow()]), + ) + + assert resolved == {SlotType.DEFAULT: 100, SlotType.DURABLE: 1000} + + +def test_resolve_slot_config_custom_type_raises_when_missing() -> None: + class GpuTask: + is_durable = False + slot_requests: dict[str, int] = {"gpu": 1} + + class DummyWorkflow: + tasks = [GpuTask()] + + with pytest.raises(ValueError, match="gpu"): + resolve_worker_slot_config( + slot_config=None, + slots=None, + durable_slots=None, + workflows=cast(list[BaseWorkflow[Any]], [DummyWorkflow()]), + ) + + +def test_resolve_slot_config_custom_type_passes_when_configured() -> None: + class GpuTask: + is_durable = False + slot_requests: dict[str, int] = {"gpu": 1} + + class DummyWorkflow: + tasks = [GpuTask()] + + resolved = resolve_worker_slot_config( + slot_config={"gpu": 4}, + slots=None, + durable_slots=None, + workflows=cast(list[BaseWorkflow[Any]], [DummyWorkflow()]), + ) + + assert resolved == {"gpu": 4} diff --git a/sdks/python/tests/test_semver.py b/sdks/python/tests/test_semver.py new file mode 100644 index 0000000000..186d5f22f8 --- /dev/null +++ b/sdks/python/tests/test_semver.py @@ -0,0 +1,54 @@ +from hatchet_sdk.deprecated.deprecation import parse_semver, semver_less_than + + +def test_parse_semver_standard_version_with_v_prefix() -> None: + assert parse_semver("v0.78.23") == (0, 78, 23) + + +def test_parse_semver_without_v_prefix() -> None: + assert parse_semver("1.2.3") == (1, 2, 3) + + +def test_parse_semver_strips_prerelease_suffix() -> None: + assert parse_semver("v0.1.0-alpha.0") == (0, 1, 0) + assert parse_semver("v10.20.30-rc.1") == (10, 20, 30) + + +def test_parse_semver_empty_string() -> None: + assert parse_semver("") == (0, 0, 0) + + +def test_parse_semver_malformed_input() -> None: + assert parse_semver("v1.2") == (0, 0, 0) + assert parse_semver("not-a-version") == (0, 0, 0) + + +def test_semver_less_than_patch() -> None: + assert semver_less_than("v0.78.22", "v0.78.23") is True + + +def test_semver_less_than_equal() -> None: + assert semver_less_than("v0.78.23", 
"v0.78.23") is False + + +def test_semver_less_than_greater_than_patch() -> None: + assert semver_less_than("v0.78.24", "v0.78.23") is False + + +def test_semver_less_than_minor_comparison() -> None: + assert semver_less_than("v0.77.99", "v0.78.0") is True + assert semver_less_than("v0.79.0", "v0.78.99") is False + + +def test_semver_less_than_major_comparison() -> None: + assert semver_less_than("v0.78.23", "v1.0.0") is True + assert semver_less_than("v1.0.0", "v0.99.99") is False + + +def test_semver_less_than_prerelease() -> None: + assert semver_less_than("v0.1.0-alpha.0", "v0.78.23") is True + + +def test_semver_less_than_empty_string_as_zero() -> None: + assert semver_less_than("", "v0.78.23") is True + assert semver_less_than("v0.78.23", "") is False diff --git a/sdks/ruby/generate.sh b/sdks/ruby/generate.sh index 279b14ec79..d5f224fbd2 100755 --- a/sdks/ruby/generate.sh +++ b/sdks/ruby/generate.sh @@ -76,14 +76,14 @@ generate_rest() { # Generate local additional_props="gemName=hatchet-sdk-rest,moduleName=HatchetSdkRest,gemVersion=0.0.1,gemDescription=HatchetRubySDKRestClient,gemAuthor=HatchetTeam,gemHomepage=https://github.com/hatchet-dev/hatchet,gemLicense=MIT,library=faraday" - + # TODO-RUBY: we can generate docs here :wow: local cmd=( openapi-generator-cli generate -i "$openapi_spec" -g ruby -o "$output_dir" --skip-validate-spec - --global-property "apiTests=false,modelTests=false,apiDocs=true,modelDocs=true" + --global-property "apiTests=false,modelTests=false,apiDocs=false,modelDocs=false" --additional-properties "$additional_props" ) @@ -117,25 +117,19 @@ apply_cookie_auth_patch() { path = ARGV[0] content = File.read(path) - old_auth = <<~RUBY.strip - case auth_setting[:in] - when '\''header'\'' then header_params[auth_setting[:key]] = auth_setting[:value] - when '\''query'\'' then query_params[auth_setting[:key]] = auth_setting[:value] - else fail ArgumentError, '\''Authentication token must be in `query` or `header`'\'' - end - RUBY - - new_auth = <<~RUBY.strip - next if auth_setting[:value].nil? || auth_setting[:value].to_s.empty? - case auth_setting[:in] - when '\''header'\'' then header_params[auth_setting[:key]] = auth_setting[:value] - when '\''query'\'' then query_params[auth_setting[:key]] = auth_setting[:value] - when '\''cookie'\'' then header_params['\''Cookie'\''] = "#{auth_setting[:key]}=#{auth_setting[:value]}" - else next # skip unsupported auth locations - end - RUBY - - if content.sub!(old_auth, new_auth) + # Match the auth switch block regardless of indentation + old_pattern = /^(\s*)case auth_setting\[:in\]\n\s*when '\''header'\'' then header_params\[auth_setting\[:key\]\] = auth_setting\[:value\]\n\s*when '\''query'\'' then query_params\[auth_setting\[:key\]\] = auth_setting\[:value\]\n\s*else fail ArgumentError, '\''Authentication token must be in `query` or `header`'\''\n\s*end/ + + if content.match(old_pattern) + indent = content.match(old_pattern)[1] + new_auth = "#{indent}next if auth_setting[:value].nil? 
|| auth_setting[:value].to_s.empty?\n" \ + "#{indent}case auth_setting[:in]\n" \ + "#{indent}when '\''header'\'' then header_params[auth_setting[:key]] = auth_setting[:value]\n" \ + "#{indent}when '\''query'\'' then query_params[auth_setting[:key]] = auth_setting[:value]\n" \ + "#{indent}when '\''cookie'\'' then header_params['\''Cookie'\''] = \"\#{auth_setting[:key]}=\#{auth_setting[:value]}\"\n" \ + "#{indent}else next\n" \ + "#{indent}end" + content.sub!(old_pattern, new_auth) File.write(path, content) puts " Patched api_client.rb" else diff --git a/sdks/ruby/src/lib/hatchet/clients/rest/.openapi-generator/FILES b/sdks/ruby/src/lib/hatchet/clients/rest/.openapi-generator/FILES index 459feaee71..01c86ff31e 100644 --- a/sdks/ruby/src/lib/hatchet/clients/rest/.openapi-generator/FILES +++ b/sdks/ruby/src/lib/hatchet/clients/rest/.openapi-generator/FILES @@ -1,6 +1,5 @@ .gitignore .gitlab-ci.yml -.openapi-generator-ignore .rspec .rubocop.yml .travis.yml @@ -207,6 +206,7 @@ lib/hatchet-sdk-rest/models/v1_webhook_hmac_algorithm.rb lib/hatchet-sdk-rest/models/v1_webhook_hmac_auth.rb lib/hatchet-sdk-rest/models/v1_webhook_hmac_encoding.rb lib/hatchet-sdk-rest/models/v1_webhook_list.rb +lib/hatchet-sdk-rest/models/v1_webhook_response.rb lib/hatchet-sdk-rest/models/v1_webhook_source_name.rb lib/hatchet-sdk-rest/models/v1_workflow_run.rb lib/hatchet-sdk-rest/models/v1_workflow_run_details.rb diff --git a/sdks/ruby/src/lib/hatchet/clients/rest/README.md b/sdks/ruby/src/lib/hatchet/clients/rest/README.md index 7d6fb6bf3b..024ffeb69e 100644 --- a/sdks/ruby/src/lib/hatchet/clients/rest/README.md +++ b/sdks/ruby/src/lib/hatchet/clients/rest/README.md @@ -407,6 +407,7 @@ Class | Method | HTTP request | Description - [HatchetSdkRest::V1WebhookHMACAuth](docs/V1WebhookHMACAuth.md) - [HatchetSdkRest::V1WebhookHMACEncoding](docs/V1WebhookHMACEncoding.md) - [HatchetSdkRest::V1WebhookList](docs/V1WebhookList.md) + - [HatchetSdkRest::V1WebhookResponse](docs/V1WebhookResponse.md) - [HatchetSdkRest::V1WebhookSourceName](docs/V1WebhookSourceName.md) - [HatchetSdkRest::V1WorkflowRun](docs/V1WorkflowRun.md) - [HatchetSdkRest::V1WorkflowRunDetails](docs/V1WorkflowRunDetails.md) diff --git a/sdks/ruby/src/lib/hatchet/clients/rest/hatchet-sdk-rest.gemspec b/sdks/ruby/src/lib/hatchet/clients/rest/hatchet-sdk-rest.gemspec index 7d866c3204..f9fb3b6580 100644 --- a/sdks/ruby/src/lib/hatchet/clients/rest/hatchet-sdk-rest.gemspec +++ b/sdks/ruby/src/lib/hatchet/clients/rest/hatchet-sdk-rest.gemspec @@ -19,11 +19,11 @@ Gem::Specification.new do |s| s.name = "hatchet-sdk-rest" s.version = HatchetSdkRest::VERSION s.platform = Gem::Platform::RUBY - s.authors = ["Hatchet Team"] + s.authors = ["HatchetTeam"] s.email = [""] s.homepage = "https://github.com/hatchet-dev/hatchet" s.summary = "Hatchet API Ruby Gem" - s.description = "Ruby REST client for Hatchet API generated from OpenAPI specification" + s.description = "HatchetRubySDKRestClient" s.license = "MIT" s.required_ruby_version = ">= 2.7" s.metadata = {} diff --git a/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest.rb b/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest.rb index 7c9b91a976..a9a125524d 100644 --- a/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest.rb +++ b/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest.rb @@ -190,6 +190,7 @@ HatchetSdkRest.autoload :V1WebhookHMACAuth, 'hatchet-sdk-rest/models/v1_webhook_hmac_auth' HatchetSdkRest.autoload :V1WebhookHMACEncoding, 'hatchet-sdk-rest/models/v1_webhook_hmac_encoding' 
HatchetSdkRest.autoload :V1WebhookList, 'hatchet-sdk-rest/models/v1_webhook_list' +HatchetSdkRest.autoload :V1WebhookResponse, 'hatchet-sdk-rest/models/v1_webhook_response' HatchetSdkRest.autoload :V1WebhookSourceName, 'hatchet-sdk-rest/models/v1_webhook_source_name' HatchetSdkRest.autoload :V1WorkflowRun, 'hatchet-sdk-rest/models/v1_workflow_run' HatchetSdkRest.autoload :V1WorkflowRunDetails, 'hatchet-sdk-rest/models/v1_workflow_run_details' diff --git a/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/api/webhook_api.rb b/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/api/webhook_api.rb index 9aa0020466..66b8b76177 100644 --- a/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/api/webhook_api.rb +++ b/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/api/webhook_api.rb @@ -341,7 +341,7 @@ def v1_webhook_list_with_http_info(tenant, opts = {}) # @param tenant [String] The tenant id # @param v1_webhook [String] The webhook name # @param [Hash] opts the optional parameters - # @return [Hash] + # @return [V1WebhookResponse] def v1_webhook_receive(tenant, v1_webhook, opts = {}) data, _status_code, _headers = v1_webhook_receive_with_http_info(tenant, v1_webhook, opts) data @@ -352,7 +352,7 @@ def v1_webhook_receive(tenant, v1_webhook, opts = {}) # @param tenant [String] The tenant id # @param v1_webhook [String] The webhook name # @param [Hash] opts the optional parameters - # @return [Array<(Hash, Integer, Hash)>] Hash data, response status code and response headers + # @return [Array<(V1WebhookResponse, Integer, Hash)>] V1WebhookResponse data, response status code and response headers def v1_webhook_receive_with_http_info(tenant, v1_webhook, opts = {}) if @api_client.config.debugging @api_client.config.logger.debug 'Calling API: WebhookApi.v1_webhook_receive ...' 
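Since v1_webhook_receive now deserializes into V1WebhookResponse instead of a raw Hash, callers get typed attribute readers. A minimal, hypothetical Ruby usage sketch — the tenant id and webhook name are placeholders, and the meaning of challenge is an assumption rather than something this diff specifies:

# Hypothetical caller; assumes HatchetSdkRest::ApiClient.default is configured.
api = HatchetSdkRest::WebhookApi.new
resp = api.v1_webhook_receive('tenant-uuid', 'my-webhook')
resp.message    # typed reader instead of resp['message']
resp.event      # the V1Event created for this delivery, if the server set one
resp.challenge  # assumed: echoed back for verification-style requests, when present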
@@ -391,7 +391,7 @@ def v1_webhook_receive_with_http_info(tenant, v1_webhook, opts = {}) post_body = opts[:debug_body] # return_type - return_type = opts[:debug_return_type] || 'Hash' + return_type = opts[:debug_return_type] || 'V1WebhookResponse' # auth_names auth_names = opts[:debug_auth_names] || [] diff --git a/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/api_client.rb b/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/api_client.rb index 62db0c4ec2..a940aff312 100644 --- a/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/api_client.rb +++ b/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/api_client.rb @@ -357,7 +357,7 @@ def update_params_for_auth!(header_params, query_params, auth_names) when 'header' then header_params[auth_setting[:key]] = auth_setting[:value] when 'query' then query_params[auth_setting[:key]] = auth_setting[:value] when 'cookie' then header_params['Cookie'] = "#{auth_setting[:key]}=#{auth_setting[:value]}" - else next # skip unsupported auth locations + else next end end end diff --git a/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/models/v1_update_webhook_request.rb b/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/models/v1_update_webhook_request.rb index 3762276389..ffd60858fa 100644 --- a/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/models/v1_update_webhook_request.rb +++ b/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/models/v1_update_webhook_request.rb @@ -76,8 +76,6 @@ def initialize(attributes = {}) if attributes.key?(:'event_key_expression') self.event_key_expression = attributes[:'event_key_expression'] - else - self.event_key_expression = nil end if attributes.key?(:'scope_expression') @@ -94,10 +92,6 @@ def initialize(attributes = {}) def list_invalid_properties warn '[DEPRECATED] the `list_invalid_properties` method is obsolete' invalid_properties = Array.new - if @event_key_expression.nil? - invalid_properties.push('invalid value for "event_key_expression", event_key_expression cannot be nil.') - end - invalid_properties end @@ -105,20 +99,9 @@ def list_invalid_properties # @return true if the model is valid def valid? warn '[DEPRECATED] the `valid?` method is obsolete' - return false if @event_key_expression.nil? true end - # Custom attribute writer method with validation - # @param [Object] event_key_expression Value to be assigned - def event_key_expression=(event_key_expression) - if event_key_expression.nil? - fail ArgumentError, 'event_key_expression cannot be nil' - end - - @event_key_expression = event_key_expression - end - # Checks equality by comparing each attribute. # @param [Object] Object to be compared def ==(o) diff --git a/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/models/v1_webhook_response.rb b/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/models/v1_webhook_response.rb new file mode 100644 index 0000000000..652d723886 --- /dev/null +++ b/sdks/ruby/src/lib/hatchet/clients/rest/lib/hatchet-sdk-rest/models/v1_webhook_response.rb @@ -0,0 +1,239 @@ +=begin +#Hatchet API + +#The Hatchet API + +The version of the OpenAPI document: 1.0.0 + +Generated by: https://openapi-generator.tech +Generator version: 7.14.0 + +=end + +require 'date' +require 'time' + +module HatchetSdkRest + class V1WebhookResponse + # The message for the webhook response + attr_accessor :message + + attr_accessor :event + + attr_accessor :challenge + + # Attribute mapping from ruby-style variable name to JSON key. 
+ def self.attribute_map + { + :'message' => :'message', + :'event' => :'event', + :'challenge' => :'challenge' + } + end + + # Returns attribute mapping this model knows about + def self.acceptable_attribute_map + attribute_map + end + + # Returns all the JSON keys this model knows about + def self.acceptable_attributes + acceptable_attribute_map.values + end + + # Attribute type mapping. + def self.openapi_types + { + :'message' => :'String', + :'event' => :'V1Event', + :'challenge' => :'String' + } + end + + # List of attributes with nullable: true + def self.openapi_nullable + Set.new([ + ]) + end + + # Initializes the object + # @param [Hash] attributes Model attributes in the form of hash + def initialize(attributes = {}) + if (!attributes.is_a?(Hash)) + fail ArgumentError, "The input argument (attributes) must be a hash in `HatchetSdkRest::V1WebhookResponse` initialize method" + end + + # check to see if the attribute exists and convert string to symbol for hash key + acceptable_attribute_map = self.class.acceptable_attribute_map + attributes = attributes.each_with_object({}) { |(k, v), h| + if (!acceptable_attribute_map.key?(k.to_sym)) + fail ArgumentError, "`#{k}` is not a valid attribute in `HatchetSdkRest::V1WebhookResponse`. Please check the name to make sure it's valid. List of attributes: " + acceptable_attribute_map.keys.inspect + end + h[k.to_sym] = v + } + + if attributes.key?(:'message') + self.message = attributes[:'message'] + end + + if attributes.key?(:'event') + self.event = attributes[:'event'] + end + + if attributes.key?(:'challenge') + self.challenge = attributes[:'challenge'] + end + end + + # Show invalid properties with the reasons. Usually used together with valid? + # @return Array for valid properties with the reasons + def list_invalid_properties + warn '[DEPRECATED] the `list_invalid_properties` method is obsolete' + invalid_properties = Array.new + invalid_properties + end + + # Check to see if the all the properties in the model are valid + # @return true if the model is valid + def valid? + warn '[DEPRECATED] the `valid?` method is obsolete' + true + end + + # Checks equality by comparing each attribute. + # @param [Object] Object to be compared + def ==(o) + return true if self.equal?(o) + self.class == o.class && + message == o.message && + event == o.event && + challenge == o.challenge + end + + # @see the `==` method + # @param [Object] Object to be compared + def eql?(o) + self == o + end + + # Calculates hash code according to all attributes. + # @return [Integer] Hash code + def hash + [message, event, challenge].hash + end + + # Builds the object from hash + # @param [Hash] attributes Model attributes in the form of hash + # @return [Object] Returns the model itself + def self.build_from_hash(attributes) + return nil unless attributes.is_a?(Hash) + attributes = attributes.transform_keys(&:to_sym) + transformed_hash = {} + openapi_types.each_pair do |key, type| + if attributes.key?(attribute_map[key]) && attributes[attribute_map[key]].nil? + transformed_hash["#{key}"] = nil + elsif type =~ /\AArray<(.*)>/i + # check to ensure the input is an array given that the attribute + # is documented as an array but the input is not + if attributes[attribute_map[key]].is_a?(Array) + transformed_hash["#{key}"] = attributes[attribute_map[key]].map { |v| _deserialize($1, v) } + end + elsif !attributes[attribute_map[key]].nil? 
+ transformed_hash["#{key}"] = _deserialize(type, attributes[attribute_map[key]]) + end + end + new(transformed_hash) + end + + # Deserializes the data based on type + # @param string type Data type + # @param string value Value to be deserialized + # @return [Object] Deserialized data + def self._deserialize(type, value) + case type.to_sym + when :Time + Time.parse(value) + when :Date + Date.parse(value) + when :String + value.to_s + when :Integer + value.to_i + when :Float + value.to_f + when :Boolean + if value.to_s =~ /\A(true|t|yes|y|1)\z/i + true + else + false + end + when :Object + # generic object (usually a Hash), return directly + value + when /\AArray<(?.+)>\z/ + inner_type = Regexp.last_match[:inner_type] + value.map { |v| _deserialize(inner_type, v) } + when /\AHash<(?.+?), (?.+)>\z/ + k_type = Regexp.last_match[:k_type] + v_type = Regexp.last_match[:v_type] + {}.tap do |hash| + value.each do |k, v| + hash[_deserialize(k_type, k)] = _deserialize(v_type, v) + end + end + else # model + # models (e.g. Pet) or oneOf + klass = HatchetSdkRest.const_get(type) + klass.respond_to?(:openapi_any_of) || klass.respond_to?(:openapi_one_of) ? klass.build(value) : klass.build_from_hash(value) + end + end + + # Returns the string representation of the object + # @return [String] String presentation of the object + def to_s + to_hash.to_s + end + + # to_body is an alias to to_hash (backward compatibility) + # @return [Hash] Returns the object in the form of hash + def to_body + to_hash + end + + # Returns the object in the form of hash + # @return [Hash] Returns the object in the form of hash + def to_hash + hash = {} + self.class.attribute_map.each_pair do |attr, param| + value = self.send(attr) + if value.nil? + is_nullable = self.class.openapi_nullable.include?(attr) + next if !is_nullable || (is_nullable && !instance_variable_defined?(:"@#{attr}")) + end + + hash[param] = _to_hash(value) + end + hash + end + + # Outputs non-array value in the form of hash + # For object, use to_hash. Otherwise, just return the value + # @param [Object] value Any valid value + # @return [Hash] Returns the value in the form of hash + def _to_hash(value) + if value.is_a?(Array) + value.compact.map { |v| _to_hash(v) } + elsif value.is_a?(Hash) + {}.tap do |hash| + value.each { |k, v| hash[k] = _to_hash(v) } + end + elsif value.respond_to? 
:to_hash + value.to_hash + else + value + end + end + + end + +end diff --git a/sdks/ruby/src/lib/hatchet/contracts/dispatcher/dispatcher_pb.rb b/sdks/ruby/src/lib/hatchet/contracts/dispatcher/dispatcher_pb.rb index 0abed06a84..e24c29a3f1 100644 --- a/sdks/ruby/src/lib/hatchet/contracts/dispatcher/dispatcher_pb.rb +++ b/sdks/ruby/src/lib/hatchet/contracts/dispatcher/dispatcher_pb.rb @@ -7,7 +7,7 @@ require 'google/protobuf/timestamp_pb' -descriptor_data = "\n\x1b\x64ispatcher/dispatcher.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"Z\n\x0cWorkerLabels\x12\x16\n\tstr_value\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tint_value\x18\x02 \x01(\x05H\x01\x88\x01\x01\x42\x0c\n\n_str_valueB\x0c\n\n_int_value\"\xcc\x01\n\x0bRuntimeInfo\x12\x18\n\x0bsdk_version\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x08language\x18\x02 \x01(\x0e\x32\x05.SDKSH\x01\x88\x01\x01\x12\x1d\n\x10language_version\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0f\n\x02os\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x12\n\x05\x65xtra\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_sdk_versionB\x0b\n\t_languageB\x13\n\x11_language_versionB\x05\n\x03_osB\x08\n\x06_extra\"\xc1\x02\n\x15WorkerRegisterRequest\x12\x13\n\x0bworker_name\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63tions\x18\x02 \x03(\t\x12\x10\n\x08services\x18\x03 \x03(\t\x12\x12\n\x05slots\x18\x04 \x01(\x05H\x00\x88\x01\x01\x12\x32\n\x06labels\x18\x05 \x03(\x0b\x32\".WorkerRegisterRequest.LabelsEntry\x12\x17\n\nwebhook_id\x18\x06 \x01(\tH\x01\x88\x01\x01\x12\'\n\x0cruntime_info\x18\x07 \x01(\x0b\x32\x0c.RuntimeInfoH\x02\x88\x01\x01\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\x42\x08\n\x06_slotsB\r\n\x0b_webhook_idB\x0f\n\r_runtime_info\"S\n\x16WorkerRegisterResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12\x13\n\x0bworker_name\x18\x03 \x01(\t\"\xa4\x01\n\x19UpsertWorkerLabelsRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x36\n\x06labels\x18\x02 \x03(\x0b\x32&.UpsertWorkerLabelsRequest.LabelsEntry\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\"B\n\x1aUpsertWorkerLabelsResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\x98\x05\n\x0e\x41ssignedAction\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x17\n\x0fworkflow_run_id\x18\x02 \x01(\t\x12\x1c\n\x14get_group_key_run_id\x18\x03 \x01(\t\x12\x0e\n\x06job_id\x18\x04 \x01(\t\x12\x10\n\x08job_name\x18\x05 \x01(\t\x12\x12\n\njob_run_id\x18\x06 \x01(\t\x12\x0f\n\x07task_id\x18\x07 \x01(\t\x12\x1c\n\x14task_run_external_id\x18\x08 \x01(\t\x12\x11\n\taction_id\x18\t \x01(\t\x12 \n\x0b\x61\x63tion_type\x18\n \x01(\x0e\x32\x0b.ActionType\x12\x16\n\x0e\x61\x63tion_payload\x18\x0b \x01(\t\x12\x11\n\ttask_name\x18\x0c \x01(\t\x12\x13\n\x0bretry_count\x18\r \x01(\x05\x12 \n\x13\x61\x64\x64itional_metadata\x18\x0e \x01(\tH\x00\x88\x01\x01\x12!\n\x14\x63hild_workflow_index\x18\x0f \x01(\x05H\x01\x88\x01\x01\x12\x1f\n\x12\x63hild_workflow_key\x18\x10 \x01(\tH\x02\x88\x01\x01\x12#\n\x16parent_workflow_run_id\x18\x11 \x01(\tH\x03\x88\x01\x01\x12\x10\n\x08priority\x18\x12 \x01(\x05\x12\x18\n\x0bworkflow_id\x18\x13 \x01(\tH\x04\x88\x01\x01\x12 \n\x13workflow_version_id\x18\x14 \x01(\tH\x05\x88\x01\x01\x42\x16\n\x14_additional_metadataB\x17\n\x15_child_workflow_indexB\x15\n\x13_child_workflow_keyB\x19\n\x17_parent_workflow_run_idB\x0e\n\x0c_workflow_idB\x16\n\x14_workflow_version_id\"(\n\x13WorkerListenRequest\x12\x11\n\tworker_id\x18\x01 
\x01(\t\"-\n\x18WorkerUnsubscribeRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\"A\n\x19WorkerUnsubscribeResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xec\x01\n\x13GroupKeyActionEvent\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x17\n\x0fworkflow_run_id\x18\x02 \x01(\t\x12\x1c\n\x14get_group_key_run_id\x18\x03 \x01(\t\x12\x11\n\taction_id\x18\x04 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\nevent_type\x18\x06 \x01(\x0e\x32\x18.GroupKeyActionEventType\x12\x15\n\revent_payload\x18\x07 \x01(\t\"\xde\x02\n\x0fStepActionEvent\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x0e\n\x06job_id\x18\x02 \x01(\t\x12\x12\n\njob_run_id\x18\x03 \x01(\t\x12\x0f\n\x07task_id\x18\x04 \x01(\t\x12\x1c\n\x14task_run_external_id\x18\x05 \x01(\t\x12\x11\n\taction_id\x18\x06 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12(\n\nevent_type\x18\x08 \x01(\x0e\x32\x14.StepActionEventType\x12\x15\n\revent_payload\x18\t \x01(\t\x12\x18\n\x0bretry_count\x18\n \x01(\x05H\x00\x88\x01\x01\x12\x1d\n\x10should_not_retry\x18\x0b \x01(\x08H\x01\x88\x01\x01\x42\x0e\n\x0c_retry_countB\x13\n\x11_should_not_retry\";\n\x13\x41\x63tionEventResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xcc\x01\n SubscribeToWorkflowEventsRequest\x12\x1c\n\x0fworkflow_run_id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\x13\x61\x64\x64itional_meta_key\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\"\n\x15\x61\x64\x64itional_meta_value\x18\x03 \x01(\tH\x02\x88\x01\x01\x42\x12\n\x10_workflow_run_idB\x16\n\x14_additional_meta_keyB\x18\n\x16_additional_meta_value\"9\n\x1eSubscribeToWorkflowRunsRequest\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\"\xe7\x02\n\rWorkflowEvent\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\x12$\n\rresource_type\x18\x02 \x01(\x0e\x32\r.ResourceType\x12&\n\nevent_type\x18\x03 \x01(\x0e\x32\x12.ResourceEventType\x12\x13\n\x0bresource_id\x18\x04 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x15\n\revent_payload\x18\x06 \x01(\t\x12\x0e\n\x06hangup\x18\x07 \x01(\x08\x12\x19\n\x0ctask_retries\x18\x08 \x01(\x05H\x00\x88\x01\x01\x12\x18\n\x0bretry_count\x18\t \x01(\x05H\x01\x88\x01\x01\x12\x18\n\x0b\x65vent_index\x18\n \x01(\x03H\x02\x88\x01\x01\x42\x0f\n\r_task_retriesB\x0e\n\x0c_retry_countB\x0e\n\x0c_event_index\"\xac\x01\n\x10WorkflowRunEvent\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\x12)\n\nevent_type\x18\x02 \x01(\x0e\x32\x15.WorkflowRunEventType\x12\x33\n\x0f\x65vent_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x07results\x18\x04 \x03(\x0b\x32\x0e.StepRunResult\"\x92\x01\n\rStepRunResult\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x12\n\njob_run_id\x18\x03 \x01(\t\x12\x12\n\x05\x65rror\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x05 \x01(\tH\x01\x88\x01\x01\x42\x08\n\x06_errorB\t\n\x07_output\"c\n\rOverridesData\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x17\n\x0f\x63\x61ller_filename\x18\x04 \x01(\t\"\x17\n\x15OverridesDataResponse\"W\n\x10HeartbeatRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x30\n\x0cheartbeat_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x13\n\x11HeartbeatResponse\"S\n\x15RefreshTimeoutRequest\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x1c\n\x14increment_timeout_by\x18\x02 
\x01(\t\"H\n\x16RefreshTimeoutResponse\x12.\n\ntimeout_at\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"2\n\x12ReleaseSlotRequest\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\"\x15\n\x13ReleaseSlotResponse*A\n\x04SDKS\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02GO\x10\x01\x12\n\n\x06PYTHON\x10\x02\x12\x0e\n\nTYPESCRIPT\x10\x03\x12\x08\n\x04RUBY\x10\x04*N\n\nActionType\x12\x12\n\x0eSTART_STEP_RUN\x10\x00\x12\x13\n\x0f\x43\x41NCEL_STEP_RUN\x10\x01\x12\x17\n\x13START_GET_GROUP_KEY\x10\x02*\xa2\x01\n\x17GroupKeyActionEventType\x12 \n\x1cGROUP_KEY_EVENT_TYPE_UNKNOWN\x10\x00\x12 \n\x1cGROUP_KEY_EVENT_TYPE_STARTED\x10\x01\x12\"\n\x1eGROUP_KEY_EVENT_TYPE_COMPLETED\x10\x02\x12\x1f\n\x1bGROUP_KEY_EVENT_TYPE_FAILED\x10\x03*\xac\x01\n\x13StepActionEventType\x12\x1b\n\x17STEP_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1b\n\x17STEP_EVENT_TYPE_STARTED\x10\x01\x12\x1d\n\x19STEP_EVENT_TYPE_COMPLETED\x10\x02\x12\x1a\n\x16STEP_EVENT_TYPE_FAILED\x10\x03\x12 \n\x1cSTEP_EVENT_TYPE_ACKNOWLEDGED\x10\x04*e\n\x0cResourceType\x12\x19\n\x15RESOURCE_TYPE_UNKNOWN\x10\x00\x12\x1a\n\x16RESOURCE_TYPE_STEP_RUN\x10\x01\x12\x1e\n\x1aRESOURCE_TYPE_WORKFLOW_RUN\x10\x02*\xfe\x01\n\x11ResourceEventType\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_STARTED\x10\x01\x12!\n\x1dRESOURCE_EVENT_TYPE_COMPLETED\x10\x02\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_FAILED\x10\x03\x12!\n\x1dRESOURCE_EVENT_TYPE_CANCELLED\x10\x04\x12!\n\x1dRESOURCE_EVENT_TYPE_TIMED_OUT\x10\x05\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_STREAM\x10\x06*<\n\x14WorkflowRunEventType\x12$\n WORKFLOW_RUN_EVENT_TYPE_FINISHED\x10\x00\x32\xf8\x06\n\nDispatcher\x12=\n\x08Register\x12\x16.WorkerRegisterRequest\x1a\x17.WorkerRegisterResponse\"\x00\x12\x33\n\x06Listen\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x35\n\x08ListenV2\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x34\n\tHeartbeat\x12\x11.HeartbeatRequest\x1a\x12.HeartbeatResponse\"\x00\x12R\n\x19SubscribeToWorkflowEvents\x12!.SubscribeToWorkflowEventsRequest\x1a\x0e.WorkflowEvent\"\x00\x30\x01\x12S\n\x17SubscribeToWorkflowRuns\x12\x1f.SubscribeToWorkflowRunsRequest\x1a\x11.WorkflowRunEvent\"\x00(\x01\x30\x01\x12?\n\x13SendStepActionEvent\x12\x10.StepActionEvent\x1a\x14.ActionEventResponse\"\x00\x12G\n\x17SendGroupKeyActionEvent\x12\x14.GroupKeyActionEvent\x1a\x14.ActionEventResponse\"\x00\x12<\n\x10PutOverridesData\x12\x0e.OverridesData\x1a\x16.OverridesDataResponse\"\x00\x12\x46\n\x0bUnsubscribe\x12\x19.WorkerUnsubscribeRequest\x1a\x1a.WorkerUnsubscribeResponse\"\x00\x12\x43\n\x0eRefreshTimeout\x12\x16.RefreshTimeoutRequest\x1a\x17.RefreshTimeoutResponse\"\x00\x12:\n\x0bReleaseSlot\x12\x13.ReleaseSlotRequest\x1a\x14.ReleaseSlotResponse\"\x00\x12O\n\x12UpsertWorkerLabels\x12\x1a.UpsertWorkerLabelsRequest\x1a\x1b.UpsertWorkerLabelsResponse\"\x00\x42GZEgithub.com/hatchet-dev/hatchet/internal/services/dispatcher/contractsb\x06proto3" +descriptor_data = "\n\x1b\x64ispatcher/dispatcher.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"Z\n\x0cWorkerLabels\x12\x16\n\tstr_value\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tint_value\x18\x02 \x01(\x05H\x01\x88\x01\x01\x42\x0c\n\n_str_valueB\x0c\n\n_int_value\"\xcc\x01\n\x0bRuntimeInfo\x12\x18\n\x0bsdk_version\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x08language\x18\x02 \x01(\x0e\x32\x05.SDKSH\x01\x88\x01\x01\x12\x1d\n\x10language_version\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0f\n\x02os\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x12\n\x05\x65xtra\x18\x05 
\x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_sdk_versionB\x0b\n\t_languageB\x13\n\x11_language_versionB\x05\n\x03_osB\x08\n\x06_extra\"\xb1\x03\n\x15WorkerRegisterRequest\x12\x13\n\x0bworker_name\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63tions\x18\x02 \x03(\t\x12\x10\n\x08services\x18\x03 \x03(\t\x12\x12\n\x05slots\x18\x04 \x01(\x05H\x00\x88\x01\x01\x12\x32\n\x06labels\x18\x05 \x03(\x0b\x32\".WorkerRegisterRequest.LabelsEntry\x12\x17\n\nwebhook_id\x18\x06 \x01(\tH\x01\x88\x01\x01\x12\'\n\x0cruntime_info\x18\x07 \x01(\x0b\x32\x0c.RuntimeInfoH\x02\x88\x01\x01\x12;\n\x0bslot_config\x18\t \x03(\x0b\x32&.WorkerRegisterRequest.SlotConfigEntry\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\x1a\x31\n\x0fSlotConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_slotsB\r\n\x0b_webhook_idB\x0f\n\r_runtime_info\"S\n\x16WorkerRegisterResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12\x13\n\x0bworker_name\x18\x03 \x01(\t\"\xa4\x01\n\x19UpsertWorkerLabelsRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x36\n\x06labels\x18\x02 \x03(\x0b\x32&.UpsertWorkerLabelsRequest.LabelsEntry\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\"B\n\x1aUpsertWorkerLabelsResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\x98\x05\n\x0e\x41ssignedAction\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x17\n\x0fworkflow_run_id\x18\x02 \x01(\t\x12\x1c\n\x14get_group_key_run_id\x18\x03 \x01(\t\x12\x0e\n\x06job_id\x18\x04 \x01(\t\x12\x10\n\x08job_name\x18\x05 \x01(\t\x12\x12\n\njob_run_id\x18\x06 \x01(\t\x12\x0f\n\x07task_id\x18\x07 \x01(\t\x12\x1c\n\x14task_run_external_id\x18\x08 \x01(\t\x12\x11\n\taction_id\x18\t \x01(\t\x12 \n\x0b\x61\x63tion_type\x18\n \x01(\x0e\x32\x0b.ActionType\x12\x16\n\x0e\x61\x63tion_payload\x18\x0b \x01(\t\x12\x11\n\ttask_name\x18\x0c \x01(\t\x12\x13\n\x0bretry_count\x18\r \x01(\x05\x12 \n\x13\x61\x64\x64itional_metadata\x18\x0e \x01(\tH\x00\x88\x01\x01\x12!\n\x14\x63hild_workflow_index\x18\x0f \x01(\x05H\x01\x88\x01\x01\x12\x1f\n\x12\x63hild_workflow_key\x18\x10 \x01(\tH\x02\x88\x01\x01\x12#\n\x16parent_workflow_run_id\x18\x11 \x01(\tH\x03\x88\x01\x01\x12\x10\n\x08priority\x18\x12 \x01(\x05\x12\x18\n\x0bworkflow_id\x18\x13 \x01(\tH\x04\x88\x01\x01\x12 \n\x13workflow_version_id\x18\x14 \x01(\tH\x05\x88\x01\x01\x42\x16\n\x14_additional_metadataB\x17\n\x15_child_workflow_indexB\x15\n\x13_child_workflow_keyB\x19\n\x17_parent_workflow_run_idB\x0e\n\x0c_workflow_idB\x16\n\x14_workflow_version_id\"(\n\x13WorkerListenRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\"-\n\x18WorkerUnsubscribeRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\"A\n\x19WorkerUnsubscribeResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xec\x01\n\x13GroupKeyActionEvent\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x17\n\x0fworkflow_run_id\x18\x02 \x01(\t\x12\x1c\n\x14get_group_key_run_id\x18\x03 \x01(\t\x12\x11\n\taction_id\x18\x04 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\nevent_type\x18\x06 \x01(\x0e\x32\x18.GroupKeyActionEventType\x12\x15\n\revent_payload\x18\x07 \x01(\t\"\xde\x02\n\x0fStepActionEvent\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x0e\n\x06job_id\x18\x02 \x01(\t\x12\x12\n\njob_run_id\x18\x03 \x01(\t\x12\x0f\n\x07task_id\x18\x04 \x01(\t\x12\x1c\n\x14task_run_external_id\x18\x05 
\x01(\t\x12\x11\n\taction_id\x18\x06 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12(\n\nevent_type\x18\x08 \x01(\x0e\x32\x14.StepActionEventType\x12\x15\n\revent_payload\x18\t \x01(\t\x12\x18\n\x0bretry_count\x18\n \x01(\x05H\x00\x88\x01\x01\x12\x1d\n\x10should_not_retry\x18\x0b \x01(\x08H\x01\x88\x01\x01\x42\x0e\n\x0c_retry_countB\x13\n\x11_should_not_retry\";\n\x13\x41\x63tionEventResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xcc\x01\n SubscribeToWorkflowEventsRequest\x12\x1c\n\x0fworkflow_run_id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\x13\x61\x64\x64itional_meta_key\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\"\n\x15\x61\x64\x64itional_meta_value\x18\x03 \x01(\tH\x02\x88\x01\x01\x42\x12\n\x10_workflow_run_idB\x16\n\x14_additional_meta_keyB\x18\n\x16_additional_meta_value\"9\n\x1eSubscribeToWorkflowRunsRequest\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\"\xe7\x02\n\rWorkflowEvent\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\x12$\n\rresource_type\x18\x02 \x01(\x0e\x32\r.ResourceType\x12&\n\nevent_type\x18\x03 \x01(\x0e\x32\x12.ResourceEventType\x12\x13\n\x0bresource_id\x18\x04 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x15\n\revent_payload\x18\x06 \x01(\t\x12\x0e\n\x06hangup\x18\x07 \x01(\x08\x12\x19\n\x0ctask_retries\x18\x08 \x01(\x05H\x00\x88\x01\x01\x12\x18\n\x0bretry_count\x18\t \x01(\x05H\x01\x88\x01\x01\x12\x18\n\x0b\x65vent_index\x18\n \x01(\x03H\x02\x88\x01\x01\x42\x0f\n\r_task_retriesB\x0e\n\x0c_retry_countB\x0e\n\x0c_event_index\"\xac\x01\n\x10WorkflowRunEvent\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\x12)\n\nevent_type\x18\x02 \x01(\x0e\x32\x15.WorkflowRunEventType\x12\x33\n\x0f\x65vent_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x07results\x18\x04 \x03(\x0b\x32\x0e.StepRunResult\"\x92\x01\n\rStepRunResult\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x12\n\njob_run_id\x18\x03 \x01(\t\x12\x12\n\x05\x65rror\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x05 \x01(\tH\x01\x88\x01\x01\x42\x08\n\x06_errorB\t\n\x07_output\"c\n\rOverridesData\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x17\n\x0f\x63\x61ller_filename\x18\x04 \x01(\t\"\x17\n\x15OverridesDataResponse\"W\n\x10HeartbeatRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x30\n\x0cheartbeat_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x13\n\x11HeartbeatResponse\"S\n\x15RefreshTimeoutRequest\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x1c\n\x14increment_timeout_by\x18\x02 \x01(\t\"H\n\x16RefreshTimeoutResponse\x12.\n\ntimeout_at\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"2\n\x12ReleaseSlotRequest\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\"\x15\n\x13ReleaseSlotResponse\"\x13\n\x11GetVersionRequest\"%\n\x12GetVersionResponse\x12\x0f\n\x07version\x18\x01 \x01(\t*A\n\x04SDKS\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02GO\x10\x01\x12\n\n\x06PYTHON\x10\x02\x12\x0e\n\nTYPESCRIPT\x10\x03\x12\x08\n\x04RUBY\x10\x04*N\n\nActionType\x12\x12\n\x0eSTART_STEP_RUN\x10\x00\x12\x13\n\x0f\x43\x41NCEL_STEP_RUN\x10\x01\x12\x17\n\x13START_GET_GROUP_KEY\x10\x02*\xa2\x01\n\x17GroupKeyActionEventType\x12 \n\x1cGROUP_KEY_EVENT_TYPE_UNKNOWN\x10\x00\x12 
\n\x1cGROUP_KEY_EVENT_TYPE_STARTED\x10\x01\x12\"\n\x1eGROUP_KEY_EVENT_TYPE_COMPLETED\x10\x02\x12\x1f\n\x1bGROUP_KEY_EVENT_TYPE_FAILED\x10\x03*\xac\x01\n\x13StepActionEventType\x12\x1b\n\x17STEP_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1b\n\x17STEP_EVENT_TYPE_STARTED\x10\x01\x12\x1d\n\x19STEP_EVENT_TYPE_COMPLETED\x10\x02\x12\x1a\n\x16STEP_EVENT_TYPE_FAILED\x10\x03\x12 \n\x1cSTEP_EVENT_TYPE_ACKNOWLEDGED\x10\x04*e\n\x0cResourceType\x12\x19\n\x15RESOURCE_TYPE_UNKNOWN\x10\x00\x12\x1a\n\x16RESOURCE_TYPE_STEP_RUN\x10\x01\x12\x1e\n\x1aRESOURCE_TYPE_WORKFLOW_RUN\x10\x02*\xfe\x01\n\x11ResourceEventType\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_STARTED\x10\x01\x12!\n\x1dRESOURCE_EVENT_TYPE_COMPLETED\x10\x02\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_FAILED\x10\x03\x12!\n\x1dRESOURCE_EVENT_TYPE_CANCELLED\x10\x04\x12!\n\x1dRESOURCE_EVENT_TYPE_TIMED_OUT\x10\x05\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_STREAM\x10\x06*<\n\x14WorkflowRunEventType\x12$\n WORKFLOW_RUN_EVENT_TYPE_FINISHED\x10\x00\x32\xb1\x07\n\nDispatcher\x12=\n\x08Register\x12\x16.WorkerRegisterRequest\x1a\x17.WorkerRegisterResponse\"\x00\x12\x33\n\x06Listen\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x35\n\x08ListenV2\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x34\n\tHeartbeat\x12\x11.HeartbeatRequest\x1a\x12.HeartbeatResponse\"\x00\x12R\n\x19SubscribeToWorkflowEvents\x12!.SubscribeToWorkflowEventsRequest\x1a\x0e.WorkflowEvent\"\x00\x30\x01\x12S\n\x17SubscribeToWorkflowRuns\x12\x1f.SubscribeToWorkflowRunsRequest\x1a\x11.WorkflowRunEvent\"\x00(\x01\x30\x01\x12?\n\x13SendStepActionEvent\x12\x10.StepActionEvent\x1a\x14.ActionEventResponse\"\x00\x12G\n\x17SendGroupKeyActionEvent\x12\x14.GroupKeyActionEvent\x1a\x14.ActionEventResponse\"\x00\x12<\n\x10PutOverridesData\x12\x0e.OverridesData\x1a\x16.OverridesDataResponse\"\x00\x12\x46\n\x0bUnsubscribe\x12\x19.WorkerUnsubscribeRequest\x1a\x1a.WorkerUnsubscribeResponse\"\x00\x12\x43\n\x0eRefreshTimeout\x12\x16.RefreshTimeoutRequest\x1a\x17.RefreshTimeoutResponse\"\x00\x12:\n\x0bReleaseSlot\x12\x13.ReleaseSlotRequest\x1a\x14.ReleaseSlotResponse\"\x00\x12O\n\x12UpsertWorkerLabels\x12\x1a.UpsertWorkerLabelsRequest\x1a\x1b.UpsertWorkerLabelsResponse\"\x00\x12\x37\n\nGetVersion\x12\x12.GetVersionRequest\x1a\x13.GetVersionResponse\"\x00\x42GZEgithub.com/hatchet-dev/hatchet/internal/services/dispatcher/contractsb\x06proto3" pool = ::Google::Protobuf::DescriptorPool.generated_pool pool.add_serialized_file(descriptor_data) @@ -38,6 +38,8 @@ RefreshTimeoutResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("RefreshTimeoutResponse").msgclass ReleaseSlotRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("ReleaseSlotRequest").msgclass ReleaseSlotResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("ReleaseSlotResponse").msgclass +GetVersionRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("GetVersionRequest").msgclass +GetVersionResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("GetVersionResponse").msgclass SDKS = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("SDKS").enummodule ActionType = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("ActionType").enummodule GroupKeyActionEventType = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("GroupKeyActionEventType").enummodule diff --git a/sdks/ruby/src/lib/hatchet/contracts/dispatcher/dispatcher_services_pb.rb b/sdks/ruby/src/lib/hatchet/contracts/dispatcher/dispatcher_services_pb.rb index 
e3fd772b51..9684ac60bf 100644 --- a/sdks/ruby/src/lib/hatchet/contracts/dispatcher/dispatcher_services_pb.rb +++ b/sdks/ruby/src/lib/hatchet/contracts/dispatcher/dispatcher_services_pb.rb @@ -29,6 +29,10 @@ class Service rpc :RefreshTimeout, ::RefreshTimeoutRequest, ::RefreshTimeoutResponse rpc :ReleaseSlot, ::ReleaseSlotRequest, ::ReleaseSlotResponse rpc :UpsertWorkerLabels, ::UpsertWorkerLabelsRequest, ::UpsertWorkerLabelsResponse + # GetVersion returns the dispatcher protocol version as a simple integer. + # SDKs use this to determine feature support (e.g. slot_config registration). + # Old engines that do not implement this RPC will return UNIMPLEMENTED. + rpc :GetVersion, ::GetVersionRequest, ::GetVersionResponse end Stub = Service.rpc_stub_class diff --git a/sdks/ruby/src/lib/hatchet/contracts/v1/workflows_pb.rb b/sdks/ruby/src/lib/hatchet/contracts/v1/workflows_pb.rb index 3d540a3f2d..78c917369b 100644 --- a/sdks/ruby/src/lib/hatchet/contracts/v1/workflows_pb.rb +++ b/sdks/ruby/src/lib/hatchet/contracts/v1/workflows_pb.rb @@ -8,7 +8,7 @@ require 'v1/shared/condition_pb' -descriptor_data = "\n\x12v1/workflows.proto\x12\x02v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19v1/shared/condition.proto\"[\n\x12\x43\x61ncelTasksRequest\x12\x14\n\x0c\x65xternal_ids\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"[\n\x12ReplayTasksRequest\x12\x14\n\x0c\x65xternal_ids\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"\xb7\x01\n\x0bTasksFilter\x12\x10\n\x08statuses\x18\x01 \x03(\t\x12)\n\x05since\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\x05until\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x88\x01\x01\x12\x14\n\x0cworkflow_ids\x18\x04 \x03(\t\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x03(\tB\x08\n\x06_until\".\n\x13\x43\x61ncelTasksResponse\x12\x17\n\x0f\x63\x61ncelled_tasks\x18\x01 \x03(\t\"-\n\x13ReplayTasksResponse\x12\x16\n\x0ereplayed_tasks\x18\x01 \x03(\t\"\x82\x01\n\x19TriggerWorkflowRunRequest\x12\x15\n\rworkflow_name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x02 \x01(\x0c\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x03 \x01(\x0c\x12\x15\n\x08priority\x18\x04 \x01(\x05H\x00\x88\x01\x01\x42\x0b\n\t_priority\"1\n\x1aTriggerWorkflowRunResponse\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"\xac\x04\n\x1c\x43reateWorkflowVersionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x16\n\x0e\x65vent_triggers\x18\x04 \x03(\t\x12\x15\n\rcron_triggers\x18\x05 \x03(\t\x12!\n\x05tasks\x18\x06 \x03(\x0b\x32\x12.v1.CreateTaskOpts\x12$\n\x0b\x63oncurrency\x18\x07 \x01(\x0b\x32\x0f.v1.Concurrency\x12\x17\n\ncron_input\x18\x08 \x01(\tH\x00\x88\x01\x01\x12\x30\n\x0fon_failure_task\x18\t \x01(\x0b\x32\x12.v1.CreateTaskOptsH\x01\x88\x01\x01\x12\'\n\x06sticky\x18\n \x01(\x0e\x32\x12.v1.StickyStrategyH\x02\x88\x01\x01\x12\x1d\n\x10\x64\x65\x66\x61ult_priority\x18\x0b \x01(\x05H\x03\x88\x01\x01\x12(\n\x0f\x63oncurrency_arr\x18\x0c \x03(\x0b\x32\x0f.v1.Concurrency\x12*\n\x0f\x64\x65\x66\x61ult_filters\x18\r \x03(\x0b\x32\x11.v1.DefaultFilter\x12\x1e\n\x11input_json_schema\x18\x0e \x01(\x0cH\x04\x88\x01\x01\x42\r\n\x0b_cron_inputB\x12\n\x10_on_failure_taskB\t\n\x07_stickyB\x13\n\x11_default_priorityB\x14\n\x12_input_json_schema\"T\n\rDefaultFilter\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\r\n\x05scope\x18\x02 \x01(\t\x12\x14\n\x07payload\x18\x03 
\x01(\x0cH\x00\x88\x01\x01\x42\n\n\x08_payload\"\x93\x01\n\x0b\x43oncurrency\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\x15\n\x08max_runs\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x39\n\x0elimit_strategy\x18\x03 \x01(\x0e\x32\x1c.v1.ConcurrencyLimitStrategyH\x01\x88\x01\x01\x42\x0b\n\t_max_runsB\x11\n\x0f_limit_strategy\"\xe8\x01\n\x13\x44\x65siredWorkerLabels\x12\x16\n\tstr_value\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tint_value\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x15\n\x08required\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x32\n\ncomparator\x18\x04 \x01(\x0e\x32\x19.v1.WorkerLabelComparatorH\x03\x88\x01\x01\x12\x13\n\x06weight\x18\x05 \x01(\x05H\x04\x88\x01\x01\x42\x0c\n\n_str_valueB\x0c\n\n_int_valueB\x0b\n\t_requiredB\r\n\x0b_comparatorB\t\n\x07_weight\"\xb1\x04\n\x0e\x43reateTaskOpts\x12\x13\n\x0breadable_id\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x63tion\x18\x02 \x01(\t\x12\x0f\n\x07timeout\x18\x03 \x01(\t\x12\x0e\n\x06inputs\x18\x04 \x01(\t\x12\x0f\n\x07parents\x18\x05 \x03(\t\x12\x0f\n\x07retries\x18\x06 \x01(\x05\x12,\n\x0brate_limits\x18\x07 \x03(\x0b\x32\x17.v1.CreateTaskRateLimit\x12;\n\rworker_labels\x18\x08 \x03(\x0b\x32$.v1.CreateTaskOpts.WorkerLabelsEntry\x12\x1b\n\x0e\x62\x61\x63koff_factor\x18\t \x01(\x02H\x00\x88\x01\x01\x12 \n\x13\x62\x61\x63koff_max_seconds\x18\n \x01(\x05H\x01\x88\x01\x01\x12$\n\x0b\x63oncurrency\x18\x0b \x03(\x0b\x32\x0f.v1.Concurrency\x12+\n\nconditions\x18\x0c \x01(\x0b\x32\x12.v1.TaskConditionsH\x02\x88\x01\x01\x12\x1d\n\x10schedule_timeout\x18\r \x01(\tH\x03\x88\x01\x01\x1aL\n\x11WorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.v1.DesiredWorkerLabels:\x02\x38\x01\x42\x11\n\x0f_backoff_factorB\x16\n\x14_backoff_max_secondsB\r\n\x0b_conditionsB\x13\n\x11_schedule_timeout\"\xfd\x01\n\x13\x43reateTaskRateLimit\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\x05units\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x15\n\x08key_expr\x18\x03 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nunits_expr\x18\x04 \x01(\tH\x02\x88\x01\x01\x12\x1e\n\x11limit_values_expr\x18\x05 \x01(\tH\x03\x88\x01\x01\x12,\n\x08\x64uration\x18\x06 \x01(\x0e\x32\x15.v1.RateLimitDurationH\x04\x88\x01\x01\x42\x08\n\x06_unitsB\x0b\n\t_key_exprB\r\n\x0b_units_exprB\x14\n\x12_limit_values_exprB\x0b\n\t_duration\"@\n\x1d\x43reateWorkflowVersionResponse\x12\n\n\x02id\x18\x01 \x01(\t\x12\x13\n\x0bworkflow_id\x18\x02 \x01(\t\"+\n\x14GetRunDetailsRequest\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"\x96\x01\n\rTaskRunDetail\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\x12\x1d\n\x06status\x18\x02 \x01(\x0e\x32\r.v1.RunStatus\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x04 \x01(\x0cH\x01\x88\x01\x01\x12\x13\n\x0breadable_id\x18\x05 \x01(\tB\x08\n\x06_errorB\t\n\x07_output\"\xf0\x01\n\x15GetRunDetailsResponse\x12\r\n\x05input\x18\x01 \x01(\x0c\x12\x1d\n\x06status\x18\x02 \x01(\x0e\x32\r.v1.RunStatus\x12:\n\ttask_runs\x18\x03 \x03(\x0b\x32\'.v1.GetRunDetailsResponse.TaskRunsEntry\x12\x0c\n\x04\x64one\x18\x04 \x01(\x08\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x01(\x0c\x1a\x42\n\rTaskRunsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 
\x01(\x0b\x32\x11.v1.TaskRunDetail:\x02\x38\x01*$\n\x0eStickyStrategy\x12\x08\n\x04SOFT\x10\x00\x12\x08\n\x04HARD\x10\x01*]\n\x11RateLimitDuration\x12\n\n\x06SECOND\x10\x00\x12\n\n\x06MINUTE\x10\x01\x12\x08\n\x04HOUR\x10\x02\x12\x07\n\x03\x44\x41Y\x10\x03\x12\x08\n\x04WEEK\x10\x04\x12\t\n\x05MONTH\x10\x05\x12\x08\n\x04YEAR\x10\x06*N\n\tRunStatus\x12\n\n\x06QUEUED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tCOMPLETED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tCANCELLED\x10\x04*\x7f\n\x18\x43oncurrencyLimitStrategy\x12\x16\n\x12\x43\x41NCEL_IN_PROGRESS\x10\x00\x12\x0f\n\x0b\x44ROP_NEWEST\x10\x01\x12\x10\n\x0cQUEUE_NEWEST\x10\x02\x12\x15\n\x11GROUP_ROUND_ROBIN\x10\x03\x12\x11\n\rCANCEL_NEWEST\x10\x04*\x85\x01\n\x15WorkerLabelComparator\x12\t\n\x05\x45QUAL\x10\x00\x12\r\n\tNOT_EQUAL\x10\x01\x12\x10\n\x0cGREATER_THAN\x10\x02\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x03\x12\r\n\tLESS_THAN\x10\x04\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x05\x32\xfd\x02\n\x0c\x41\x64minService\x12R\n\x0bPutWorkflow\x12 .v1.CreateWorkflowVersionRequest\x1a!.v1.CreateWorkflowVersionResponse\x12>\n\x0b\x43\x61ncelTasks\x12\x16.v1.CancelTasksRequest\x1a\x17.v1.CancelTasksResponse\x12>\n\x0bReplayTasks\x12\x16.v1.ReplayTasksRequest\x1a\x17.v1.ReplayTasksResponse\x12S\n\x12TriggerWorkflowRun\x12\x1d.v1.TriggerWorkflowRunRequest\x1a\x1e.v1.TriggerWorkflowRunResponse\x12\x44\n\rGetRunDetails\x12\x18.v1.GetRunDetailsRequest\x1a\x19.v1.GetRunDetailsResponseBBZ@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1b\x06proto3" +descriptor_data = "\n\x12v1/workflows.proto\x12\x02v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19v1/shared/condition.proto\"[\n\x12\x43\x61ncelTasksRequest\x12\x14\n\x0c\x65xternal_ids\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"[\n\x12ReplayTasksRequest\x12\x14\n\x0c\x65xternal_ids\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"\xb7\x01\n\x0bTasksFilter\x12\x10\n\x08statuses\x18\x01 \x03(\t\x12)\n\x05since\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\x05until\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x88\x01\x01\x12\x14\n\x0cworkflow_ids\x18\x04 \x03(\t\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x03(\tB\x08\n\x06_until\".\n\x13\x43\x61ncelTasksResponse\x12\x17\n\x0f\x63\x61ncelled_tasks\x18\x01 \x03(\t\"-\n\x13ReplayTasksResponse\x12\x16\n\x0ereplayed_tasks\x18\x01 \x03(\t\"\x82\x01\n\x19TriggerWorkflowRunRequest\x12\x15\n\rworkflow_name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x02 \x01(\x0c\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x03 \x01(\x0c\x12\x15\n\x08priority\x18\x04 \x01(\x05H\x00\x88\x01\x01\x42\x0b\n\t_priority\"1\n\x1aTriggerWorkflowRunResponse\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"\xac\x04\n\x1c\x43reateWorkflowVersionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x16\n\x0e\x65vent_triggers\x18\x04 \x03(\t\x12\x15\n\rcron_triggers\x18\x05 \x03(\t\x12!\n\x05tasks\x18\x06 \x03(\x0b\x32\x12.v1.CreateTaskOpts\x12$\n\x0b\x63oncurrency\x18\x07 \x01(\x0b\x32\x0f.v1.Concurrency\x12\x17\n\ncron_input\x18\x08 \x01(\tH\x00\x88\x01\x01\x12\x30\n\x0fon_failure_task\x18\t \x01(\x0b\x32\x12.v1.CreateTaskOptsH\x01\x88\x01\x01\x12\'\n\x06sticky\x18\n \x01(\x0e\x32\x12.v1.StickyStrategyH\x02\x88\x01\x01\x12\x1d\n\x10\x64\x65\x66\x61ult_priority\x18\x0b \x01(\x05H\x03\x88\x01\x01\x12(\n\x0f\x63oncurrency_arr\x18\x0c 
\x03(\x0b\x32\x0f.v1.Concurrency\x12*\n\x0f\x64\x65\x66\x61ult_filters\x18\r \x03(\x0b\x32\x11.v1.DefaultFilter\x12\x1e\n\x11input_json_schema\x18\x0e \x01(\x0cH\x04\x88\x01\x01\x42\r\n\x0b_cron_inputB\x12\n\x10_on_failure_taskB\t\n\x07_stickyB\x13\n\x11_default_priorityB\x14\n\x12_input_json_schema\"T\n\rDefaultFilter\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\r\n\x05scope\x18\x02 \x01(\t\x12\x14\n\x07payload\x18\x03 \x01(\x0cH\x00\x88\x01\x01\x42\n\n\x08_payload\"\x93\x01\n\x0b\x43oncurrency\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\x15\n\x08max_runs\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x39\n\x0elimit_strategy\x18\x03 \x01(\x0e\x32\x1c.v1.ConcurrencyLimitStrategyH\x01\x88\x01\x01\x42\x0b\n\t_max_runsB\x11\n\x0f_limit_strategy\"\xe8\x01\n\x13\x44\x65siredWorkerLabels\x12\x16\n\tstr_value\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tint_value\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x15\n\x08required\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x32\n\ncomparator\x18\x04 \x01(\x0e\x32\x19.v1.WorkerLabelComparatorH\x03\x88\x01\x01\x12\x13\n\x06weight\x18\x05 \x01(\x05H\x04\x88\x01\x01\x42\x0c\n\n_str_valueB\x0c\n\n_int_valueB\x0b\n\t_requiredB\r\n\x0b_comparatorB\t\n\x07_weight\"\xb7\x05\n\x0e\x43reateTaskOpts\x12\x13\n\x0breadable_id\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x63tion\x18\x02 \x01(\t\x12\x0f\n\x07timeout\x18\x03 \x01(\t\x12\x0e\n\x06inputs\x18\x04 \x01(\t\x12\x0f\n\x07parents\x18\x05 \x03(\t\x12\x0f\n\x07retries\x18\x06 \x01(\x05\x12,\n\x0brate_limits\x18\x07 \x03(\x0b\x32\x17.v1.CreateTaskRateLimit\x12;\n\rworker_labels\x18\x08 \x03(\x0b\x32$.v1.CreateTaskOpts.WorkerLabelsEntry\x12\x1b\n\x0e\x62\x61\x63koff_factor\x18\t \x01(\x02H\x00\x88\x01\x01\x12 \n\x13\x62\x61\x63koff_max_seconds\x18\n \x01(\x05H\x01\x88\x01\x01\x12$\n\x0b\x63oncurrency\x18\x0b \x03(\x0b\x32\x0f.v1.Concurrency\x12+\n\nconditions\x18\x0c \x01(\x0b\x32\x12.v1.TaskConditionsH\x02\x88\x01\x01\x12\x1d\n\x10schedule_timeout\x18\r \x01(\tH\x03\x88\x01\x01\x12\x12\n\nis_durable\x18\x0e \x01(\x08\x12;\n\rslot_requests\x18\x0f \x03(\x0b\x32$.v1.CreateTaskOpts.SlotRequestsEntry\x1aL\n\x11WorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.v1.DesiredWorkerLabels:\x02\x38\x01\x1a\x33\n\x11SlotRequestsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x11\n\x0f_backoff_factorB\x16\n\x14_backoff_max_secondsB\r\n\x0b_conditionsB\x13\n\x11_schedule_timeout\"\xfd\x01\n\x13\x43reateTaskRateLimit\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\x05units\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x15\n\x08key_expr\x18\x03 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nunits_expr\x18\x04 \x01(\tH\x02\x88\x01\x01\x12\x1e\n\x11limit_values_expr\x18\x05 \x01(\tH\x03\x88\x01\x01\x12,\n\x08\x64uration\x18\x06 \x01(\x0e\x32\x15.v1.RateLimitDurationH\x04\x88\x01\x01\x42\x08\n\x06_unitsB\x0b\n\t_key_exprB\r\n\x0b_units_exprB\x14\n\x12_limit_values_exprB\x0b\n\t_duration\"@\n\x1d\x43reateWorkflowVersionResponse\x12\n\n\x02id\x18\x01 \x01(\t\x12\x13\n\x0bworkflow_id\x18\x02 \x01(\t\"+\n\x14GetRunDetailsRequest\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"\x96\x01\n\rTaskRunDetail\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\x12\x1d\n\x06status\x18\x02 \x01(\x0e\x32\r.v1.RunStatus\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x04 \x01(\x0cH\x01\x88\x01\x01\x12\x13\n\x0breadable_id\x18\x05 \x01(\tB\x08\n\x06_errorB\t\n\x07_output\"\xf0\x01\n\x15GetRunDetailsResponse\x12\r\n\x05input\x18\x01 \x01(\x0c\x12\x1d\n\x06status\x18\x02 
\x01(\x0e\x32\r.v1.RunStatus\x12:\n\ttask_runs\x18\x03 \x03(\x0b\x32\'.v1.GetRunDetailsResponse.TaskRunsEntry\x12\x0c\n\x04\x64one\x18\x04 \x01(\x08\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x01(\x0c\x1a\x42\n\rTaskRunsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.v1.TaskRunDetail:\x02\x38\x01*$\n\x0eStickyStrategy\x12\x08\n\x04SOFT\x10\x00\x12\x08\n\x04HARD\x10\x01*]\n\x11RateLimitDuration\x12\n\n\x06SECOND\x10\x00\x12\n\n\x06MINUTE\x10\x01\x12\x08\n\x04HOUR\x10\x02\x12\x07\n\x03\x44\x41Y\x10\x03\x12\x08\n\x04WEEK\x10\x04\x12\t\n\x05MONTH\x10\x05\x12\x08\n\x04YEAR\x10\x06*N\n\tRunStatus\x12\n\n\x06QUEUED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tCOMPLETED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tCANCELLED\x10\x04*\x7f\n\x18\x43oncurrencyLimitStrategy\x12\x16\n\x12\x43\x41NCEL_IN_PROGRESS\x10\x00\x12\x0f\n\x0b\x44ROP_NEWEST\x10\x01\x12\x10\n\x0cQUEUE_NEWEST\x10\x02\x12\x15\n\x11GROUP_ROUND_ROBIN\x10\x03\x12\x11\n\rCANCEL_NEWEST\x10\x04*\x85\x01\n\x15WorkerLabelComparator\x12\t\n\x05\x45QUAL\x10\x00\x12\r\n\tNOT_EQUAL\x10\x01\x12\x10\n\x0cGREATER_THAN\x10\x02\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x03\x12\r\n\tLESS_THAN\x10\x04\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x05\x32\xfd\x02\n\x0c\x41\x64minService\x12R\n\x0bPutWorkflow\x12 .v1.CreateWorkflowVersionRequest\x1a!.v1.CreateWorkflowVersionResponse\x12>\n\x0b\x43\x61ncelTasks\x12\x16.v1.CancelTasksRequest\x1a\x17.v1.CancelTasksResponse\x12>\n\x0bReplayTasks\x12\x16.v1.ReplayTasksRequest\x1a\x17.v1.ReplayTasksResponse\x12S\n\x12TriggerWorkflowRun\x12\x1d.v1.TriggerWorkflowRunRequest\x1a\x1e.v1.TriggerWorkflowRunResponse\x12\x44\n\rGetRunDetails\x12\x18.v1.GetRunDetailsRequest\x1a\x19.v1.GetRunDetailsResponseBBZ@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1b\x06proto3" pool = ::Google::Protobuf::DescriptorPool.generated_pool pool.add_serialized_file(descriptor_data) diff --git a/sdks/typescript/src/clients/dispatcher/dispatcher-client.ts b/sdks/typescript/src/clients/dispatcher/dispatcher-client.ts index e7eb9b63d5..73c952fbd9 100644 --- a/sdks/typescript/src/clients/dispatcher/dispatcher-client.ts +++ b/sdks/typescript/src/clients/dispatcher/dispatcher-client.ts @@ -16,6 +16,7 @@ import { Logger } from '@hatchet/util/logger'; import { retrier } from '@hatchet/util/retrier'; import { HATCHET_VERSION } from '@hatchet/version'; +import { SlotConfig, SlotType } from '@hatchet/v1/slot-types'; import { ActionListener } from './action-listener'; export type WorkerLabels = Record; @@ -24,6 +25,12 @@ interface GetActionListenerOptions { workerName: string; services: string[]; actions: string[]; + slotConfig?: SlotConfig; + /** @deprecated use slotConfig */ + slots?: number; + /** @deprecated use slotConfig */ + durableSlots?: number; + /** @deprecated use slots */ maxRuns?: number; labels: Record; } @@ -57,10 +64,22 @@ export class DispatcherClient { async getActionListener(options: GetActionListenerOptions) { // Register the worker - const { maxRuns, ...rest } = options; + const slotConfig = + options.slotConfig || + (options.slots || options.durableSlots || options.maxRuns + ? { + ...(options.slots || options.maxRuns + ? { [SlotType.Default]: options.slots || options.maxRuns || 0 } + : {}), + ...(options.durableSlots ? 
{ [SlotType.Durable]: options.durableSlots } : {}), + } + : undefined); + const registration = await this.client.register({ - ...rest, - slots: maxRuns, + workerName: options.workerName, + services: options.services, + actions: options.actions, + slotConfig, labels: options.labels ? mapLabels(options.labels) : undefined, runtimeInfo: this.getRuntimeInfo(), }); @@ -68,6 +87,15 @@ return new ActionListener(this, registration.workerId); } + /** + * Calls the GetVersion RPC. Returns the engine semantic version string. + * Throws a gRPC error with code UNIMPLEMENTED on older engines. + */ + async getVersion(): Promise<string> { + const response = await this.client.getVersion({}); + return response.version; + } + async sendStepActionEvent(in_: StepActionEventInput) { const { taskId, taskRunExternalId, ...rest } = in_; const event: StepActionEvent = { @@ -120,7 +148,7 @@ } } -function mapLabels(in_: WorkerLabels): Record { +export function mapLabels(in_: WorkerLabels): Record { return Object.entries(in_).reduce>( (acc, [key, value]) => ({ ...acc, diff --git a/sdks/typescript/src/clients/rest/generated/data-contracts.ts index 51d38addcd..86199fca9a 100644 --- a/sdks/typescript/src/clients/rest/generated/data-contracts.ts +++ b/sdks/typescript/src/clients/rest/generated/data-contracts.ts @@ -1569,6 +1569,10 @@ export interface Step { action: string; /** The timeout of the step. */ timeout?: string; + /** Whether the step is durable. */ + isDurable?: boolean; + /** Slot requests for the step (slot_type -> units). */ + slotRequests?: Record<string, number>; children?: string[]; parents?: string[]; } @@ -2147,6 +2151,14 @@ export interface RecentStepRuns { workflowRunId: string; } +/** Slot availability and limits for a slot type. */ +export interface WorkerSlotConfig { + /** The number of available units for this slot type. */ + available?: number; + /** The maximum number of units for this slot type. */ + limit: number; +} + export interface WorkerLabel { metadata: APIResourceMeta; /** The key of the label. */ @@ -2190,10 +2202,8 @@ export interface Worker { recentStepRuns?: RecentStepRuns[]; /** The status of the worker. */ status?: 'ACTIVE' | 'INACTIVE' | 'PAUSED'; - /** The maximum number of runs this worker can execute concurrently. */ - maxRuns?: number; - /** The number of runs this worker can execute concurrently. */ - availableRuns?: number; + /** Slot availability and limits for this worker (slot_type -> { available, limit }). */ + slotConfig?: Record<string, WorkerSlotConfig>; /** * the id of the assigned dispatcher, in UUID format * @format uuid diff --git a/sdks/typescript/src/protoc/dispatcher/dispatcher.ts index ffc06f9481..f933ce3bbe 100644 --- a/sdks/typescript/src/protoc/dispatcher/dispatcher.ts +++ b/sdks/typescript/src/protoc/dispatcher/dispatcher.ts @@ -347,7 +347,10 @@ export interface WorkerRegisterRequest { actions: string[]; /** (optional) the services for this worker */ services: string[]; - /** (optional) the number of slots this worker can handle */ + /** + * (optional) the number of default slots this worker can handle + * deprecated: use slot_config instead + */ slots?: number | undefined; /** (optional) worker labels (i.e. 
state or other metadata) */ labels: { [key: string]: WorkerLabels }; @@ -355,6 +358,8 @@ webhookId?: string | undefined; /** (optional) information regarding the runtime environment of the worker */ runtimeInfo?: RuntimeInfo | undefined; + /** (optional) slot config for this worker (slot_type -> units) */ + slotConfig: { [key: string]: number }; } export interface WorkerRegisterRequest_LabelsEntry { @@ -362,6 +367,11 @@ key: string; value: WorkerLabels | undefined; } +export interface WorkerRegisterRequest_SlotConfigEntry { + key: string; + value: number; +} + export interface WorkerRegisterResponse { /** the tenant id */ tenantId: string; @@ -586,6 +596,12 @@ export interface ReleaseSlotRequest { export interface ReleaseSlotResponse {} +export interface GetVersionRequest {} + +export interface GetVersionResponse { + version: string; +} + function createBaseWorkerLabels(): WorkerLabels { return { strValue: undefined, intValue: undefined }; } @@ -803,6 +819,7 @@ function createBaseWorkerRegisterRequest(): WorkerRegisterRequest { labels: {}, webhookId: undefined, runtimeInfo: undefined, + slotConfig: {}, }; } @@ -832,6 +849,12 @@ export const WorkerRegisterRequest: MessageFns<WorkerRegisterRequest> = { if (message.runtimeInfo !== undefined) { RuntimeInfo.encode(message.runtimeInfo, writer.uint32(58).fork()).join(); } + Object.entries(message.slotConfig).forEach(([key, value]) => { + WorkerRegisterRequest_SlotConfigEntry.encode( + { key: key as any, value }, + writer.uint32(74).fork() + ).join(); + }); return writer; }, @@ -901,6 +924,17 @@ message.runtimeInfo = RuntimeInfo.decode(reader, reader.uint32()); continue; } + case 9: { + if (tag !== 74) { + break; + } + + const entry9 = WorkerRegisterRequest_SlotConfigEntry.decode(reader, reader.uint32()); + if (entry9.value !== undefined) { + message.slotConfig[entry9.key] = entry9.value; + } + continue; + } } if ((tag & 7) === 4 || tag === 0) { break; @@ -931,6 +965,15 @@ : {}, webhookId: isSet(object.webhookId) ? globalThis.String(object.webhookId) : undefined, runtimeInfo: isSet(object.runtimeInfo) ? RuntimeInfo.fromJSON(object.runtimeInfo) : undefined, + slotConfig: isObject(object.slotConfig) + ? Object.entries(object.slotConfig).reduce<{ [key: string]: number }>( + (acc, [key, value]) => { + acc[key] = Number(value); + return acc; + }, + {} + ) + : {}, }; }, @@ -963,6 +1006,15 @@ if (message.runtimeInfo !== undefined) { obj.runtimeInfo = RuntimeInfo.toJSON(message.runtimeInfo); } + if (message.slotConfig) { + const entries = Object.entries(message.slotConfig); + if (entries.length > 0) { + obj.slotConfig = {}; + entries.forEach(([k, v]) => { + obj.slotConfig[k] = Math.round(v); + }); + } + } return obj; }, @@ -989,6 +1041,15 @@ object.runtimeInfo !== undefined && object.runtimeInfo !== null ? RuntimeInfo.fromPartial(object.runtimeInfo) : undefined; + message.slotConfig = Object.entries(object.slotConfig ?? 
{}).reduce<{ [key: string]: number }>( + (acc, [key, value]) => { + if (value !== undefined) { + acc[key] = globalThis.Number(value); + } + return acc; + }, + {} + ); return message; }, }; @@ -1077,6 +1138,93 @@ export const WorkerRegisterRequest_LabelsEntry: MessageFns = + { + encode( + message: WorkerRegisterRequest_SlotConfigEntry, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.key !== '') { + writer.uint32(10).string(message.key); + } + if (message.value !== 0) { + writer.uint32(16).int32(message.value); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number + ): WorkerRegisterRequest_SlotConfigEntry { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseWorkerRegisterRequest_SlotConfigEntry(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.key = reader.string(); + continue; + } + case 2: { + if (tag !== 16) { + break; + } + + message.value = reader.int32(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): WorkerRegisterRequest_SlotConfigEntry { + return { + key: isSet(object.key) ? globalThis.String(object.key) : '', + value: isSet(object.value) ? globalThis.Number(object.value) : 0, + }; + }, + + toJSON(message: WorkerRegisterRequest_SlotConfigEntry): unknown { + const obj: any = {}; + if (message.key !== '') { + obj.key = message.key; + } + if (message.value !== 0) { + obj.value = Math.round(message.value); + } + return obj; + }, + + create( + base?: DeepPartial + ): WorkerRegisterRequest_SlotConfigEntry { + return WorkerRegisterRequest_SlotConfigEntry.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial + ): WorkerRegisterRequest_SlotConfigEntry { + const message = createBaseWorkerRegisterRequest_SlotConfigEntry(); + message.key = object.key ?? ''; + message.value = object.value ?? 0; + return message; + }, + }; + function createBaseWorkerRegisterResponse(): WorkerRegisterResponse { return { tenantId: '', workerId: '', workerName: '' }; } @@ -3660,6 +3808,107 @@ export const ReleaseSlotResponse: MessageFns = { }, }; +function createBaseGetVersionRequest(): GetVersionRequest { + return {}; +} + +export const GetVersionRequest: MessageFns = { + encode(_: GetVersionRequest, writer: BinaryWriter = new BinaryWriter()): BinaryWriter { + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): GetVersionRequest { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGetVersionRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): GetVersionRequest { + return {}; + }, + + toJSON(_: GetVersionRequest): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): GetVersionRequest { + return GetVersionRequest.fromPartial(base ?? 
+  fromPartial(_: DeepPartial<GetVersionRequest>): GetVersionRequest {
+    const message = createBaseGetVersionRequest();
+    return message;
+  },
+};
+
+function createBaseGetVersionResponse(): GetVersionResponse {
+  return { version: '' };
+}
+
+export const GetVersionResponse: MessageFns<GetVersionResponse> = {
+  encode(message: GetVersionResponse, writer: BinaryWriter = new BinaryWriter()): BinaryWriter {
+    if (message.version !== '') {
+      writer.uint32(10).string(message.version);
+    }
+    return writer;
+  },
+
+  decode(input: BinaryReader | Uint8Array, length?: number): GetVersionResponse {
+    const reader = input instanceof BinaryReader ? input : new BinaryReader(input);
+    const end = length === undefined ? reader.len : reader.pos + length;
+    const message = createBaseGetVersionResponse();
+    while (reader.pos < end) {
+      const tag = reader.uint32();
+      switch (tag >>> 3) {
+        case 1: {
+          if (tag !== 10) {
+            break;
+          }
+
+          message.version = reader.string();
+          continue;
+        }
+      }
+      if ((tag & 7) === 4 || tag === 0) {
+        break;
+      }
+      reader.skip(tag & 7);
+    }
+    return message;
+  },
+
+  fromJSON(object: any): GetVersionResponse {
+    return { version: isSet(object.version) ? globalThis.String(object.version) : '' };
+  },
+
+  toJSON(message: GetVersionResponse): unknown {
+    const obj: any = {};
+    if (message.version !== '') {
+      obj.version = message.version;
+    }
+    return obj;
+  },
+
+  create(base?: DeepPartial<GetVersionResponse>): GetVersionResponse {
+    return GetVersionResponse.fromPartial(base ?? {});
+  },
+  fromPartial(object: DeepPartial<GetVersionResponse>): GetVersionResponse {
+    const message = createBaseGetVersionResponse();
+    message.version = object.version ?? '';
+    return message;
+  },
+};
+
 export type DispatcherDefinition = typeof DispatcherDefinition;
 export const DispatcherDefinition = {
   name: 'Dispatcher',
@@ -3774,6 +4023,19 @@
       responseStream: false,
       options: {},
     },
+    /**
+     * GetVersion returns the engine's semantic version string (e.g. v0.78.23).
+     * SDKs use this to determine feature support (e.g. slot_config registration).
+     * Old engines that do not implement this RPC will return UNIMPLEMENTED.
+     */
+    getVersion: {
+      name: 'GetVersion',
+      requestType: GetVersionRequest,
+      requestStream: false,
+      responseType: GetVersionResponse,
+      responseStream: false,
+      options: {},
+    },
   },
 } as const;
 
@@ -3835,6 +4097,15 @@ export interface DispatcherServiceImplementation {
     request: UpsertWorkerLabelsRequest,
     context: CallContext & CallContextExt
   ): Promise<DeepPartial<UpsertWorkerLabelsResponse>>;
+  /**
+   * GetVersion returns the engine's semantic version string (e.g. v0.78.23).
+   * SDKs use this to determine feature support (e.g. slot_config registration).
+   * Old engines that do not implement this RPC will return UNIMPLEMENTED.
+   */
+  getVersion(
+    request: GetVersionRequest,
+    context: CallContext & CallContextExt
+  ): Promise<DeepPartial<GetVersionResponse>>;
 }
 
 export interface DispatcherClient {
@@ -3895,6 +4166,15 @@
     request: DeepPartial<UpsertWorkerLabelsRequest>,
     options?: CallOptions & CallOptionsExt
   ): Promise<UpsertWorkerLabelsResponse>;
+  /**
+   * GetVersion returns the engine's semantic version string (e.g. v0.78.23).
+   * SDKs use this to determine feature support (e.g. slot_config registration).
+   * Old engines that do not implement this RPC will return UNIMPLEMENTED.
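+   *
+   * A typical probe from an SDK looks like this (sketch, not part of the
+   * generated output):
+   *   const res = await dispatcher.getVersion({});
+   *   // res.version is e.g. 'v0.78.23'; UNIMPLEMENTED implies a pre-slot-config engine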
+   */
+  getVersion(
+    request: DeepPartial<GetVersionRequest>,
+    options?: CallOptions & CallOptionsExt
+  ): Promise<GetVersionResponse>;
 }
 
 type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined;
diff --git a/sdks/typescript/src/protoc/v1/workflows.ts b/sdks/typescript/src/protoc/v1/workflows.ts
index 714362f4e7..682a2bbc82 100644
--- a/sdks/typescript/src/protoc/v1/workflows.ts
+++ b/sdks/typescript/src/protoc/v1/workflows.ts
@@ -409,6 +409,10 @@ export interface CreateTaskOpts {
   conditions?: TaskConditions | undefined;
   /** (optional) the timeout for the schedule */
   scheduleTimeout?: string | undefined;
+  /** (optional) whether the task is durable */
+  isDurable: boolean;
+  /** (optional) slot requests (slot_type -> units) */
+  slotRequests: { [key: string]: number };
 }
 
 export interface CreateTaskOpts_WorkerLabelsEntry {
@@ -416,6 +420,11 @@ export interface CreateTaskOpts_WorkerLabelsEntry {
   value: DesiredWorkerLabels | undefined;
 }
 
+export interface CreateTaskOpts_SlotRequestsEntry {
+  key: string;
+  value: number;
+}
+
 export interface CreateTaskRateLimit {
   /** (required) the key for the rate limit */
   key: string;
@@ -1715,6 +1724,8 @@ function createBaseCreateTaskOpts(): CreateTaskOpts {
     concurrency: [],
     conditions: undefined,
     scheduleTimeout: undefined,
+    isDurable: false,
+    slotRequests: {},
   };
 }
 
@@ -1762,6 +1773,15 @@ export const CreateTaskOpts: MessageFns<CreateTaskOpts> = {
     if (message.scheduleTimeout !== undefined) {
       writer.uint32(106).string(message.scheduleTimeout);
     }
+    if (message.isDurable !== false) {
+      writer.uint32(112).bool(message.isDurable);
+    }
+    Object.entries(message.slotRequests).forEach(([key, value]) => {
+      CreateTaskOpts_SlotRequestsEntry.encode(
+        { key: key as any, value },
+        writer.uint32(122).fork()
+      ).join();
+    });
     return writer;
   },
 
@@ -1879,6 +1899,25 @@
         message.scheduleTimeout = reader.string();
         continue;
       }
+      case 14: {
+        if (tag !== 112) {
+          break;
+        }
+
+        message.isDurable = reader.bool();
+        continue;
+      }
+      case 15: {
+        if (tag !== 122) {
+          break;
+        }
+
+        const entry15 = CreateTaskOpts_SlotRequestsEntry.decode(reader, reader.uint32());
+        if (entry15.value !== undefined) {
+          message.slotRequests[entry15.key] = entry15.value;
+        }
+        continue;
+      }
     }
     if ((tag & 7) === 4 || tag === 0) {
       break;
@@ -1923,6 +1962,16 @@
     scheduleTimeout: isSet(object.scheduleTimeout)
       ? globalThis.String(object.scheduleTimeout)
       : undefined,
+    isDurable: isSet(object.isDurable) ? globalThis.Boolean(object.isDurable) : false,
+    slotRequests: isObject(object.slotRequests)
+      ? Object.entries(object.slotRequests).reduce<{ [key: string]: number }>(
+          (acc, [key, value]) => {
+            acc[key] = Number(value);
+            return acc;
+          },
+          {}
+        )
+      : {},
   };
 },
 
@@ -1973,6 +2022,18 @@
     if (message.scheduleTimeout !== undefined) {
       obj.scheduleTimeout = message.scheduleTimeout;
     }
+    if (message.isDurable !== false) {
+      obj.isDurable = message.isDurable;
+    }
+    if (message.slotRequests) {
+      const entries = Object.entries(message.slotRequests);
+      if (entries.length > 0) {
+        obj.slotRequests = {};
+        entries.forEach(([k, v]) => {
+          obj.slotRequests[k] = Math.round(v);
+        });
+      }
+    }
     return obj;
   },
 
@@ -2004,6 +2065,15 @@
         ? TaskConditions.fromPartial(object.conditions)
         : undefined;
     message.scheduleTimeout = object.scheduleTimeout ?? undefined;
+    message.isDurable = object.isDurable ?? false;
+    message.slotRequests = Object.entries(object.slotRequests ?? {}).reduce<{
+      [key: string]: number;
+    }>((acc, [key, value]) => {
+      if (value !== undefined) {
+        acc[key] = globalThis.Number(value);
+      }
+      return acc;
+    }, {});
     return message;
   },
 };
 
@@ -2092,6 +2162,87 @@ export const CreateTaskOpts_WorkerLabelsEntry: MessageFns<CreateTaskOpts_WorkerLabelsEntry> = {
   },
 };
 
+function createBaseCreateTaskOpts_SlotRequestsEntry(): CreateTaskOpts_SlotRequestsEntry {
+  return { key: '', value: 0 };
+}
+
+export const CreateTaskOpts_SlotRequestsEntry: MessageFns<CreateTaskOpts_SlotRequestsEntry> = {
+  encode(
+    message: CreateTaskOpts_SlotRequestsEntry,
+    writer: BinaryWriter = new BinaryWriter()
+  ): BinaryWriter {
+    if (message.key !== '') {
+      writer.uint32(10).string(message.key);
+    }
+    if (message.value !== 0) {
+      writer.uint32(16).int32(message.value);
+    }
+    return writer;
+  },
+
+  decode(input: BinaryReader | Uint8Array, length?: number): CreateTaskOpts_SlotRequestsEntry {
+    const reader = input instanceof BinaryReader ? input : new BinaryReader(input);
+    const end = length === undefined ? reader.len : reader.pos + length;
+    const message = createBaseCreateTaskOpts_SlotRequestsEntry();
+    while (reader.pos < end) {
+      const tag = reader.uint32();
+      switch (tag >>> 3) {
+        case 1: {
+          if (tag !== 10) {
+            break;
+          }
+
+          message.key = reader.string();
+          continue;
+        }
+        case 2: {
+          if (tag !== 16) {
+            break;
+          }
+
+          message.value = reader.int32();
+          continue;
+        }
+      }
+      if ((tag & 7) === 4 || tag === 0) {
+        break;
+      }
+      reader.skip(tag & 7);
+    }
+    return message;
+  },
+
+  fromJSON(object: any): CreateTaskOpts_SlotRequestsEntry {
+    return {
+      key: isSet(object.key) ? globalThis.String(object.key) : '',
+      value: isSet(object.value) ? globalThis.Number(object.value) : 0,
+    };
+  },
+
+  toJSON(message: CreateTaskOpts_SlotRequestsEntry): unknown {
+    const obj: any = {};
+    if (message.key !== '') {
+      obj.key = message.key;
+    }
+    if (message.value !== 0) {
+      obj.value = Math.round(message.value);
+    }
+    return obj;
+  },
+
+  create(base?: DeepPartial<CreateTaskOpts_SlotRequestsEntry>): CreateTaskOpts_SlotRequestsEntry {
+    return CreateTaskOpts_SlotRequestsEntry.fromPartial(base ?? {});
+  },
+  fromPartial(
+    object: DeepPartial<CreateTaskOpts_SlotRequestsEntry>
+  ): CreateTaskOpts_SlotRequestsEntry {
+    const message = createBaseCreateTaskOpts_SlotRequestsEntry();
+    message.key = object.key ?? '';
+    message.value = object.value ?? 0;
+    return message;
+  },
+};
+
 function createBaseCreateTaskRateLimit(): CreateTaskRateLimit {
   return {
     key: '',
diff --git a/sdks/typescript/src/step.ts b/sdks/typescript/src/step.ts
index 2f5e8bb58a..51b2820c41 100644
--- a/sdks/typescript/src/step.ts
+++ b/sdks/typescript/src/step.ts
@@ -758,7 +758,7 @@ export function mapRateLimit(limits: CreateStep['rate_limits']): Creat
 }
 
 // Helper function to validate CEL expressions
-function validateCelExpression(expr: string): boolean {
+function validateCelExpression(_expr: string): boolean {
   // This is a placeholder. In a real implementation, you'd need to use a CEL parser or validator.
   // For now, we'll just return true to mimic the behavior.
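   // A production implementation could delegate to a CEL parser here, along
   // the lines of (hypothetical API, not a dependency of this package):
   //   try { celParser.parse(expr); return true; } catch { return false; }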
   return true;
diff --git a/sdks/typescript/src/v1/client/client.ts b/sdks/typescript/src/v1/client/client.ts
index 82f9ba49c8..19cd729a79 100644
--- a/sdks/typescript/src/v1/client/client.ts
+++ b/sdks/typescript/src/v1/client/client.ts
@@ -143,7 +143,7 @@ export class HatchetClient implements IHatchetClient {
             );
           }
         })
-        .catch((error) => {
+        .catch(() => {
           // Do nothing here
         });
     } catch (e) {
diff --git a/sdks/typescript/src/v1/client/worker/deprecated/deprecation.test.ts b/sdks/typescript/src/v1/client/worker/deprecated/deprecation.test.ts
new file mode 100644
index 0000000000..ad412fcfbb
--- /dev/null
+++ b/sdks/typescript/src/v1/client/worker/deprecated/deprecation.test.ts
@@ -0,0 +1,58 @@
+import { parseSemver, semverLessThan } from './deprecation';
+
+describe('parseSemver', () => {
+  it('parses a standard version with v prefix', () => {
+    expect(parseSemver('v0.78.23')).toEqual([0, 78, 23]);
+  });
+
+  it('parses a version without v prefix', () => {
+    expect(parseSemver('1.2.3')).toEqual([1, 2, 3]);
+  });
+
+  it('strips pre-release suffix', () => {
+    expect(parseSemver('v0.1.0-alpha.0')).toEqual([0, 1, 0]);
+    expect(parseSemver('v10.20.30-rc.1')).toEqual([10, 20, 30]);
+  });
+
+  it('returns [0,0,0] for empty string', () => {
+    expect(parseSemver('')).toEqual([0, 0, 0]);
+  });
+
+  it('returns [0,0,0] for malformed input', () => {
+    expect(parseSemver('v1.2')).toEqual([0, 0, 0]);
+    expect(parseSemver('not-a-version')).toEqual([0, 0, 0]);
+  });
+});
+
+describe('semverLessThan', () => {
+  it('returns true when a < b (patch)', () => {
+    expect(semverLessThan('v0.78.22', 'v0.78.23')).toBe(true);
+  });
+
+  it('returns false when a == b', () => {
+    expect(semverLessThan('v0.78.23', 'v0.78.23')).toBe(false);
+  });
+
+  it('returns false when a > b (patch)', () => {
+    expect(semverLessThan('v0.78.24', 'v0.78.23')).toBe(false);
+  });
+
+  it('compares minor versions correctly', () => {
+    expect(semverLessThan('v0.77.99', 'v0.78.0')).toBe(true);
+    expect(semverLessThan('v0.79.0', 'v0.78.99')).toBe(false);
+  });
+
+  it('compares major versions correctly', () => {
+    expect(semverLessThan('v0.78.23', 'v1.0.0')).toBe(true);
+    expect(semverLessThan('v1.0.0', 'v0.99.99')).toBe(false);
+  });
+
+  it('handles pre-release versions', () => {
+    expect(semverLessThan('v0.1.0-alpha.0', 'v0.78.23')).toBe(true);
+  });
+
+  it('treats empty string as 0.0.0', () => {
+    expect(semverLessThan('', 'v0.78.23')).toBe(true);
+    expect(semverLessThan('v0.78.23', '')).toBe(false);
+  });
+});
diff --git a/sdks/typescript/src/v1/client/worker/deprecated/deprecation.ts b/sdks/typescript/src/v1/client/worker/deprecated/deprecation.ts
new file mode 100644
index 0000000000..529a1eb6af
--- /dev/null
+++ b/sdks/typescript/src/v1/client/worker/deprecated/deprecation.ts
@@ -0,0 +1,109 @@
+/**
+ * Generic time-aware deprecation helper.
+ *
+ * Timeline (from a given start date, with configurable windows):
+ *   0 to warnDays:         WARNING logged once per feature
+ *   warnDays to errorDays: ERROR logged once per feature
+ *   after errorDays:       throws an error 1-in-5 calls (20% chance)
+ *
+ * Defaults: warnDays=90, errorDays=undefined (error phase disabled unless set).
+ */
+
+import { Logger } from '@hatchet/util/logger';
+
+const DEFAULT_WARN_DAYS = 90;
+const MS_PER_DAY = 24 * 60 * 60 * 1000;
+
+/** Tracks which features have already been logged (keyed by feature name). */
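+// Note: deduplication is per-process, so a restarted worker emits each notice again.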
+const alreadyLogged = new Set<string>();
+
+export class DeprecationError extends Error {
+  feature: string;
+
+  constructor(feature: string, message: string) {
+    super(`${feature}: ${message}`);
+    this.name = 'DeprecationError';
+    this.feature = feature;
+  }
+}
+
+export interface DeprecationOpts {
+  /** Days after start during which a warning is logged. Defaults to 90. */
+  warnDays?: number;
+  /** Days after start during which an error is logged.
+   * After this window, calls have a 20% chance of throwing.
+   * If undefined (default), the error/raise phase is never reached;
+   * the notice stays at error-level logging indefinitely. */
+  errorDays?: number;
+}
+
+/**
+ * Parses a semver string like "v0.78.23" into [major, minor, patch].
+ * Returns [0, 0, 0] if parsing fails.
+ */
+export function parseSemver(v: string): [number, number, number] {
+  let s = v.startsWith('v') ? v.slice(1) : v;
+  const dashIdx = s.indexOf('-');
+  if (dashIdx !== -1) s = s.slice(0, dashIdx);
+  const parts = s.split('.');
+  if (parts.length !== 3) return [0, 0, 0];
+  return [parseInt(parts[0], 10) || 0, parseInt(parts[1], 10) || 0, parseInt(parts[2], 10) || 0];
+}
+
+/**
+ * Returns true if semver string a is strictly less than b.
+ */
+export function semverLessThan(a: string, b: string): boolean {
+  const [aMaj, aMin, aPat] = parseSemver(a);
+  const [bMaj, bMin, bPat] = parseSemver(b);
+  if (aMaj !== bMaj) return aMaj < bMaj;
+  if (aMin !== bMin) return aMin < bMin;
+  return aPat < bPat;
+}
+
+/**
+ * Emit a time-aware deprecation notice.
+ *
+ * @param feature - A short identifier for deduplication (each feature logs once).
+ * @param message - The human-readable deprecation message.
+ * @param start - The Date when the deprecation window began.
+ * @param logger - A Logger instance for outputting warnings/errors.
+ * @param opts - Optional configuration for time windows.
+ * @throws DeprecationError after the errorDays window (~20% chance).
+ */
+export function emitDeprecationNotice(
+  feature: string,
+  message: string,
+  start: Date,
+  logger: Logger,
+  opts?: DeprecationOpts
+): void {
+  const warnMs = (opts?.warnDays ?? DEFAULT_WARN_DAYS) * MS_PER_DAY;
+  const errorDays = opts?.errorDays;
+  const errorMs = errorDays != null ? errorDays * MS_PER_DAY : undefined;
+  const elapsed = Date.now() - start.getTime();
+
+  if (elapsed < warnMs) {
+    // Phase 1: warning
+    if (!alreadyLogged.has(feature)) {
+      logger.warn(message);
+      alreadyLogged.add(feature);
+    }
+  } else if (errorMs === undefined || elapsed < errorMs) {
+    // Phase 2: error-level log (indefinite when errorDays is not set)
+    if (!alreadyLogged.has(feature)) {
+      logger.error(`${message} This fallback will be removed soon. Upgrade immediately.`);
+      alreadyLogged.add(feature);
+    }
+  } else {
+    // Phase 3: throw 1-in-5 times
+    if (!alreadyLogged.has(feature)) {
+      logger.error(`${message} This fallback is no longer supported and will fail intermittently.`);
+      alreadyLogged.add(feature);
+    }
+
+    if (Math.random() < 0.2) {
+      throw new DeprecationError(feature, message);
+    }
+  }
+}
diff --git a/sdks/typescript/src/v1/client/worker/deprecated/index.ts b/sdks/typescript/src/v1/client/worker/deprecated/index.ts
new file mode 100644
index 0000000000..cc28a8e35e
--- /dev/null
+++ b/sdks/typescript/src/v1/client/worker/deprecated/index.ts
@@ -0,0 +1,9 @@
+export { isLegacyEngine, LegacyDualWorker } from './legacy-worker';
+export { LegacyV1Worker } from './legacy-v1-worker';
+export { legacyGetActionListener } from './legacy-registration';
+export {
+  emitDeprecationNotice,
+  DeprecationError,
+  parseSemver,
+  semverLessThan,
+} from './deprecation';
diff --git a/sdks/typescript/src/v1/client/worker/deprecated/legacy-registration.ts b/sdks/typescript/src/v1/client/worker/deprecated/legacy-registration.ts
new file mode 100644
index 0000000000..7e38434245
--- /dev/null
+++ b/sdks/typescript/src/v1/client/worker/deprecated/legacy-registration.ts
@@ -0,0 +1,39 @@
+/**
+ * Legacy worker registration using the deprecated `slots` proto field
+ * instead of `slotConfig`. For backward compatibility with engines
+ * that do not support multiple slot types.
+ */
+
+import {
+  DispatcherClient,
+  mapLabels,
+  WorkerLabels,
+} from '@hatchet/clients/dispatcher/dispatcher-client';
+import { ActionListener } from '@clients/dispatcher/action-listener';
+
+export interface LegacyRegistrationOptions {
+  workerName: string;
+  services: string[];
+  actions: string[];
+  slots: number;
+  labels: WorkerLabels;
+}
+
+/**
+ * Registers a worker using the legacy `slots` proto field instead of `slotConfig`.
+ */
+export async function legacyGetActionListener(
+  dispatcher: DispatcherClient,
+  options: LegacyRegistrationOptions
+): Promise<ActionListener> {
+  const registration = await dispatcher.client.register({
+    workerName: options.workerName,
+    services: options.services,
+    actions: options.actions,
+    slots: options.slots,
+    labels: options.labels ? mapLabels(options.labels) : undefined,
+    runtimeInfo: dispatcher.getRuntimeInfo(),
+  });
+
+  return new ActionListener(dispatcher, registration.workerId);
+}
diff --git a/sdks/typescript/src/v1/client/worker/deprecated/legacy-v1-worker.ts b/sdks/typescript/src/v1/client/worker/deprecated/legacy-v1-worker.ts
new file mode 100644
index 0000000000..3e70b3d646
--- /dev/null
+++ b/sdks/typescript/src/v1/client/worker/deprecated/legacy-v1-worker.ts
@@ -0,0 +1,36 @@
+/**
+ * Legacy V1Worker subclass that registers with the old `slots` proto field
+ * instead of `slotConfig`. Used when connected to pre-slot-config engines.
+ */
+
+/* eslint-disable no-underscore-dangle */
+import { ActionListener } from '@clients/dispatcher/action-listener';
+import { HatchetClient } from '@hatchet/v1';
+import { V1Worker } from '../worker-internal';
+import { legacyGetActionListener } from './legacy-registration';
+
+export class LegacyV1Worker extends V1Worker {
+  private _legacySlotCount: number;
+
+  constructor(
+    client: HatchetClient,
+    options: ConstructorParameters<typeof V1Worker>[1],
+    legacySlots: number
+  ) {
+    super(client, options);
+    this._legacySlotCount = legacySlots;
+  }
+
+  /**
+   * Override registration to use the legacy `slots` proto field.
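+   * V1Worker.start() calls createListener(), so this override is the only
+   * point where the legacy path diverges from slot_config registration.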
+   */
+  protected override async createListener(): Promise<ActionListener> {
+    return legacyGetActionListener(this.client._v0.dispatcher, {
+      workerName: this.name,
+      services: ['default'],
+      actions: Object.keys(this.action_registry),
+      slots: this._legacySlotCount,
+      labels: this.labels,
+    });
+  }
+}
diff --git a/sdks/typescript/src/v1/client/worker/deprecated/legacy-worker.ts b/sdks/typescript/src/v1/client/worker/deprecated/legacy-worker.ts
new file mode 100644
index 0000000000..f12c02718e
--- /dev/null
+++ b/sdks/typescript/src/v1/client/worker/deprecated/legacy-worker.ts
@@ -0,0 +1,162 @@
+/**
+ * Legacy dual-worker implementation for pre-slot-config engines.
+ *
+ * When connected to an older Hatchet engine that does not support multiple slot types,
+ * this module provides the old worker start flow which creates separate durable and
+ * non-durable workers, each registered with the legacy `slots` proto field.
+ */
+
+/* eslint-disable no-underscore-dangle */
+import { Workflow as V0Workflow } from '@hatchet/workflow';
+import { Status } from 'nice-grpc';
+import { BaseWorkflowDeclaration } from '../../../declaration';
+import { HatchetClient } from '../../..';
+import { CreateWorkerOpts } from '../worker';
+import { LegacyV1Worker } from './legacy-v1-worker';
+import { emitDeprecationNotice, semverLessThan } from './deprecation';
+
+const DEFAULT_DEFAULT_SLOTS = 100;
+const DEFAULT_DURABLE_SLOTS = 1_000;
+
+/** The date when slot_config support was released. */
+const LEGACY_ENGINE_START = new Date('2026-02-12T00:00:00Z');
+
+/** Minimum engine version that supports multiple slot types. */
+const MIN_SLOT_CONFIG_VERSION = 'v0.78.23';
+
+const LEGACY_ENGINE_MESSAGE =
+  'Connected to an older Hatchet engine that does not support multiple slot types. ' +
+  'Falling back to legacy worker registration. ' +
+  'Please upgrade your Hatchet engine to the latest version.';
+
+/**
+ * Checks if the connected engine is legacy by comparing its semantic version
+ * against the minimum required version for slot_config support.
+ * Returns true if the engine is legacy, false otherwise.
+ * Emits a time-aware deprecation notice when a legacy engine is detected.
+ */
+export async function isLegacyEngine(v1: HatchetClient): Promise<boolean> {
+  try {
+    const version = await v1._v0.dispatcher.getVersion();
+
+    // If the version is empty or older than the minimum, treat as legacy
+    if (!version || semverLessThan(version, MIN_SLOT_CONFIG_VERSION)) {
+      const logger = v1._v0.config.logger('Worker', v1._v0.config.log_level);
+      emitDeprecationNotice('legacy-engine', LEGACY_ENGINE_MESSAGE, LEGACY_ENGINE_START, logger, {
+        errorDays: 180,
+      });
+      return true;
+    }
+
+    return false;
+  } catch (e: any) {
+    if (e?.code === Status.UNIMPLEMENTED) {
+      const logger = v1._v0.config.logger('Worker', v1._v0.config.log_level);
+      emitDeprecationNotice('legacy-engine', LEGACY_ENGINE_MESSAGE, LEGACY_ENGINE_START, logger, {
+        errorDays: 180,
+      });
+      return true;
+    }
+    // For other errors, assume new engine and let registration fail naturally
+    return false;
+  }
+}
+
+/**
+ * LegacyDualWorker manages two V1Worker instances (nonDurable + durable)
+ * for engines that don't support slot_config.
+ * Uses the legacy `slots` proto field (maxRuns) instead of `slotConfig`.
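+ *
+ * Rough shape of the fallback (sketch):
+ *   worker.start() -> isLegacyEngine()          // GetVersion RPC, or UNIMPLEMENTED
+ *                  -> LegacyDualWorker.create() // one worker per slot class
+ *                  -> both workers started via Promise.all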
+ */
+export class LegacyDualWorker {
+  private nonDurable: LegacyV1Worker;
+  private durable: LegacyV1Worker | undefined;
+  private name: string;
+
+  constructor(name: string, nonDurable: LegacyV1Worker, durable?: LegacyV1Worker) {
+    this.name = name;
+    this.nonDurable = nonDurable;
+    this.durable = durable;
+  }
+
+  /**
+   * Creates a legacy dual-worker setup from the given options.
+   * Workers are created with legacy registration (old `slots` proto field).
+   */
+  static async create(
+    v1: HatchetClient,
+    name: string,
+    options: CreateWorkerOpts
+  ): Promise<LegacyDualWorker> {
+    const defaultSlots = options.slots || options.maxRuns || DEFAULT_DEFAULT_SLOTS;
+    const durableSlots = options.durableSlots || DEFAULT_DURABLE_SLOTS;
+
+    // Create the non-durable worker with legacy registration
+    const nonDurable = new LegacyV1Worker(
+      v1,
+      { name, labels: options.labels, handleKill: options.handleKill },
+      defaultSlots
+    );
+
+    // Check if any workflows have durable tasks
+    let hasDurableTasks = false;
+    for (const wf of options.workflows || []) {
+      if (wf instanceof BaseWorkflowDeclaration) {
+        if (wf.definition._durableTasks.length > 0) {
+          hasDurableTasks = true;
+          break;
+        }
+      }
+    }
+
+    let durableWorker: LegacyV1Worker | undefined;
+    if (hasDurableTasks) {
+      // Create the durable worker with legacy registration
+      durableWorker = new LegacyV1Worker(
+        v1,
+        { name: `${name}-durable`, labels: options.labels, handleKill: options.handleKill },
+        durableSlots
+      );
+    }
+
+    const legacyWorker = new LegacyDualWorker(name, nonDurable, durableWorker);
+
+    // Register workflows on appropriate workers
+    for (const wf of options.workflows || []) {
+      if (wf instanceof BaseWorkflowDeclaration) {
+        if (wf.definition._durableTasks.length > 0 && durableWorker) {
+          await durableWorker.registerWorkflowV1(wf);
+          durableWorker.registerDurableActionsV1(wf.definition);
+        } else {
+          await nonDurable.registerWorkflowV1(wf);
+        }
+      } else {
+        // fallback to v0 client for backwards compatibility
+        await nonDurable.registerWorkflow(wf as V0Workflow);
+      }
+    }
+
+    return legacyWorker;
+  }
+
+  /**
+   * Starts both workers using Promise.all.
+   */
+  async start(): Promise<void> {
+    const promises: Promise<void>[] = [this.nonDurable.start()];
+    if (this.durable) {
+      promises.push(this.durable.start());
+    }
+    await Promise.all(promises);
+  }
+
+  /**
+   * Stops both workers.
+   */
+  async stop(): Promise<void> {
+    const promises: Promise<void>[] = [this.nonDurable.stop()];
+    if (this.durable) {
+      promises.push(this.durable.stop());
+    }
+    await Promise.all(promises);
+  }
+}
diff --git a/sdks/typescript/src/v1/client/worker/slot-utils.ts b/sdks/typescript/src/v1/client/worker/slot-utils.ts
new file mode 100644
index 0000000000..82d8a8f9a0
--- /dev/null
+++ b/sdks/typescript/src/v1/client/worker/slot-utils.ts
@@ -0,0 +1,117 @@
+import { Workflow as V0Workflow } from '@hatchet/workflow';
+import { BaseWorkflowDeclaration } from '../../declaration';
+import { SlotConfig, SlotType } from '../../slot-types';
+
+const DEFAULT_DEFAULT_SLOTS = 100;
+const DEFAULT_DURABLE_SLOTS = 1_000;
+
+export interface WorkerSlotOptions {
+  /** (optional) Maximum number of concurrent runs on this worker, defaults to 100 */
+  slots?: number;
+  /** (optional) Maximum number of concurrent durable tasks, defaults to 1,000 */
+  durableSlots?: number;
+  /** (optional) Array of workflows to register */
+  workflows?: BaseWorkflowDeclaration<any, any>[] | V0Workflow[];
+  /** @deprecated Use slots instead */
+  maxRuns?: number;
+}
+
+export function resolveWorkerOptions<T extends WorkerSlotOptions>(
+  options: T
+): T & {
+  slots?: number;
+  durableSlots?: number;
+  slotConfig: SlotConfig;
+} {
+  const requiredSlotTypes = options.workflows
+    ? getRequiredSlotTypes(options.workflows)
+    : new Set<SlotType>();
+
+  const slotConfig: SlotConfig =
+    options.slots || options.durableSlots || options.maxRuns
+      ? {
+          ...(options.slots || options.maxRuns
+            ? { [SlotType.Default]: options.slots || options.maxRuns || 0 }
+            : {}),
+          ...(options.durableSlots ? { [SlotType.Durable]: options.durableSlots } : {}),
+        }
+      : {};
+
+  if (requiredSlotTypes.has(SlotType.Default) && slotConfig[SlotType.Default] == null) {
+    slotConfig[SlotType.Default] = DEFAULT_DEFAULT_SLOTS;
+  }
+  if (requiredSlotTypes.has(SlotType.Durable) && slotConfig[SlotType.Durable] == null) {
+    slotConfig[SlotType.Durable] = DEFAULT_DURABLE_SLOTS;
+  }
+
+  if (Object.keys(slotConfig).length === 0) {
+    slotConfig[SlotType.Default] = DEFAULT_DEFAULT_SLOTS;
+  }
+
+  return {
+    ...options,
+    slots:
+      options.slots ||
+      options.maxRuns ||
+      (slotConfig[SlotType.Default] != null ? slotConfig[SlotType.Default] : undefined),
+    durableSlots:
+      options.durableSlots ||
+      (slotConfig[SlotType.Durable] != null ? slotConfig[SlotType.Durable] : undefined),
+    slotConfig,
+  };
+}
+
+// eslint-disable-next-line @typescript-eslint/naming-convention
+export const testingExports = {
+  resolveWorkerOptions,
+};
+
+function getRequiredSlotTypes(
+  workflows: Array<BaseWorkflowDeclaration<any, any> | V0Workflow>
+): Set<SlotType> {
+  const required = new Set<SlotType>();
+  const addFromRequests = (
+    requests: Record<string, number> | undefined,
+    fallbackType: SlotType
+  ) => {
+    if (requests && Object.keys(requests).length > 0) {
+      if (requests[SlotType.Default] !== undefined) {
+        required.add(SlotType.Default);
+      }
+      if (requests[SlotType.Durable] !== undefined) {
+        required.add(SlotType.Durable);
+      }
+    } else {
+      required.add(fallbackType);
+    }
+  };
+
+  for (const wf of workflows) {
+    if (wf instanceof BaseWorkflowDeclaration) {
+      // eslint-disable-next-line dot-notation
+      const tasks = wf.definition['_tasks'] as Array<{ slotRequests?: Record<string, number> }>;
+      for (const task of tasks) {
+        addFromRequests(task.slotRequests, SlotType.Default);
+      }
+      // eslint-disable-next-line dot-notation
+      const durableTasks = wf.definition['_durableTasks'] as Array<unknown>;
+      if (durableTasks.length > 0) {
+        required.add(SlotType.Durable);
+      }
+
+      if (wf.definition.onFailure) {
+        const opts =
+          typeof wf.definition.onFailure === 'object' ? wf.definition.onFailure : undefined;
+        addFromRequests(opts?.slotRequests, SlotType.Default);
+      }
+
+      if (wf.definition.onSuccess) {
+        const opts =
+          typeof wf.definition.onSuccess === 'object' ? wf.definition.onSuccess : undefined;
+        addFromRequests(opts?.slotRequests, SlotType.Default);
+      }
+    }
+  }
+
+  return required;
+}
diff --git a/sdks/typescript/src/v1/client/worker/worker-internal.ts b/sdks/typescript/src/v1/client/worker/worker-internal.ts
index c5dc58c94d..f827aad049 100644
--- a/sdks/typescript/src/v1/client/worker/worker-internal.ts
+++ b/sdks/typescript/src/v1/client/worker/worker-internal.ts
@@ -39,13 +39,15 @@
 import { applyNamespace } from '@hatchet/util/apply-namespace';
 import { Context, DurableContext } from './context';
 import { parentRunContextManager } from '../../parent-run-context-vars';
 import { HealthServer, workerStatus, type WorkerStatus } from './health-server';
+import { SlotConfig } from '../../slot-types';
 
 export type ActionRegistry = Record<string, Function>;
 
 export interface WorkerOpts {
   name: string;
   handleKill?: boolean;
-  maxRuns?: number;
+  slots?: number;
+  durableSlots?: number;
   labels?: WorkerLabels;
   healthPort?: number;
   enableHealthServer?: boolean;
@@ -63,7 +65,9 @@ export class V1Worker {
   listener: ActionListener | undefined;
   futures: Record<string, Promise<any>> = {};
   contexts: Record<string, Context<any, any>> = {};
-  maxRuns?: number;
+  slots?: number;
+  durableSlots?: number;
+  slotConfig: SlotConfig;
 
   logger: Logger;
 
@@ -82,14 +86,18 @@
     options: {
       name: string;
       handleKill?: boolean;
-      maxRuns?: number;
+      slots?: number;
+      durableSlots?: number;
+      slotConfig?: SlotConfig;
       labels?: WorkerLabels;
     }
   ) {
     this.client = client;
     this.name = applyNamespace(options.name, this.client.config.namespace);
     this.action_registry = {};
-    this.maxRuns = options.maxRuns;
+    this.slots = options.slots;
+    this.durableSlots = options.durableSlots;
+    this.slotConfig = options.slotConfig || {};
 
     this.labels = options.labels || {};
 
@@ -127,11 +135,10 @@
   }
 
   private getAvailableSlots(): number {
-    if (!this.maxRuns) {
-      return 0;
-    }
+    // sum all the slots in the slot config
+    const totalSlots = Object.values(this.slotConfig).reduce((acc, curr) => acc + curr, 0);
     const currentRuns = Object.keys(this.futures).length;
-    return Math.max(0, this.maxRuns - currentRuns);
+    return Math.max(0, totalSlots - currentRuns);
   }
 
   private getRegisteredActions(): string[] {
@@ -284,6 +291,8 @@
       rateLimits: [],
       workerLabels: {},
       concurrency: [],
+      isDurable: false,
+      slotRequests: { default: 1 },
     };
   }
 
@@ -306,6 +315,8 @@
       backoffFactor: onFailure.backoff?.factor || workflow.taskDefaults?.backoff?.factor,
       backoffMaxSeconds:
        onFailure.backoff?.maxSeconds || workflow.taskDefaults?.backoff?.maxSeconds,
+      isDurable: false,
+      slotRequests: { default: 1 },
     };
   }
 
@@ -381,6 +392,8 @@
       inputJsonSchema = new TextEncoder().encode(JSON.stringify(jsonSchema));
     }
 
+    const durableTaskSet = new Set(workflow._durableTasks);
+
     const registeredWorkflow = this.client._v0.admin.putWorkflowV1({
       name: workflow.name,
       description: workflow.description || '',
@@ -412,6 +425,9 @@
         backoffFactor: task.backoff?.factor || workflow.taskDefaults?.backoff?.factor,
         backoffMaxSeconds: task.backoff?.maxSeconds || workflow.taskDefaults?.backoff?.maxSeconds,
         conditions: taskConditionsToPb(task),
+        isDurable: durableTaskSet.has(task),
+        slotRequests:
+          task.slotRequests || (durableTaskSet.has(task) ? { durable: 1 } : { default: 1 }),
         concurrency: task.concurrency
           ? Array.isArray(task.concurrency)
             ? task.concurrency
@@ -887,6 +903,20 @@
     }
   }
 
+  /**
+   * Creates an action listener by registering the worker with the dispatcher.
+   * Override in subclasses to change registration behavior (e.g. legacy engines).
+   */
+  protected async createListener(): Promise<ActionListener> {
+    return this.client._v0.dispatcher.getActionListener({
+      workerName: this.name,
+      services: ['default'],
+      actions: Object.keys(this.action_registry),
+      slotConfig: this.slotConfig,
+      labels: this.labels,
+    });
+  }
+
   async start() {
     this.setStatus(workerStatus.STARTING);
 
@@ -908,13 +938,7 @@
     }
 
     try {
-      this.listener = await this.client._v0.dispatcher.getActionListener({
-        workerName: this.name,
-        services: ['default'],
-        actions: Object.keys(this.action_registry),
-        maxRuns: this.maxRuns,
-        labels: this.labels,
-      });
+      this.listener = await this.createListener();
 
       this.workerId = this.listener.workerId;
       this.setStatus(workerStatus.HEALTHY);
diff --git a/sdks/typescript/src/v1/client/worker/worker-slot-capacities.test.ts b/sdks/typescript/src/v1/client/worker/worker-slot-capacities.test.ts
new file mode 100644
index 0000000000..c36f8a4e48
--- /dev/null
+++ b/sdks/typescript/src/v1/client/worker/worker-slot-capacities.test.ts
@@ -0,0 +1,51 @@
+import { testingExports } from '@hatchet/v1/client/worker/slot-utils';
+import { WorkflowDeclaration } from '../../declaration';
+import { SlotType } from '../../slot-types';
+
+const { resolveWorkerOptions } = testingExports;
+
+describe('resolveWorkerOptions slot config', () => {
+  it('sets default slots for non-durable tasks', () => {
+    const workflow = new WorkflowDeclaration({ name: 'default-wf' });
+    workflow.task({
+      name: 'task1',
+      fn: async () => undefined,
+    });
+
+    const resolved = resolveWorkerOptions({ workflows: [workflow] });
+
+    expect(resolved.slotConfig[SlotType.Default]).toBe(100);
+    expect(resolved.slotConfig[SlotType.Durable]).toBeUndefined();
+  });
+
+  it('sets durable slots for durable-only workflows without default slots', () => {
+    const workflow = new WorkflowDeclaration({ name: 'durable-wf' });
+    workflow.durableTask({
+      name: 'durable-task',
+      fn: async () => undefined,
+    });
+
+    const resolved = resolveWorkerOptions({ workflows: [workflow] });
+
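+    // durable-only workflows should get the durable default (1,000) and no
+    // default-slot allocation at all: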
+    expect(resolved.slotConfig[SlotType.Durable]).toBe(1000);
+    expect(resolved.slotConfig[SlotType.Default]).toBeUndefined();
+    expect(resolved.slots).toBeUndefined();
+  });
+
+  it('sets both default and durable slots for mixed workflows', () => {
+    const workflow = new WorkflowDeclaration({ name: 'mixed-wf' });
+    workflow.task({
+      name: 'task1',
+      fn: async () => undefined,
+    });
+    workflow.durableTask({
+      name: 'durable-task',
+      fn: async () => undefined,
+    });
+
+    const resolved = resolveWorkerOptions({ workflows: [workflow] });
+
+    expect(resolved.slotConfig[SlotType.Default]).toBe(100);
+    expect(resolved.slotConfig[SlotType.Durable]).toBe(1000);
+  });
+});
diff --git a/sdks/typescript/src/v1/client/worker/worker.ts b/sdks/typescript/src/v1/client/worker/worker.ts
index 572ad79b27..7619b97b38 100644
--- a/sdks/typescript/src/v1/client/worker/worker.ts
+++ b/sdks/typescript/src/v1/client/worker/worker.ts
@@ -6,27 +6,18 @@ import { WebhookWorkerCreateRequest } from '@hatchet/clients/rest/generated/data
 import { BaseWorkflowDeclaration } from '../../declaration';
 import { HatchetClient } from '../..';
 import { V1Worker } from './worker-internal';
-
-const DEFAULT_DURABLE_SLOTS = 1_000;
+import { resolveWorkerOptions, type WorkerSlotOptions } from './slot-utils';
+import { isLegacyEngine, LegacyDualWorker } from './deprecated';
 
 /**
  * Options for creating a new hatchet worker
 * @interface CreateWorkerOpts
 */
-export interface CreateWorkerOpts {
-  /** (optional) Maximum number of concurrent runs on this worker, defaults to 100 */
-  slots?: number;
-  /** (optional) Array of workflows to register */
-  workflows?: BaseWorkflowDeclaration<any, any>[] | V0Workflow[];
+export interface CreateWorkerOpts extends WorkerSlotOptions {
   /** (optional) Worker labels for affinity-based assignment */
   labels?: WorkerLabels;
   /** (optional) Whether to handle kill signals */
   handleKill?: boolean;
-  /** @deprecated Use slots instead */
-  maxRuns?: number;
-
-  /** (optional) Maximum number of concurrent runs on the durable worker, defaults to 1,000 */
-  durableSlots?: number;
 }
 
 /**
@@ -39,8 +30,13 @@ export class Worker {
   _v0: LegacyHatchetClient;
 
   /** Internal reference to the underlying V0 worker implementation */
-  nonDurable: V1Worker;
-  durable?: V1Worker;
+  _internal: V1Worker;
+
+  /** Set when connected to a legacy engine that needs dual-worker architecture */
+  private _legacyWorker: LegacyDualWorker | undefined;
+
+  /** Tracks all workflows registered after construction (via registerWorkflow/registerWorkflows) */
+  private _registeredWorkflows: Array<BaseWorkflowDeclaration<any, any> | V0Workflow> = [];
 
   /**
    * Creates a new HatchetWorker instance
@@ -55,7 +51,7 @@ export class Worker {
   ) {
     this._v1 = v1;
     this._v0 = v0;
-    this.nonDurable = nonDurable;
+    this._internal = nonDurable;
     this.config = config;
     this.name = name;
   }
@@ -72,10 +68,10 @@ export class Worker {
     name: string,
     options: CreateWorkerOpts
   ) {
+    const resolvedOptions = resolveWorkerOptions(options);
     const opts = {
       name,
-      ...options,
-      maxRuns: options.slots || options.maxRuns,
+      ...resolvedOptions,
     };
 
     const internalWorker = new V1Worker(v1, opts);
@@ -91,26 +87,18 @@ export class Worker {
    */
   async registerWorkflows(workflows?: Array<BaseWorkflowDeclaration<any, any> | V0Workflow>) {
     for (const wf of workflows || []) {
+      this._registeredWorkflows.push(wf);
+
       if (wf instanceof BaseWorkflowDeclaration) {
         // TODO check if tenant is V1
-        await this.nonDurable.registerWorkflowV1(wf);
+        await this._internal.registerWorkflowV1(wf);
 
         if (wf.definition._durableTasks.length > 0) {
-          if (!this.durable) {
-            const opts = {
-              name: `${this.name}-durable`,
-              ...this.config,
-              maxRuns: this.config.durableSlots || DEFAULT_DURABLE_SLOTS,
-            };
-
-            this.durable = new V1Worker(this._v1, opts);
-            await this.durable.registerWorkflowV1(wf, true);
-          }
-          this.durable.registerDurableActionsV1(wf.definition);
+          this._internal.registerDurableActionsV1(wf.definition);
         }
       } else {
         // fallback to v0 client for backwards compatibility
-        await this.nonDurable.registerWorkflow(wf);
+        await this._internal.registerWorkflow(wf);
       }
     }
   }
@@ -129,14 +117,21 @@ export class Worker {
    * Starts the worker
    * @returns Promise that resolves when the worker is stopped or killed
    */
-  start() {
-    const workers = [this.nonDurable];
-
-    if (this.durable) {
-      workers.push(this.durable);
+  async start() {
+    // Check engine version and fall back to legacy dual-worker mode if needed
+    if (await isLegacyEngine(this._v1)) {
+      // Include workflows registered after construction (via registerWorkflow/registerWorkflows)
+      // so the legacy worker picks them up.
+      const legacyConfig: CreateWorkerOpts = {
+        ...this.config,
+        workflows: this._registeredWorkflows.length
+          ? (this._registeredWorkflows as BaseWorkflowDeclaration<any, any>[])
+          : this.config.workflows,
+      };
+      this._legacyWorker = await LegacyDualWorker.create(this._v1, this.name, legacyConfig);
+      return this._legacyWorker.start();
     }
-
-    return Promise.all(workers.map((w) => w.start()));
+    return this._internal.start();
   }
 
  /**
@@ -144,13 +139,10 @@ export class Worker {
    * @returns Promise that resolves when the worker stops
    */
   stop() {
-    const workers = [this.nonDurable];
-
-    if (this.durable) {
-      workers.push(this.durable);
+    if (this._legacyWorker) {
+      return this._legacyWorker.stop();
     }
-
-    return Promise.all(workers.map((w) => w.stop()));
+    return this._internal.stop();
   }
 
   /**
@@ -159,7 +151,7 @@ export class Worker {
    * @returns Promise that resolves when labels are updated
    */
   upsertLabels(labels: WorkerLabels) {
-    return this.nonDurable.upsertLabels(labels);
+    return this._internal.upsertLabels(labels);
   }
 
   /**
@@ -167,7 +159,7 @@ export class Worker {
    * @returns The labels for the worker
    */
   getLabels() {
-    return this.nonDurable.labels;
+    return this._internal.labels;
  }
 
   /**
@@ -176,43 +168,33 @@ export class Worker {
    * @returns A promise that resolves when the webhook is registered
    */
   registerWebhook(webhook: WebhookWorkerCreateRequest) {
-    return this.nonDurable.registerWebhook(webhook);
+    return this._internal.registerWebhook(webhook);
   }
 
   async isPaused() {
-    const promises: Promise<boolean>[] = [];
-    if (this.nonDurable?.workerId) {
-      promises.push(this._v1.workers.isPaused(this.nonDurable.workerId));
-    }
-    if (this.durable?.workerId) {
-      promises.push(this._v1.workers.isPaused(this.durable.workerId));
+    if (!this._internal?.workerId) {
+      return false;
     }
-    const res = await Promise.all(promises);
-
-    return !res.includes(false);
+    return this._v1.workers.isPaused(this._internal.workerId);
   }
 
   // TODO docstrings
   pause() {
-    const promises: Promise<any>[] = [];
-    if (this.nonDurable?.workerId) {
-      promises.push(this._v1.workers.pause(this.nonDurable.workerId));
-    }
-    if (this.durable?.workerId) {
-      promises.push(this._v1.workers.pause(this.durable.workerId));
+    if (!this._internal?.workerId) {
+      return Promise.resolve();
    }
-    return Promise.all(promises);
+
+    return this._v1.workers.pause(this._internal.workerId);
   }
 
   unpause() {
-    const promises: Promise<any>[] = [];
-    if (this.nonDurable?.workerId) {
-      promises.push(this._v1.workers.unpause(this.nonDurable.workerId));
-    }
-    if (this.durable?.workerId) {
-      promises.push(this._v1.workers.unpause(this.durable.workerId));
+    if (!this._internal?.workerId) {
+      return Promise.resolve();
     }
-    return Promise.all(promises);
+
+    return this._v1.workers.unpause(this._internal.workerId);
   }
 }
+
+export { testingExports as __testing } from './slot-utils';
diff --git a/sdks/typescript/src/v1/index.ts b/sdks/typescript/src/v1/index.ts
index 4a4dee93ed..558adca081 100644
--- a/sdks/typescript/src/v1/index.ts
+++ b/sdks/typescript/src/v1/index.ts
@@ -7,3 +7,4 @@ export * from './client/duration';
 export * from './types';
 export * from './task';
 export * from './client/worker/context';
+export * from './slot-types';
diff --git a/sdks/typescript/src/v1/slot-types.ts b/sdks/typescript/src/v1/slot-types.ts
new file mode 100644
index 0000000000..9a8f473231
--- /dev/null
+++ b/sdks/typescript/src/v1/slot-types.ts
@@ -0,0 +1,7 @@
+// eslint-disable-next-line no-shadow
+export enum SlotType {
+  Default = 'default',
+  Durable = 'durable',
+}
+
+export type SlotConfig = Partial<Record<SlotType, number>>;
diff --git a/sdks/typescript/src/v1/task.ts b/sdks/typescript/src/v1/task.ts
index 4b4a10d168..641bed36f6 100644
--- a/sdks/typescript/src/v1/task.ts
+++ b/sdks/typescript/src/v1/task.ts
@@ -137,6 +137,9 @@ export type CreateBaseTaskOpts<
    * (optional) the concurrency options for the task
    */
   concurrency?: Concurrency | Concurrency[];
+
+  /** @internal */
+  slotRequests?: Record<string, number>;
 };
 
 export type CreateWorkflowTaskOpts<
diff --git a/sql/schema/v0.sql b/sql/schema/v0.sql
index 08ce1d671d..91d58de72e 100644
--- a/sql/schema/v0.sql
+++ b/sql/schema/v0.sql
@@ -454,6 +454,7 @@ CREATE TABLE "Step" (
     -- the maximum amount of time in seconds to wait between retries
     "retryMaxBackoff" INTEGER,
     "scheduleTimeout" TEXT NOT NULL DEFAULT '5m',
+    "isDurable" BOOLEAN NOT NULL DEFAULT false,
 
     CONSTRAINT "Step_pkey" PRIMARY KEY ("id")
 );
@@ -850,6 +851,7 @@ CREATE TABLE "Worker" (
     "lastHeartbeatAt" TIMESTAMP(3),
     "name" TEXT NOT NULL,
     "dispatcherId" UUID,
+    -- FIXME: maxRuns is deprecated, remove this column in a future migration
     "maxRuns" INTEGER NOT NULL DEFAULT 100,
     "isActive" BOOLEAN NOT NULL DEFAULT false,
     "lastListenerEstablished" TIMESTAMP(3),
diff --git a/sql/schema/v1-core.sql b/sql/schema/v1-core.sql
index b1bea87b97..30528a9434 100644
--- a/sql/schema/v1-core.sql
+++ b/sql/schema/v1-core.sql
@@ -432,6 +432,48 @@ alter table v1_task_runtime set (
   autovacuum_vacuum_cost_limit='1000'
 );
 
+-- v1_worker_slot_config stores per-worker config for arbitrary slot types.
+CREATE TABLE v1_worker_slot_config (
+    tenant_id UUID NOT NULL,
+    worker_id UUID NOT NULL,
+    slot_type TEXT NOT NULL,
+    max_units INTEGER NOT NULL,
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    PRIMARY KEY (tenant_id, worker_id, slot_type)
+);
+
+-- v1_step_slot_request stores per-step slot requests.
+CREATE TABLE v1_step_slot_request (
+    tenant_id UUID NOT NULL,
+    step_id UUID NOT NULL,
+    slot_type TEXT NOT NULL,
+    units INTEGER NOT NULL,
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    PRIMARY KEY (tenant_id, step_id, slot_type)
+);
+
+CREATE INDEX v1_step_slot_request_step_idx
+    ON v1_step_slot_request (step_id ASC);
+
+-- v1_task_runtime_slot stores runtime slot consumption per task.
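+-- Each row holds the units a running task consumes for one slot type on a
+-- worker; remaining capacity for a (worker, slot_type) pair is, roughly (sketch):
+--   v1_worker_slot_config.max_units - SUM(v1_task_runtime_slot.units)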
+CREATE TABLE v1_task_runtime_slot (
+    tenant_id UUID NOT NULL,
+    task_id bigint NOT NULL,
+    task_inserted_at TIMESTAMPTZ NOT NULL,
+    retry_count INTEGER NOT NULL,
+    worker_id UUID NOT NULL,
+    slot_type TEXT NOT NULL,
+    units INTEGER NOT NULL,
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    PRIMARY KEY (task_id, task_inserted_at, retry_count, slot_type)
+);
+
+CREATE INDEX v1_task_runtime_slot_tenant_worker_type_idx
+    ON v1_task_runtime_slot (tenant_id ASC, worker_id ASC, slot_type ASC);
+
 -- v1_rate_limited_queue_items represents a queue item that has been rate limited and removed from the v1_queue_item table.
 CREATE TABLE v1_rate_limited_queue_items (
     requeue_after TIMESTAMPTZ NOT NULL,