Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
76 commits
Select commit Hold shift + click to select a range
91a05bd
poc
shults Feb 2, 2026
6c59ebd
import fix
shults Feb 6, 2026
c88e5f8
part B poc
shults Feb 9, 2026
8496a19
add bucket tests, removed data race
shults Feb 9, 2026
8d0016b
test cover
shults Feb 9, 2026
ebda404
some other tests
shults Feb 9, 2026
64772f8
header sender unit tests
shults Feb 10, 2026
43f45fe
code coverage improvement
shults Feb 10, 2026
db1b418
artificial code coverage increase :)
shults Feb 10, 2026
514af40
add tests for limitSentinel
shults Feb 10, 2026
7fe4d97
mock
shults Feb 10, 2026
1f7b9ea
add tests
shults Feb 10, 2026
47d245b
limitRedis tests
shults Feb 10, 2026
547ebce
apply Patric's suggestion
shults Feb 12, 2026
70c614a
config loading precedence
shults Feb 12, 2026
0c4d37b
Maciej review
shults Feb 12, 2026
68cd697
poc2
shults Feb 18, 2026
42ccf6d
fix failing unit test
shults Feb 19, 2026
75918ab
fix failing tests
shults Feb 19, 2026
68526b8
merge master and resolve conflicts
Mar 31, 2026
945e1b4
feat: add bucket state and checker and update rate limits headers han…
Mar 31, 2026
b4d2521
test: update bucket tests with using synctest to avoid flakiness
Mar 31, 2026
ae1f5e0
Merge branch 'master' of github.com:TykTechnologies/tyk into feat/TT-…
Mar 31, 2026
d949f75
fix: update new bucket state checker with using tokens conversation i…
Apr 1, 2026
ee4c0f5
fix: update shouldBlock condition after checking the limiters logic w…
Apr 1, 2026
49e4deb
test: adding tests covering rate limit headers logic for rate_limitin…
Apr 1, 2026
516b87b
test: add tests covering edge cases for rate limit and quota headers
Apr 1, 2026
546f9d8
fix: update reset value for rate_limit at context with unix format
Apr 1, 2026
d7807fa
test: add test covering logic with limiters for api mw
Apr 1, 2026
fa23666
refactor: extend existing test instead of creating new one
Apr 1, 2026
722a77b
refactor: remove redundant test
Apr 1, 2026
4ae08d8
feat: add headers removing by rate limit sender to avoid override by …
Apr 1, 2026
e7c7855
fix: update test with actually hitting the dynamic handler route
Apr 1, 2026
6bc00ea
fix: update condition for limitDRL to avoid cases of returning should…
Apr 1, 2026
82cc2fb
feat: add send quotas to cache_mw for striping cached upstream headers
Apr 2, 2026
81bd076
Merge branch 'master' of github.com:TykTechnologies/tyk into feat/TT-…
Apr 2, 2026
3a46ad6
chore: remove Yarik's highlighting comments
Apr 2, 2026
d375f39
chore: update comment
Apr 3, 2026
2f2c92d
feat: update condition to always set remaining values to 0 if origina…
Apr 3, 2026
5d6aaea
chore: remove Yarik's comment
Apr 3, 2026
89dbae2
chore: update script with formatting
Apr 3, 2026
dc5f79d
docs: add comments explaining the logic behind RL and Quotas headers …
Apr 3, 2026
50a95ff
chore: remove gomock dependency and custom stub because its not used …
Apr 3, 2026
a210b75
fix: update mw_org_activity logic with passing nil and add condition …
Apr 3, 2026
d485ffb
feat: add condition to write headers for exposing global api limits f…
Apr 3, 2026
d692b48
fix: replace "" with "0" in tests after update logic with remaining h…
Apr 3, 2026
b50f082
fix: add case for mw_url_rewrite to process checker state int
Apr 3, 2026
11ce169
test: add one more eventually to fix flaky test
Apr 3, 2026
352b3bc
Revert "test: add one more eventually to fix flaky test"
Apr 3, 2026
bd2f515
test: increase time to resolve flaky test
Apr 3, 2026
6361bf3
test: update tests with fixes for flaky tests
Apr 3, 2026
8b9d695
test: trying to fix flaky test with mutex
Apr 4, 2026
e7265f7
test: add idle conn timeout to 10 seconds to check if it helps to fix…
Apr 4, 2026
51acb6a
debug: check if rework assertion with > 0 for connections will fix th…
Apr 4, 2026
69c9467
debug: adding http handler idle timeout with launching proxies with r…
Apr 4, 2026
ad91879
debug: another try with http handler timeouts update
Apr 4, 2026
9057bbf
fix: add closing test case server on test and revert changes of faili…
Apr 6, 2026
2bcec8a
fix: add unique apiid to avoid key collision on redis
Apr 6, 2026
e67607c
fix: add unique key for redis at limitRedis test
Apr 6, 2026
157e38e
revert refactoring changes to avoid 100 issues on test file
Apr 6, 2026
c8954a5
Merge branch 'master' into feat/TT-6075/update-rate-limit-header-logic
vladzabolotnyi Apr 6, 2026
73a6407
Merge branch 'master' into feat/TT-6075/update-rate-limit-header-logic
vladzabolotnyi Apr 6, 2026
aab63f1
feat: replace global enable rl ctx variables with api def check
Apr 7, 2026
e11b500
fix: remove enable_context_variables from cli schema
Apr 7, 2026
eb72dbe
test: add tests covering logic with attaching rl and quota values to …
Apr 7, 2026
840cd11
refactor: move rate_limit_headers sourced to rate_limit config type a…
Apr 7, 2026
3b9c0a1
config: replace rl_headers_source with rl_response_headers to make it…
Apr 7, 2026
776896d
Merge branch 'master' into feat/TT-6075/update-rate-limit-header-logic
vladzabolotnyi Apr 7, 2026
6751798
docs: update cli schema description of new field
vladzabolotnyi Apr 8, 2026
3f79432
Merge branch 'master' into feat/TT-6075/update-rate-limit-header-logic
vladzabolotnyi Apr 10, 2026
9d44b57
Merge branch 'master' into feat/TT-6075/update-rate-limit-header-logic
vladzabolotnyi Apr 14, 2026
93ef219
Merge branch 'master' into feat/TT-6075/update-rate-limit-header-logic
vladzabolotnyi Apr 15, 2026
36f92b8
Merge branch 'master' into feat/TT-6075/update-rate-limit-header-logic
vladzabolotnyi Apr 17, 2026
739d1ec
Merge branch 'master' into feat/TT-6075/update-rate-limit-header-logic
vladzabolotnyi Apr 17, 2026
4b325f3
Merge branch 'master' into feat/TT-6075/update-rate-limit-header-logic
vladzabolotnyi Apr 20, 2026
93571d1
Merge branch 'master' into feat/TT-6075/update-rate-limit-header-logic
vladzabolotnyi Apr 20, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions cli/linter/schema.json
Original file line number Diff line number Diff line change
Expand Up @@ -1610,6 +1610,12 @@
}
}
},
"rate_limit_response_headers": {
"description": "Determines the type of data that will be returned in the rate limit headers",
"type": ["string"],
"enum": ["", "quotas", "rate_limits"],
"default": "quotas"
},
"allow_unsafe_policy_ids": {
"type": ["boolean", "null"],
"additionalProperties": false
Expand Down
12 changes: 12 additions & 0 deletions config/rate_limit.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,20 @@ type RateLimit struct {

// Controls which algorithm to use as a fallback when your distributed rate limiter can't be used.
DRLEnableSentinelRateLimiter bool `json:"drl_enable_sentinel_rate_limiter"`

// RateLimitResponseHeaders specifies the data source for rate limit headers in HTTP responses.
// This controls whether rate limit headers (X-RateLimit-Limit, X-RateLimit-Remaining, etc.)
// are populated from quota data or rate limit data. Valid values: "quotas", "rate_limits".
RateLimitResponseHeaders RateLimitSource `json:"rate_limit_response_headers"`
}

// RateLimitSource selects which dataset populates the rate limit response
// headers (see RateLimit.RateLimitResponseHeaders): quota counters or
// rate limit counters.
type RateLimitSource string

const (
	// SourceQuotas populates rate limit headers from quota data.
	SourceQuotas RateLimitSource = "quotas"
	// SourceRateLimits populates rate limit headers from rate limit data.
	SourceRateLimits RateLimitSource = "rate_limits"
)

// String returns a readable setting for the rate limiter in effect.
func (r *RateLimit) String() string {
info := "using transactions"
Expand Down
4 changes: 2 additions & 2 deletions gateway/middleware_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -420,7 +420,7 @@ func TestSessionLimiter_RedisQuotaExceeded_ExpiredAtReset(t *testing.T) {
}

beforeTime := time.Now()
blocked := limiter.RedisQuotaExceeded(req, session, quotaKey, "", limit, g.Gw.GlobalSessionManager.Store(), false)
blocked := limiter.RedisQuotaExceeded(req, session, quotaKey, "", limit, false, false)
afterTime := time.Now()

assert.Equal(t, quotaMax-1, session.QuotaRemaining, "Quota remaining should be quotaMax - 1 after increment")
Expand Down Expand Up @@ -475,7 +475,7 @@ func TestSessionLimiter_RedisQuotaExceeded_ExpiredAtReset(t *testing.T) {
}

beforeTime := time.Now()
blocked := limiter.RedisQuotaExceeded(req, session, quotaKey, scope, limit, g.Gw.GlobalSessionManager.Store(), false)
blocked := limiter.RedisQuotaExceeded(req, session, quotaKey, scope, limit, false, false)
afterTime := time.Now()

accessDef := session.AccessRights["api1"]
Expand Down
25 changes: 24 additions & 1 deletion gateway/model.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,20 @@ import (

type EventMetaDefault = model.EventMetaDefault

// CtxData is the free-form per-request data map stored on the request
// context (accessed via ctxGetData/ctxSetData).
type CtxData = map[string]any

// Keys used inside CtxData for storing rate limit and quota values.
const (
	ctxDataKeyRateLimitLimit     = "rate_limit_limit"
	ctxDataKeyRateLimitRemaining = "rate_limit_remaining"
	ctxDataKeyRateLimitReset     = "rate_limit_reset"

	ctxDataKeyQuotaLimit     = "quota_limit"
	ctxDataKeyQuotaRemaining = "quota_remaining"
	ctxDataKeyQuotaReset     = "quota_reset"
)

var (
ctxData = httpctx.NewValue[map[string]any](ctx.ContextData)
ctxData = httpctx.NewValue[CtxData](ctx.ContextData)

ctxGetData = ctxData.Get
ctxSetData = ctxData.Set
Expand All @@ -28,3 +40,14 @@ var (

EncodeRequestToEvent = event.EncodeRequestToEvent
)

// ctxGetOrCreateData returns the request's context data map, lazily
// creating and storing an empty one when none is present yet.
func ctxGetOrCreateData(r *http.Request) CtxData {
	if existing := ctxGetData(r); existing != nil {
		return existing
	}

	fresh := CtxData{}
	ctxSetData(r, fresh)

	return fresh
}
19 changes: 0 additions & 19 deletions gateway/model_apispec.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ import (
"context"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
Expand All @@ -17,15 +16,13 @@ import (
"github.com/TykTechnologies/tyk/apidef/oas"
"github.com/TykTechnologies/tyk/config"
"github.com/TykTechnologies/tyk/ctx"
"github.com/TykTechnologies/tyk/header"
"github.com/TykTechnologies/tyk/internal/agentprotocol"
"github.com/TykTechnologies/tyk/internal/certcheck"
"github.com/TykTechnologies/tyk/internal/errors"
"github.com/TykTechnologies/tyk/internal/graphengine"
"github.com/TykTechnologies/tyk/internal/httpctx"
"github.com/TykTechnologies/tyk/internal/httputil"
"github.com/TykTechnologies/tyk/internal/jsonrpc"
"github.com/TykTechnologies/tyk/user"

_ "github.com/TykTechnologies/tyk/internal/mcp" // registers MCP VEM prefixes
)
Expand Down Expand Up @@ -364,19 +361,3 @@ func (a *APISpec) APIType() string {
return "classic"
}
}

// sendRateLimitHeaders writes quota-derived X-RateLimit-* headers onto the
// destination response. With a nil session all three values are reported
// as zero.
func (a *APISpec) sendRateLimitHeaders(session *user.SessionState, dest *http.Response) {
	var quotaMax, quotaRemaining, quotaRenews int64

	if session != nil {
		quotaMax, quotaRemaining, _, quotaRenews = session.GetQuotaLimitByAPIID(a.APIID)
	}

	// A mock/synthesized response may arrive without a header map.
	if dest.Header == nil {
		dest.Header = make(http.Header)
	}

	dest.Header.Set(header.XRateLimitLimit, strconv.Itoa(int(quotaMax)))
	dest.Header.Set(header.XRateLimitRemaining, strconv.Itoa(int(quotaRemaining)))
	dest.Header.Set(header.XRateLimitReset, strconv.Itoa(int(quotaRenews)))
}
32 changes: 32 additions & 0 deletions gateway/model_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
package gateway

import (
"net/http"
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

// Test_ctxGetOrCreateData verifies both the reuse of existing context data
// and the lazy creation (plus persistence) of a fresh map.
func Test_ctxGetOrCreateData(t *testing.T) {
	t.Run("returns data if already exists", func(t *testing.T) {
		req, err := http.NewRequestWithContext(t.Context(), "GET", "/", nil)
		require.NoError(t, err)

		seeded := CtxData{"hello": "world0"}
		ctxSetData(req, seeded)

		assert.Equal(t, seeded, ctxGetOrCreateData(req))
	})

	t.Run("create new data if not exists", func(t *testing.T) {
		req, err := http.NewRequestWithContext(t.Context(), "GET", "/", nil)
		require.NoError(t, err)

		first := ctxGetOrCreateData(req)
		first["hello"] = "world1"

		// A second call must hand back the same stored map.
		second := ctxGetOrCreateData(req)
		assert.Equal(t, first, second)
	})
}
19 changes: 14 additions & 5 deletions gateway/mw_api_rate_limit.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
"time"

"github.com/TykTechnologies/tyk/ctx"
"github.com/TykTechnologies/tyk/header"
tykerrors "github.com/TykTechnologies/tyk/internal/errors"
"github.com/TykTechnologies/tyk/internal/event"
"github.com/TykTechnologies/tyk/storage"
Expand Down Expand Up @@ -94,30 +95,38 @@
}

// ProcessRequest will run any checks on the request on the way through the system, return an error to have the chain fail
func (k *RateLimitForAPI) ProcessRequest(_ http.ResponseWriter, r *http.Request, _ interface{}) (error, int) {
//
//nolint:staticcheck
func (k *RateLimitForAPI) ProcessRequest(rw http.ResponseWriter, r *http.Request, _ interface{}) (error, int) {
// Skip rate limiting and quotas for looping
if !ctxCheckLimits(r) {
return nil, http.StatusOK
}

Check warning on line 104 in gateway/mw_api_rate_limit.go

View check run for this annotation

probelabs / Visor: architecture

architecture Issue

The logic to prevent API-level rate limit headers from overwriting per-key headers relies on checking for the presence of the `X-RateLimit-Limit` header. This creates a fragile, implicit dependency on the execution order and side effects of the `RateLimitAndQuotaCheck` middleware. A change in that middleware's header-setting behavior could break this logic. This pattern makes the interaction between middleware components less explicit and harder to reason about.
Raw output
Use a request context value to explicitly signal that rate limit headers have already been handled by a more specific limiter. For example, the `RateLimitAndQuotaCheck` middleware could set a flag in the context, and this middleware would check for that flag instead of inspecting the headers. This makes the contract between the middleware components explicit.

storeRef := k.Gw.GlobalSessionManager.Store()
session := k.getSession(r)

limitHeaderSender := k.Gw.limitHeaderFactory(rw.Header())
// Only inject API-level rate limit headers if personal rate limit headers
// haven't already been injected by RateLimitAndQuotaCheck.
if rw.Header().Get(header.XRateLimitLimit) != "" {
limitHeaderSender = nil
}

reason := k.Gw.SessionLimiter.ForwardMessage(
r,
k.getSession(r),
session,
k.keyName,
k.quotaKey,
storeRef,
true,
false,
k.Spec,
false,
limitHeaderSender,
)

k.emitRateLimitEvents(r, k.keyName)

if reason == sessionFailRateLimit {
// Set error classification for access logs
ctx.SetErrorClassification(r, tykerrors.ClassifyRateLimitError(tykerrors.ErrTypeAPIRateLimit, k.Name()))
return k.handleRateLimitFailure(r, event.RateLimitExceeded, "API Rate Limit Exceeded", k.keyName)
}
Expand Down
79 changes: 79 additions & 0 deletions gateway/mw_api_rate_limit_test.go
Original file line number Diff line number Diff line change
@@ -1,80 +1,159 @@
package gateway

import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"

"github.com/TykTechnologies/tyk/apidef"
"github.com/TykTechnologies/tyk/config"
"github.com/TykTechnologies/tyk/header"

"github.com/stretchr/testify/assert"

"github.com/justinas/alice"

"github.com/TykTechnologies/tyk/internal/uuid"

"github.com/TykTechnologies/tyk/test"
"github.com/TykTechnologies/tyk/user"
)

// createRLSession builds a session that is effectively unthrottled by the
// rate limiter (high rate per second) while carrying a small renewable quota,
// for use in rate limit middleware tests.
func createRLSession() *user.SessionState {
	now := time.Now().Unix()

	session := user.NewSessionState()
	session.Rate = 100.0 // essentially non-throttled
	session.Allowance = session.Rate
	session.Per = 1.0
	session.LastCheck = now
	session.QuotaRenewalRate = 300 // 5 minutes
	session.QuotaRenews = now
	session.QuotaMax = 10
	session.QuotaRemaining = 10
	session.AccessRights = map[string]user.AccessDefinition{
		"31445455": {APIName: "Tyk Auth Key Test", APIID: "31445455", Versions: []string{"default"}},
	}

	return session
}

// getRLOpenChain builds the middleware chain used by keyless (open) rate
// limit tests: IP allow/deny lists, version check, then API-level rate
// limiting, terminating at the reverse proxy.
func (ts *Test) getRLOpenChain(spec *APISpec) http.Handler {
	remote, _ := url.Parse(spec.Proxy.TargetURL)
	reverseProxy := ts.Gw.TykNewSingleHostReverseProxy(remote, spec, nil)
	base := &BaseMiddleware{Spec: spec, Proxy: reverseProxy, Gw: ts.Gw}

	middlewares := ts.Gw.mwList(
		&IPWhiteListMiddleware{base},
		&IPBlackListMiddleware{BaseMiddleware: base},
		&VersionCheck{BaseMiddleware: base},
		&RateLimitForAPI{BaseMiddleware: base},
	)

	return alice.New(middlewares...).Then(ProxyHandler(reverseProxy, spec))
}

// getGlobalRLAuthKeyChain builds the full authenticated middleware chain used
// by global rate limit tests: IP lists, auth key, version/expiry/access
// checks, API-level rate limiting and per-key rate limiting/quota,
// terminating at the reverse proxy.
func (ts *Test) getGlobalRLAuthKeyChain(spec *APISpec) http.Handler {
	remote, _ := url.Parse(spec.Proxy.TargetURL)
	reverseProxy := ts.Gw.TykNewSingleHostReverseProxy(remote, spec, nil)
	base := &BaseMiddleware{Spec: spec, Proxy: reverseProxy, Gw: ts.Gw}

	middlewares := ts.Gw.mwList(
		&IPWhiteListMiddleware{base},
		&IPBlackListMiddleware{BaseMiddleware: base},
		&AuthKey{base},
		&VersionCheck{BaseMiddleware: base},
		&KeyExpired{base},
		&AccessRightsCheck{base},
		&RateLimitForAPI{BaseMiddleware: base},
		&RateLimitAndQuotaCheck{base},
	)

	return alice.New(middlewares...).Then(ProxyHandler(reverseProxy, spec))
}

// TestRateLimitForAPI_EnabledForSpec checks that the API-level rate limit
// middleware reports itself disabled when the spec's global rate limit is
// explicitly disabled, even with a non-zero rate configured.
func TestRateLimitForAPI_EnabledForSpec(t *testing.T) {
	disabledSpec := APISpec{APIDefinition: &apidef.APIDefinition{
		GlobalRateLimit: apidef.GlobalRateLimit{Disabled: true, Rate: 2, Per: 1},
	}}

	mw := &RateLimitForAPI{BaseMiddleware: &BaseMiddleware{Spec: &disabledSpec}}
	assert.False(t, mw.EnabledForSpec())
}

func TestAPIRateLimitResponseHeaders(t *testing.T) {
limiters := []string{"Redis", "Sentinel", "DRL", "FixedWindow"}

for _, limiter := range limiters {
t.Run("API Rate limit headers for "+limiter, func(t *testing.T) {
ts := StartTest(func(globalConf *config.Config) {
globalConf.RateLimitResponseHeaders = config.SourceRateLimits

switch limiter {
case "Redis":
globalConf.EnableRedisRollingLimiter = true
case "Sentinel":
globalConf.EnableSentinelRateLimiter = true
case "DRL":
globalConf.DRLEnableSentinelRateLimiter = true
case "FixedWindow":
globalConf.EnableFixedWindowRateLimiter = true
}
})

Check warning on line 99 in gateway/mw_api_rate_limit_test.go

View check run for this annotation

probelabs / Visor: quality

style Issue

The test logic in `TestAPIRateLimitResponseHeaders` is nearly identical to the logic in `TestRateLimitResponseHeaders` in `gateway/mw_rate_limiting_test.go`. Both functions iterate through the same set of rate limiters and perform similar request and response header checks. The primary difference is that one tests keyless (global) rate limits while the other tests per-key rate limits. This duplication makes the tests harder to maintain, as any change to the testing logic must be applied in two places.
Raw output
Refactor the common test logic into a shared test helper function. This helper could accept parameters to configure the API for either keyless or per-key rate limiting and to determine whether an authorization header should be sent. This would eliminate significant code duplication and centralize the test logic, improving maintainability.
defer ts.Close()

var (
rateLimitRate float64 = 2
rateLimitPer float64 = 10
)

_ = ts.Gw.BuildAndLoadAPI(func(spec *APISpec) {
spec.APIID = "api-rate-limit-headers-test-" + limiter
spec.Proxy.ListenPath = "/api-rate-limit-headers-test"
spec.UseKeylessAccess = true
spec.GlobalRateLimit = apidef.GlobalRateLimit{
Disabled: false,
Rate: rateLimitRate,
Per: rateLimitPer,
}
})[0]

expectedRemaining1 := fmt.Sprintf("%d", int(rateLimitRate)-1)
expectedRemaining2 := fmt.Sprintf("%d", int(rateLimitRate)-2)

headersMatch1 := map[string]string{
header.XRateLimitLimit: fmt.Sprintf("%d", int(rateLimitRate)),
}
headersMatch2 := map[string]string{
header.XRateLimitLimit: fmt.Sprintf("%d", int(rateLimitRate)),
}

// For limiters that don't support Remaining (Sentinel, FixedWindow), it should be assigned to 0.
if limiter == "Redis" || limiter == "DRL" {
headersMatch1[header.XRateLimitRemaining] = expectedRemaining1
headersMatch2[header.XRateLimitRemaining] = expectedRemaining2
} else {
headersMatch1[header.XRateLimitRemaining] = "0"
headersMatch2[header.XRateLimitRemaining] = "0"
}

_, _ = ts.Run(t, []test.TestCase{
{
Path: "/api-rate-limit-headers-test",
Code: http.StatusOK,
HeadersMatch: headersMatch1,
},
{
Path: "/api-rate-limit-headers-test",
Code: http.StatusOK,
HeadersMatch: headersMatch2,
},
{
Path: "/api-rate-limit-headers-test",
Code: http.StatusTooManyRequests,
},
}...)
})
}
}

func TestRLOpen(t *testing.T) {
ts := StartTest(nil)
defer ts.Close()
Expand Down
2 changes: 1 addition & 1 deletion gateway/mw_mock_response.go
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@
res.Header.Set(header.Connection, "close")
}

m.Spec.sendRateLimitHeaders(ctxGetSession(r), res)
m.Gw.limitHeaderFactory(res.Header).SendQuotas(ctxGetSession(r), m.Spec.APIID)

Check failure on line 173 in gateway/mw_mock_response.go

View check run for this annotation

probelabs / Visor: architecture

architecture Issue

This middleware suffers from the same architectural flaw as the caching middleware. When a mock response is served in `rate_limits` mode, this line effectively causes the correct rate-limit headers (set by `RateLimitAndQuotaCheck`) to be cleared from the final response.
Raw output
Conditionalize the call to `SendQuotas` to only run when not in `rate_limits` mode. This will preserve the headers set by the preceding rate-limiting middleware.

return res, internal, nil
}
Expand Down
22 changes: 17 additions & 5 deletions gateway/mw_organisation_activity.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,13 @@ func (k *OrganizationMonitor) refreshOrgSession(orgID string) {
}

// ProcessRequest will run any checks on the request on the way through the system, return an error to have the chain fail
func (k *OrganizationMonitor) ProcessRequestLive(r *http.Request, orgSession *user.SessionState) (error, int) {
//
//nolint:staticcheck
func (k *OrganizationMonitor) ProcessRequestLive(
r *http.Request,
orgSession *user.SessionState,
) (error, int) {

logger := k.Logger()

if orgSession.IsInactive {
Expand All @@ -135,11 +141,11 @@ func (k *OrganizationMonitor) ProcessRequestLive(r *http.Request, orgSession *us
orgSession,
k.Spec.OrgID,
"",
k.Spec.OrgSessionManager.Store(),
orgSession.Per > 0 && orgSession.Rate > 0,
true,
k.Spec,
false,
nil,
)

sessionLifeTime := orgSession.Lifetime(k.Spec.GetSessionLifetimeRespectsKeyExpiration(), k.Spec.SessionLifetime, k.Gw.GetConfig().ForceGlobalSessionLifetime, k.Gw.GetConfig().GlobalSessionLifetime)
Expand Down Expand Up @@ -211,7 +217,12 @@ func (k *OrganizationMonitor) SetOrgSentinel(orgChan chan bool, orgId string) {
}
}

func (k *OrganizationMonitor) ProcessRequestOffThread(r *http.Request, orgSession *user.SessionState) (error, int) {
//nolint:staticcheck
func (k *OrganizationMonitor) ProcessRequestOffThread(
r *http.Request,
orgSession *user.SessionState,
) (error, int) {

orgChanMap.Lock()
orgChan, ok := orgChanMap.channels[k.Spec.OrgID]
if !ok {
Expand Down Expand Up @@ -251,7 +262,8 @@ func (k *OrganizationMonitor) AllowAccessNext(
path string,
IP string,
r *http.Request,
session *user.SessionState) {
session *user.SessionState,
) {

// Is it active?
logEntry := k.Gw.getExplicitLogEntryForRequest(k.Logger(), path, IP, k.Spec.OrgID, nil)
Expand All @@ -269,11 +281,11 @@ func (k *OrganizationMonitor) AllowAccessNext(
session,
k.Spec.OrgID,
customQuotaKey,
k.Spec.OrgSessionManager.Store(),
session.Per > 0 && session.Rate > 0,
true,
k.Spec,
false,
nil,
)

sessionLifeTime := session.Lifetime(k.Spec.GetSessionLifetimeRespectsKeyExpiration(), k.Spec.SessionLifetime, k.Gw.GetConfig().ForceGlobalSessionLifetime, k.Gw.GetConfig().GlobalSessionLifetime)
Expand Down
Loading
Loading