Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Make default hitsAddend minimum value configurable #729

Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion src/limiter/base_limiter.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ type BaseRateLimiter struct {
localCache *freecache.Cache
nearLimitRatio float32
StatsManager stats.Manager
HitsAddendMinValue uint32
}

type LimitInfo struct {
Expand Down Expand Up @@ -143,7 +144,7 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *
}

func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64,
localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager,
localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager, hitsAddendMinValue uint32,
) *BaseRateLimiter {
return &BaseRateLimiter{
timeSource: timeSource,
Expand All @@ -153,6 +154,7 @@ func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expira
localCache: localCache,
nearLimitRatio: nearLimitRatio,
StatsManager: statsManager,
HitsAddendMinValue: hitsAddendMinValue,
}
}

Expand Down
7 changes: 4 additions & 3 deletions src/memcached/cache_impl.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ func (this *rateLimitMemcacheImpl) DoLimit(
logger.Debugf("starting cache lookup")

// request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request.
hitsAddend := utils.Max(1, request.HitsAddend)
hitsAddend := utils.Max(this.baseRateLimiter.HitsAddendMinValue, request.HitsAddend)

// First build a list of all cache keys that we are actually going to hit.
cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddend)
Expand Down Expand Up @@ -302,7 +302,7 @@ func runAsync(task func()) {
}

func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand,
expirationJitterMaxSeconds int64, localCache *freecache.Cache, statsManager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string,
expirationJitterMaxSeconds int64, localCache *freecache.Cache, statsManager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string, hitsAddendMinValue uint32,
) limiter.RateLimitCache {
return &rateLimitMemcacheImpl{
client: client,
Expand All @@ -311,7 +311,7 @@ func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRan
expirationJitterMaxSeconds: expirationJitterMaxSeconds,
localCache: localCache,
nearLimitRatio: nearLimitRatio,
baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, statsManager),
baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, statsManager, hitsAddendMinValue),
}
}

Expand All @@ -327,5 +327,6 @@ func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.Tim
statsManager,
s.NearLimitRatio,
s.CacheKeyPrefix,
s.HitsAddendMinValue,
)
}
1 change: 1 addition & 0 deletions src/redis/cache_impl.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,5 +37,6 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca
s.CacheKeyPrefix,
statsManager,
s.StopCacheKeyIncrementWhenOverlimit,
s.HitsAddendMinValue,
), closer
}
6 changes: 3 additions & 3 deletions src/redis/fixed_cache_impl.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ func (this *fixedRateLimitCacheImpl) DoLimit(
logger.Debugf("starting cache lookup")

// request.HitsAddend could be 0 (default value) if not specified by the caller in the RateLimit request.
hitsAddend := utils.Max(1, request.HitsAddend)
hitsAddend := utils.Max(this.baseRateLimiter.HitsAddendMinValue, request.HitsAddend)

// First build a list of all cache keys that we are actually going to hit.
cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddend)
Expand Down Expand Up @@ -218,12 +218,12 @@ func (this *fixedRateLimitCacheImpl) Flush() {}

func NewFixedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource utils.TimeSource,
jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager,
stopCacheKeyIncrementWhenOverlimit bool,
stopCacheKeyIncrementWhenOverlimit bool, hitsAddendMinValue uint32,
) limiter.RateLimitCache {
return &fixedRateLimitCacheImpl{
client: client,
perSecondClient: perSecondClient,
stopCacheKeyIncrementWhenOverlimit: stopCacheKeyIncrementWhenOverlimit,
baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, statsManager),
baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, statsManager, hitsAddendMinValue),
}
}
1 change: 1 addition & 0 deletions src/settings/settings.go
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,7 @@ type Settings struct {
CacheKeyPrefix string `envconfig:"CACHE_KEY_PREFIX" default:""`
BackendType string `envconfig:"BACKEND_TYPE" default:"redis"`
StopCacheKeyIncrementWhenOverlimit bool `envconfig:"STOP_CACHE_KEY_INCREMENT_WHEN_OVERLIMIT" default:"false"`
HitsAddendMinValue uint32 `envconfig:"DEFAULT_HITS_ADDEND_MIN_VALUE" default:"1"`
Copy link
Contributor

@arkodg arkodg Dec 11, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

can we do w/o this extra field ?
we'll need to make HitsAddend optional/ptr in the API https://github.com/envoyproxy/go-control-plane/blob/e0e383246f4cd59a40967987057c76f03526cffb/envoy/service/ratelimit/v3/rls.pb.go#L179
which may be a breaking change
cc @zirain @mathetake @wbpcode

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

if this still defaults to 1 then i don't think this would be a breaking change in practice i guess? but ack we should need to allow zero in the API regardless

Copy link
Author

@gcalmettes gcalmettes Dec 12, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@arkodg I confirm that this is not a breaking change and that the HitsAddendMinValue field introduced here is not needed in the RatelimitRequest sent to the ratelimit service.

This is only a config variable of the ratelimit service, with a default value of 1 that matches the current hardcoded value of 1.

@mathetake there is no change to be done to the API, as zero is already a valid value for the HitsAddend field. In fact it is the default value when no custom HitsAddend value is set via the Set-Filter-State HTTP filter or directly in the RatelimitRequest, and the actual +1 increment is enforced at the ratelimit service level (if the HitsAddend value is set to zero in the request -- either voluntarily or because it is absent from the request -- the value is overridden to 1).

Currently, if someone uses the FilterState to voluntarily set the HitsAddend value to zero (we have a use case for that) or sets the HitsAddend value to zero in the RatelimitRequest, then in practice the limit counter will still be incremented because of the max(request.HitsAddend, 1) check that is done, which is not expected.

What this PR does is basically just allow changing this hardcoded 1 to any value >= 0 (same constraints as the API, but still keeping 1 as the default value so as not to change the actual behavior) to allow RatelimitRequests with a zero increment.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

hey @gcalmettes, we understand the issue and our use case is the same as yours :)
I was questioning the current RLS API and the semantics defined in RateLimitRequest; since it's not a pointer, there's no way to distinguish unset (default: 1) from explicitly set to 0.
It maybe too late to edit that API, the easiest thing to do maybe is

  • set the default value to 1 in envoy if HitsAddend is not set
  • update the code in this project to max(request.HitsAddend, 0), simplifying the logic and maintaining backwards compatibility
    wdyt @mathetake @wbpcode

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

sounds like you had the same idea @mathetake #729 (comment)


// Settings for optional returning of custom headers
RateLimitResponseHeadersEnabled bool `envconfig:"LIMIT_RESPONSE_HEADERS_ENABLED" default:"false"`
Expand Down
Binary file modified test/integration/dump.rdb
Binary file not shown.
24 changes: 12 additions & 12 deletions test/limiter/base_limiter_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ func TestGenerateCacheKeys(t *testing.T) {
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)
timeSource.EXPECT().UnixNow().Return(int64(1234))
baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm, 1)
request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)}
assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value())
Expand All @@ -46,7 +46,7 @@ func TestGenerateCacheKeysPrefix(t *testing.T) {
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)
timeSource.EXPECT().UnixNow().Return(int64(1234))
baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:", sm)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:", sm, 1)
request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)}
assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value())
Expand All @@ -63,7 +63,7 @@ func TestOverLimitWithLocalCache(t *testing.T) {
localCache := freecache.NewCache(100)
localCache.Set([]byte("key"), []byte("value"), 100)
sm := mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))
baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "", sm)
baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "", sm, 1)
// Returns true, as local cache contains over limit value for the key.
assert.Equal(true, baseRateLimit.IsOverLimitWithLocalCache("key"))
}
Expand All @@ -73,11 +73,11 @@ func TestNoOverLimitWithLocalCache(t *testing.T) {
controller := gomock.NewController(t)
defer controller.Finish()
sm := mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))
baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "", sm)
baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "", sm, 1)
// Returns false, as local cache is nil.
assert.Equal(false, baseRateLimit.IsOverLimitWithLocalCache("domain_key_value_1234"))
localCache := freecache.NewCache(100)
baseRateLimitWithLocalCache := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "", sm)
baseRateLimitWithLocalCache := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "", sm, 1)
// Returns false, as local cache does not contain value for cache key.
assert.Equal(false, baseRateLimitWithLocalCache.IsOverLimitWithLocalCache("domain_key_value_1234"))
}
Expand All @@ -87,7 +87,7 @@ func TestGetResponseStatusEmptyKey(t *testing.T) {
controller := gomock.NewController(t)
defer controller.Finish()
sm := mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))
baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "", sm)
baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "", sm, 1)
responseStatus := baseRateLimit.GetResponseDescriptorStatus("", nil, false, 1)
assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode())
assert.Equal(uint32(0), responseStatus.GetLimitRemaining())
Expand All @@ -101,7 +101,7 @@ func TestGetResponseStatusOverLimitWithLocalCache(t *testing.T) {
timeSource.EXPECT().UnixNow().Return(int64(1234))
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm, 1)
limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)}
limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5)
// As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits.
Expand All @@ -123,7 +123,7 @@ func TestGetResponseStatusOverLimitWithLocalCacheShadowMode(t *testing.T) {
timeSource.EXPECT().UnixNow().Return(int64(1234))
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm, 1)
// This limit is in ShadowMode
limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil, false)}
limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5)
Expand All @@ -148,7 +148,7 @@ func TestGetResponseStatusOverLimit(t *testing.T) {
statsStore := stats.NewStore(stats.NewNullSink(), false)
localCache := freecache.NewCache(100)
sm := mockstats.NewMockStatManager(statsStore)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm, 1)
limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)}
limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5)
responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1)
Expand All @@ -173,7 +173,7 @@ func TestGetResponseStatusOverLimitShadowMode(t *testing.T) {
statsStore := stats.NewStore(stats.NewNullSink(), false)
localCache := freecache.NewCache(100)
sm := mockstats.NewMockStatManager(statsStore)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm, 1)
// Key is in shadow_mode: true
limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil, false)}
limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5)
Expand All @@ -196,7 +196,7 @@ func TestGetResponseStatusBelowLimit(t *testing.T) {
timeSource.EXPECT().UnixNow().Return(int64(1234))
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm, 1)
limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)}
limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10)
responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1)
Expand All @@ -217,7 +217,7 @@ func TestGetResponseStatusBelowLimitShadowMode(t *testing.T) {
timeSource.EXPECT().UnixNow().Return(int64(1234))
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm, 1)
limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil, false)}
limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10)
responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1)
Expand Down
14 changes: 7 additions & 7 deletions test/memcached/cache_impl_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ func TestMemcached(t *testing.T) {
client := mock_memcached.NewMockClient(controller)
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)
cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")
cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "", 1)

timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return(
Expand Down Expand Up @@ -141,7 +141,7 @@ func TestMemcachedGetError(t *testing.T) {
client := mock_memcached.NewMockClient(controller)
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)
cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")
cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "", 1)

timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return(
Expand Down Expand Up @@ -229,7 +229,7 @@ func TestOverLimitWithLocalCache(t *testing.T) {
sink := &common.TestStatSink{}
statsStore := stats.NewStore(sink, true)
sm := mockstats.NewMockStatManager(statsStore)
cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, localCache, sm, 0.8, "")
cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, localCache, sm, 0.8, "", 1)
localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache"))

// Test Near Limit Stats. Under Near Limit Ratio
Expand Down Expand Up @@ -331,7 +331,7 @@ func TestNearLimit(t *testing.T) {
client := mock_memcached.NewMockClient(controller)
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)
cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")
cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "", 1)

// Test Near Limit Stats. Under Near Limit Ratio
timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3)
Expand Down Expand Up @@ -513,7 +513,7 @@ func TestMemcacheWithJitter(t *testing.T) {
jitterSource := mock_utils.NewMockJitterRandSource(controller)
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)
cache := memcached.NewRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, sm, 0.8, "")
cache := memcached.NewRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, sm, 0.8, "", 1)

timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
jitterSource.EXPECT().Int63().Return(int64(100))
Expand Down Expand Up @@ -556,7 +556,7 @@ func TestMemcacheAdd(t *testing.T) {
client := mock_memcached.NewMockClient(controller)
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)
cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")
cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "", 1)

// Test a race condition with the initial add
timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
Expand Down Expand Up @@ -665,7 +665,7 @@ func TestMemcachedTracer(t *testing.T) {
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)

cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")
cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "", 1)

timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return(
Expand Down
2 changes: 1 addition & 1 deletion test/redis/bench_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ func BenchmarkParallelDoLimit(b *testing.B) {
client := redis.NewClientImpl(statsStore, false, "", "tcp", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit, nil, false, nil)
defer client.Close()

cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm, true)
cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm, true, 1)
request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)}

Expand Down
Loading