This repository has been archived by the owner on Oct 9, 2023. It is now read-only.

WIP #27

Open · wants to merge 5 commits into base: master
1 change: 1 addition & 0 deletions utils/auto_refresh_cache.go
@@ -129,6 +129,7 @@ func (w *autoRefreshCache) GetOrCreate(item CacheItem) (CacheItem, error) {
// * Sync loop updates item 2, repeat
func (w *autoRefreshCache) sync(ctx context.Context) {
keys := w.lruMap.Keys()
//logger.Infof(ctx, "Keys count: %v", len(keys))
for _, k := range keys {
// If not ok, it means the item was evicted between getting the keys and this update loop,
// which is fine, we can just ignore it.
91 changes: 91 additions & 0 deletions utils/auto_refresh_cache_test.go
@@ -3,6 +3,10 @@ package utils
import (
"context"
"fmt"
"math"
rand2 "math/rand"
"strconv"
"sync"
"testing"
"time"

@@ -34,6 +38,93 @@ func syncFakeItemAlwaysDelete(_ context.Context, obj CacheItem) (CacheItem, CacheSyncAction, error) {
return obj, Delete, nil
}

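// noopSync is a sync callback that leaves every item unchanged.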
func noopSync(_ context.Context, obj CacheItem) (CacheItem, CacheSyncAction, error) {
return obj, Unchanged, nil
}

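// sometimesUpdateOnSync returns a sync callback that reports Update for roughly
// the given fraction of calls (e.g. 0.01 ≈ 1%) and Unchanged otherwise.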
func sometimesUpdateOnSync(percentage float32) CacheSyncItem {
r := rand2.New(rand2.NewSource(time.Now().Unix()))
p := int(percentage * 100)
return func(_ context.Context, obj CacheItem) (CacheItem, CacheSyncAction, error) {
if r.Int()%100 < p {
return obj, Update, nil
}

return obj, Unchanged, nil
}
}

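// TestSometimesUpdateOnSync checks that, over a large sample of calls, the observed
// update rate stays within roughly one percentage point of the requested rate.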
func TestSometimesUpdateOnSync(t *testing.T) {
for expected := 1.0; expected <= 10; expected++ {
perc := float32(expected / 100.0)
f := sometimesUpdateOnSync(perc)
updateCount := 0
for i := 0; i < 10000; i++ {
for j := 0; j < 100; j++ {
_, action, err := f(nil, nil)
assert.NoError(t, err)
if action == Update {
updateCount++
}
}
}

actual := float64(updateCount) / 10000.0
assert.True(t, expected <= math.Ceil(actual)+1 && expected >= math.Floor(actual)-1,
"Expected: %v, Actual: %v", expected, actual)
}
}

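// BenchmarkCache concurrently creates batches of items in an auto-refresh cache
// and then verifies that every item is still retrievable afterwards.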
func BenchmarkCache(b *testing.B) {
testResyncPeriod := time.Second
rateLimiter := NewRateLimiter("mockLimiter", 100, 1)
// the size of the cache is at least as large as the number of items we're storing
itemCount := b.N
cache, err := NewAutoRefreshCache(sometimesUpdateOnSync(1), rateLimiter, testResyncPeriod, itemCount*2, nil)
assert.NoError(b, err)

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cache.Start(ctx)

startIdx := 1

wg := sync.WaitGroup{}
for n := 0; n < b.N; n++ {
wg.Add(itemCount)
// Create items in the cache
for i := 1; i <= itemCount; i++ {
go func(itemId int) {
defer wg.Done()
_, err := cache.GetOrCreate(fakeCacheItem{
id: fmt.Sprintf("%d", itemId),
val: itemId,
})

assert.NoError(b, err)
}(i + startIdx)
}

wg.Wait()

// Verify all items are still retrievable from the cache after the resync period
wg.Add(itemCount)
for i := 1; i <= itemCount; i++ {
go func(itemId int) {
defer wg.Done()
item := cache.Get(fmt.Sprintf("%d", itemId))
assert.NotNil(b, item, "item #%v", itemId)
if item != nil {
assert.Equal(b, strconv.Itoa(itemId), item.(fakeCacheItem).ID())
}
}(i + startIdx)
}

wg.Wait()
startIdx += itemCount
}
}

func TestCacheTwo(t *testing.T) {
testResyncPeriod := time.Millisecond
rateLimiter := NewRateLimiter("mockLimiter", 100, 1)