Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Performance] Avoid lock contention for getting count of inner dictionary size #2807

Open
wants to merge 1 commit into
base: dev
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 22 additions & 5 deletions src/Microsoft.IdentityModel.Tokens/EventBasedLRUCache.cs
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@ internal class EventBasedLRUCache<TKey, TValue>
// if true, then items will be maintained in a LRU fashion, moving to front of list when accessed in the cache.
private readonly bool _maintainLRU;
private ConcurrentDictionary<TKey, LRUCacheItem<TKey, TValue>> _map;
// ConcurrentDictionary<TK,TV>.Count takes all of the dictionary's internal locks, which is a performance concern,
// so we maintain a separate count, updated with Interlocked operations, for size calculations.
private int _mapSize;
// When the current cache size gets to this percentage of _capacity, _compactionPercentage% of the cache will be removed.
private readonly double _maxCapacityPercentage = .95;
private readonly int _compactIntervalInSeconds;
Expand Down Expand Up @@ -130,6 +132,7 @@ internal EventBasedLRUCache(
_capacity = capacity > 0 ? capacity : throw LogHelper.LogExceptionMessage(new ArgumentOutOfRangeException(nameof(capacity)));
_options = options;
_map = new ConcurrentDictionary<TKey, LRUCacheItem<TKey, TValue>>(comparer ?? EqualityComparer<TKey>.Default);
_mapSize = 0;
_removeExpiredValuesIntervalInSeconds = removeExpiredValuesIntervalInSeconds;
_removeExpiredValues = removeExpiredValues;
_compactIntervalInSeconds = compactIntervalInSeconds;
Expand Down Expand Up @@ -261,7 +264,10 @@ internal void RemoveExpiredValuesLRU()
{
_doubleLinkedList.Remove(node);
if (_map.TryRemove(node.Value.Key, out LRUCacheItem<TKey, TValue> cacheItem))
{
Interlocked.Decrement(ref _mapSize);
OnItemExpired?.Invoke(cacheItem.Value);
}
}

node = nextNode;
Expand Down Expand Up @@ -295,7 +301,10 @@ internal void RemoveExpiredValues()
if (node.Value.ExpirationTime < DateTime.UtcNow)
{
if (_map.TryRemove(node.Value.Key, out var cacheItem))
{
Interlocked.Decrement(ref _mapSize);
OnItemExpired?.Invoke(cacheItem.Value);
}
}
}
}
Expand Down Expand Up @@ -354,11 +363,14 @@ private void CompactLRU()
try
{
int newCacheSize = CalculateNewCacheSize();
while (_map.Count > newCacheSize && _doubleLinkedList.Count > 0)
while (_mapSize > newCacheSize && _doubleLinkedList.Count > 0)
{
LinkedListNode<LRUCacheItem<TKey, TValue>> node = _doubleLinkedList.Last;
if (_map.TryRemove(node.Value.Key, out LRUCacheItem<TKey, TValue> cacheItem))
{
Interlocked.Decrement(ref _mapSize);
OnItemMovedToCompactedList?.Invoke(cacheItem.Value);
}

_compactedItems.Add(cacheItem);
_doubleLinkedList.RemoveLast();
Expand All @@ -379,7 +391,7 @@ private void Compact()
try
{
int newCacheSize = CalculateNewCacheSize();
while (_map.Count > newCacheSize)
while (_mapSize > newCacheSize)
{
// Since all items could have been removed by the public TryRemove() method, leaving the map empty, we need to check if a default value is returned.
// Remove the item from the map only if the returned item is NOT default value.
Expand All @@ -388,6 +400,7 @@ private void Compact()
{
if (_map.TryRemove(item.Key, out LRUCacheItem<TKey, TValue> cacheItem))
{
Interlocked.Decrement(ref _mapSize);
OnItemMovedToCompactedList?.Invoke(cacheItem.Value);
_compactedItems.Add(cacheItem);
}
Expand All @@ -408,7 +421,7 @@ private void Compact()
protected int CalculateNewCacheSize()
{
// use the smaller of _mapSize and _capacity
int currentCount = Math.Min(_map.Count, _capacity);
int currentCount = Math.Min(_mapSize, _capacity);

// use the _capacity for the newCacheSize calculation in the case where the cache is experiencing overflow
return currentCount - (int)(currentCount * _compactionPercentage);
Expand Down Expand Up @@ -463,7 +476,7 @@ public bool SetValue(TKey key, TValue value, DateTime expirationTime)
else
{
// if cache is at _maxCapacityPercentage, trim it by _compactionPercentage
if ((double)_map.Count / _capacity >= _maxCapacityPercentage)
if ((double)_mapSize / _capacity >= _maxCapacityPercentage)
{
if (Interlocked.CompareExchange(ref _compactValuesState, ActionQueuedOrRunning, ActionNotQueued) == ActionNotQueued)
{
Expand Down Expand Up @@ -498,6 +511,7 @@ public bool SetValue(TKey key, TValue value, DateTime expirationTime)
}

_map[key] = newCacheItem;
Interlocked.Increment(ref _mapSize);
Copy link
Member

@brentschmaltz brentschmaltz Sep 20, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

_map[key] = newCacheItem, does not imply the count has increased.

}

return true;
Expand Down Expand Up @@ -592,6 +606,8 @@ public bool TryRemove(TKey key)
return false;

OnItemMovedToCompactedList?.Invoke(cacheItem.Value);
Interlocked.Decrement(ref _mapSize);

return true;
}

Expand All @@ -616,6 +632,7 @@ public bool TryRemove(TKey key, out TValue value)

value = cacheItem.Value;
OnItemMovedToCompactedList?.Invoke(cacheItem.Value);
Interlocked.Decrement(ref _mapSize);

return true;
}
Expand All @@ -636,7 +653,7 @@ public bool TryRemove(TKey key, out TValue value)
/// <summary>
/// FOR TESTING ONLY.
/// </summary>
internal long MapCount => _map.Count;
internal long MapCount => _mapSize;

/// <summary>
/// FOR TESTING ONLY.
Expand Down
Loading