diff --git a/libs/host/Configuration/Options.cs b/libs/host/Configuration/Options.cs
index 96fdd62433..2e96a1a6ba 100644
--- a/libs/host/Configuration/Options.cs
+++ b/libs/host/Configuration/Options.cs
@@ -233,6 +233,9 @@ internal sealed class Options
[IntRangeValidation(0, int.MaxValue)]
[Option("compaction-freq", Required = false, HelpText = "Background hybrid log compaction frequency in seconds. 0 = disabled (compaction performed before checkpointing instead)")]
public int CompactionFrequencySecs { get; set; }
+ [IntRangeValidation(0, int.MaxValue)]
+ [Option("hcollect-freq", Required = false, HelpText = "Frequency in seconds for the background task to perform Hash collection. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation. Use the HCOLLECT API to collect on-demand.")]
+ public int HashCollectFrequencySecs { get; set; }
[Option("compaction-type", Required = false, HelpText = "Hybrid log compaction type. Value options: None - no compaction, Shift - shift begin address without compaction (data loss), Scan - scan old pages and move live records to tail (no data loss), Lookup - lookup each record in compaction range, for record liveness checking using hash chain (no data loss)")]
public LogCompactionType CompactionType { get; set; }
@@ -652,6 +655,7 @@ public GarnetServerOptions GetServerOptions(ILogger logger = null)
WaitForCommit = WaitForCommit.GetValueOrDefault(),
AofSizeLimit = AofSizeLimit,
CompactionFrequencySecs = CompactionFrequencySecs,
+ HashCollectFrequencySecs = HashCollectFrequencySecs,
CompactionType = CompactionType,
CompactionForceDelete = CompactionForceDelete.GetValueOrDefault(),
CompactionMaxSegments = CompactionMaxSegments,
diff --git a/libs/host/defaults.conf b/libs/host/defaults.conf
index aa18841004..977b0f5b0b 100644
--- a/libs/host/defaults.conf
+++ b/libs/host/defaults.conf
@@ -162,6 +162,9 @@
/* Background hybrid log compaction frequency in seconds. 0 = disabled (compaction performed before checkpointing instead) */
"CompactionFrequencySecs" : 0,
+ /* Frequency in seconds for the background task to perform Hash collection. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation. Use the HCOLLECT API to collect on-demand. */
+ "HashCollectFrequencySecs" : 0,
+
/* Hybrid log compaction type. Value options: */
/* None - no compaction */
/* Shift - shift begin address without compaction (data loss) */
diff --git a/libs/resources/RespCommandsDocs.json b/libs/resources/RespCommandsDocs.json
index b9701595ad..558edc9fcc 100644
--- a/libs/resources/RespCommandsDocs.json
+++ b/libs/resources/RespCommandsDocs.json
@@ -2751,6 +2751,12 @@
}
]
},
+ {
+ "Command": "HCOLLECT",
+ "Name": "HCOLLECT",
+ "Summary": "Manually trigger deletion of expired fields from memory",
+ "Group": "Hash"
+ },
{
"Command": "HDEL",
"Name": "HDEL",
@@ -2848,6 +2854,201 @@
}
]
},
+ {
+ "Command": "HEXPIRE",
+ "Name": "HEXPIRE",
+ "Summary": "Set expiry for hash field using relative time to expire (seconds)",
+ "Group": "Hash",
+ "Complexity": "O(N) where N is the number of specified fields",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandKeyArgument",
+ "Name": "KEY",
+ "DisplayText": "key",
+ "Type": "Key",
+ "KeySpecIndex": 0
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "SECONDS",
+ "DisplayText": "seconds",
+ "Type": "Integer"
+ },
+ {
+ "TypeDiscriminator": "RespCommandContainerArgument",
+ "Name": "CONDITION",
+ "Type": "OneOf",
+ "ArgumentFlags": "Optional",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "NX",
+ "DisplayText": "nx",
+ "Type": "PureToken",
+ "Token": "NX"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "XX",
+ "DisplayText": "xx",
+ "Type": "PureToken",
+ "Token": "XX"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "GT",
+ "DisplayText": "gt",
+ "Type": "PureToken",
+ "Token": "GT"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "LT",
+ "DisplayText": "lt",
+ "Type": "PureToken",
+ "Token": "LT"
+ }
+ ]
+ },
+ {
+ "TypeDiscriminator": "RespCommandContainerArgument",
+ "Name": "FIELDS",
+ "Type": "Block",
+ "Token": "FIELDS",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "NUMFIELDS",
+ "DisplayText": "numfields",
+ "Type": "Integer"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "FIELD",
+ "DisplayText": "field",
+ "Type": "String",
+ "ArgumentFlags": "Multiple"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Command": "HEXPIREAT",
+ "Name": "HEXPIREAT",
+ "Summary": "Set expiry for hash field using an absolute Unix timestamp (seconds)",
+ "Group": "Hash",
+ "Complexity": "O(N) where N is the number of specified fields",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandKeyArgument",
+ "Name": "KEY",
+ "DisplayText": "key",
+ "Type": "Key",
+ "KeySpecIndex": 0
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "UNIX-TIME-SECONDS",
+ "DisplayText": "unix-time-seconds",
+ "Type": "UnixTime"
+ },
+ {
+ "TypeDiscriminator": "RespCommandContainerArgument",
+ "Name": "CONDITION",
+ "Type": "OneOf",
+ "ArgumentFlags": "Optional",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "NX",
+ "DisplayText": "nx",
+ "Type": "PureToken",
+ "Token": "NX"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "XX",
+ "DisplayText": "xx",
+ "Type": "PureToken",
+ "Token": "XX"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "GT",
+ "DisplayText": "gt",
+ "Type": "PureToken",
+ "Token": "GT"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "LT",
+ "DisplayText": "lt",
+ "Type": "PureToken",
+ "Token": "LT"
+ }
+ ]
+ },
+ {
+ "TypeDiscriminator": "RespCommandContainerArgument",
+ "Name": "FIELDS",
+ "Type": "Block",
+ "Token": "FIELDS",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "NUMFIELDS",
+ "DisplayText": "numfields",
+ "Type": "Integer"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "FIELD",
+ "DisplayText": "field",
+ "Type": "String",
+ "ArgumentFlags": "Multiple"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Command": "HEXPIRETIME",
+ "Name": "HEXPIRETIME",
+ "Summary": "Returns the expiration time of a hash field as a Unix timestamp, in seconds.",
+ "Group": "Hash",
+ "Complexity": "O(N) where N is the number of specified fields",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandKeyArgument",
+ "Name": "KEY",
+ "DisplayText": "key",
+ "Type": "Key",
+ "KeySpecIndex": 0
+ },
+ {
+ "TypeDiscriminator": "RespCommandContainerArgument",
+ "Name": "FIELDS",
+ "Type": "Block",
+ "Token": "FIELDS",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "NUMFIELDS",
+ "DisplayText": "numfields",
+ "Type": "Integer"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "FIELD",
+ "DisplayText": "field",
+ "Type": "String",
+ "ArgumentFlags": "Multiple"
+ }
+ ]
+ }
+ ]
+ },
{
"Command": "HGET",
"Name": "HGET",
@@ -3035,6 +3236,275 @@
}
]
},
+ {
+ "Command": "HPERSIST",
+ "Name": "HPERSIST",
+ "Summary": "Removes the expiration time for each specified field",
+ "Group": "Hash",
+ "Complexity": "O(N) where N is the number of specified fields",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandKeyArgument",
+ "Name": "KEY",
+ "DisplayText": "key",
+ "Type": "Key",
+ "KeySpecIndex": 0
+ },
+ {
+ "TypeDiscriminator": "RespCommandContainerArgument",
+ "Name": "FIELDS",
+ "Type": "Block",
+ "Token": "FIELDS",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "NUMFIELDS",
+ "DisplayText": "numfields",
+ "Type": "Integer"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "FIELD",
+ "DisplayText": "field",
+ "Type": "String",
+ "ArgumentFlags": "Multiple"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Command": "HPEXPIRE",
+ "Name": "HPEXPIRE",
+ "Summary": "Set expiry for hash field using relative time to expire (milliseconds)",
+ "Group": "Hash",
+ "Complexity": "O(N) where N is the number of specified fields",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandKeyArgument",
+ "Name": "KEY",
+ "DisplayText": "key",
+ "Type": "Key",
+ "KeySpecIndex": 0
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "MILLISECONDS",
+ "DisplayText": "milliseconds",
+ "Type": "Integer"
+ },
+ {
+ "TypeDiscriminator": "RespCommandContainerArgument",
+ "Name": "CONDITION",
+ "Type": "OneOf",
+ "ArgumentFlags": "Optional",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "NX",
+ "DisplayText": "nx",
+ "Type": "PureToken",
+ "Token": "NX"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "XX",
+ "DisplayText": "xx",
+ "Type": "PureToken",
+ "Token": "XX"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "GT",
+ "DisplayText": "gt",
+ "Type": "PureToken",
+ "Token": "GT"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "LT",
+ "DisplayText": "lt",
+ "Type": "PureToken",
+ "Token": "LT"
+ }
+ ]
+ },
+ {
+ "TypeDiscriminator": "RespCommandContainerArgument",
+ "Name": "FIELDS",
+ "Type": "Block",
+ "Token": "FIELDS",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "NUMFIELDS",
+ "DisplayText": "numfields",
+ "Type": "Integer"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "FIELD",
+ "DisplayText": "field",
+ "Type": "String",
+ "ArgumentFlags": "Multiple"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Command": "HPEXPIREAT",
+ "Name": "HPEXPIREAT",
+ "Summary": "Set expiry for hash field using an absolute Unix timestamp (milliseconds)",
+ "Group": "Hash",
+ "Complexity": "O(N) where N is the number of specified fields",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandKeyArgument",
+ "Name": "KEY",
+ "DisplayText": "key",
+ "Type": "Key",
+ "KeySpecIndex": 0
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "UNIX-TIME-MILLISECONDS",
+ "DisplayText": "unix-time-milliseconds",
+ "Type": "UnixTime"
+ },
+ {
+ "TypeDiscriminator": "RespCommandContainerArgument",
+ "Name": "CONDITION",
+ "Type": "OneOf",
+ "ArgumentFlags": "Optional",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "NX",
+ "DisplayText": "nx",
+ "Type": "PureToken",
+ "Token": "NX"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "XX",
+ "DisplayText": "xx",
+ "Type": "PureToken",
+ "Token": "XX"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "GT",
+ "DisplayText": "gt",
+ "Type": "PureToken",
+ "Token": "GT"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "LT",
+ "DisplayText": "lt",
+ "Type": "PureToken",
+ "Token": "LT"
+ }
+ ]
+ },
+ {
+ "TypeDiscriminator": "RespCommandContainerArgument",
+ "Name": "FIELDS",
+ "Type": "Block",
+ "Token": "FIELDS",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "NUMFIELDS",
+ "DisplayText": "numfields",
+ "Type": "Integer"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "FIELD",
+ "DisplayText": "field",
+ "Type": "String",
+ "ArgumentFlags": "Multiple"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Command": "HPEXPIRETIME",
+ "Name": "HPEXPIRETIME",
+ "Summary": "Returns the expiration time of a hash field as a Unix timestamp, in msec.",
+ "Group": "Hash",
+ "Complexity": "O(N) where N is the number of specified fields",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandKeyArgument",
+ "Name": "KEY",
+ "DisplayText": "key",
+ "Type": "Key",
+ "KeySpecIndex": 0
+ },
+ {
+ "TypeDiscriminator": "RespCommandContainerArgument",
+ "Name": "FIELDS",
+ "Type": "Block",
+ "Token": "FIELDS",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "NUMFIELDS",
+ "DisplayText": "numfields",
+ "Type": "Integer"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "FIELD",
+ "DisplayText": "field",
+ "Type": "String",
+ "ArgumentFlags": "Multiple"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Command": "HPTTL",
+ "Name": "HPTTL",
+ "Summary": "Returns the TTL in milliseconds of a hash field.",
+ "Group": "Hash",
+ "Complexity": "O(N) where N is the number of specified fields",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandKeyArgument",
+ "Name": "KEY",
+ "DisplayText": "key",
+ "Type": "Key",
+ "KeySpecIndex": 0
+ },
+ {
+ "TypeDiscriminator": "RespCommandContainerArgument",
+ "Name": "FIELDS",
+ "Type": "Block",
+ "Token": "FIELDS",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "NUMFIELDS",
+ "DisplayText": "numfields",
+ "Type": "Integer"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "FIELD",
+ "DisplayText": "field",
+ "Type": "String",
+ "ArgumentFlags": "Multiple"
+ }
+ ]
+ }
+ ]
+ },
{
"Command": "HRANDFIELD",
"Name": "HRANDFIELD",
@@ -3197,6 +3667,43 @@
}
]
},
+ {
+ "Command": "HTTL",
+ "Name": "HTTL",
+ "Summary": "Returns the TTL in seconds of a hash field.",
+ "Group": "Hash",
+ "Complexity": "O(N) where N is the number of specified fields",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandKeyArgument",
+ "Name": "KEY",
+ "DisplayText": "key",
+ "Type": "Key",
+ "KeySpecIndex": 0
+ },
+ {
+ "TypeDiscriminator": "RespCommandContainerArgument",
+ "Name": "FIELDS",
+ "Type": "Block",
+ "Token": "FIELDS",
+ "Arguments": [
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "NUMFIELDS",
+ "DisplayText": "numfields",
+ "Type": "Integer"
+ },
+ {
+ "TypeDiscriminator": "RespCommandBasicArgument",
+ "Name": "FIELD",
+ "DisplayText": "field",
+ "Type": "String",
+ "ArgumentFlags": "Multiple"
+ }
+ ]
+ }
+ ]
+ },
{
"Command": "HVALS",
"Name": "HVALS",
diff --git a/libs/resources/RespCommandsInfo.json b/libs/resources/RespCommandsInfo.json
index b131275ee7..3b8a3e8f92 100644
--- a/libs/resources/RespCommandsInfo.json
+++ b/libs/resources/RespCommandsInfo.json
@@ -1546,6 +1546,31 @@
}
]
},
+ {
+ "Command": "HCOLLECT",
+ "Name": "HCOLLECT",
+ "Arity": 2,
+ "Flags": "Admin, Write",
+ "FirstKey": 1,
+ "LastKey": 1,
+ "Step": 1,
+ "AclCategories": "Hash, Write, Admin, Garnet",
+ "KeySpecifications": [
+ {
+ "BeginSearch": {
+ "TypeDiscriminator": "BeginSearchIndex",
+ "Index": 1
+ },
+ "FindKeys": {
+ "TypeDiscriminator": "FindKeysRange",
+ "LastKey": 0,
+ "KeyStep": 1,
+ "Limit": 0
+ },
+ "Flags": "RW, Access, Update"
+ }
+ ]
+ },
{
"Command": "HDEL",
"Name": "HDEL",
@@ -1603,6 +1628,81 @@
}
]
},
+ {
+ "Command": "HEXPIRE",
+ "Name": "HEXPIRE",
+ "Arity": -6,
+ "Flags": "DenyOom, Fast, Write",
+ "FirstKey": 1,
+ "LastKey": 1,
+ "Step": 1,
+ "AclCategories": "Hash, Fast, Write",
+ "KeySpecifications": [
+ {
+ "BeginSearch": {
+ "TypeDiscriminator": "BeginSearchIndex",
+ "Index": 1
+ },
+ "FindKeys": {
+ "TypeDiscriminator": "FindKeysRange",
+ "LastKey": 0,
+ "KeyStep": 1,
+ "Limit": 0
+ },
+ "Flags": "RW, Update"
+ }
+ ]
+ },
+ {
+ "Command": "HEXPIREAT",
+ "Name": "HEXPIREAT",
+ "Arity": -6,
+ "Flags": "DenyOom, Fast, Write",
+ "FirstKey": 1,
+ "LastKey": 1,
+ "Step": 1,
+ "AclCategories": "Hash, Fast, Write",
+ "KeySpecifications": [
+ {
+ "BeginSearch": {
+ "TypeDiscriminator": "BeginSearchIndex",
+ "Index": 1
+ },
+ "FindKeys": {
+ "TypeDiscriminator": "FindKeysRange",
+ "LastKey": 0,
+ "KeyStep": 1,
+ "Limit": 0
+ },
+ "Flags": "RW, Update"
+ }
+ ]
+ },
+ {
+ "Command": "HEXPIRETIME",
+ "Name": "HEXPIRETIME",
+ "Arity": -5,
+ "Flags": "Fast, ReadOnly",
+ "FirstKey": 1,
+ "LastKey": 1,
+ "Step": 1,
+ "AclCategories": "Hash, Fast, Read",
+ "KeySpecifications": [
+ {
+ "BeginSearch": {
+ "TypeDiscriminator": "BeginSearchIndex",
+ "Index": 1
+ },
+ "FindKeys": {
+ "TypeDiscriminator": "FindKeysRange",
+ "LastKey": 0,
+ "KeyStep": 1,
+ "Limit": 0
+ },
+ "Flags": "RO, Access"
+ }
+ ]
+ },
{
"Command": "HGET",
"Name": "HGET",
@@ -1809,6 +1909,131 @@
}
]
},
+ {
+ "Command": "HPERSIST",
+ "Name": "HPERSIST",
+ "Arity": -5,
+ "Flags": "Fast, Write",
+ "FirstKey": 1,
+ "LastKey": 1,
+ "Step": 1,
+ "AclCategories": "Hash, Fast, Write",
+ "KeySpecifications": [
+ {
+ "BeginSearch": {
+ "TypeDiscriminator": "BeginSearchIndex",
+ "Index": 1
+ },
+ "FindKeys": {
+ "TypeDiscriminator": "FindKeysRange",
+ "LastKey": 0,
+ "KeyStep": 1,
+ "Limit": 0
+ },
+ "Flags": "RW, Update"
+ }
+ ]
+ },
+ {
+ "Command": "HPEXPIRE",
+ "Name": "HPEXPIRE",
+ "Arity": -6,
+ "Flags": "DenyOom, Fast, Write",
+ "FirstKey": 1,
+ "LastKey": 1,
+ "Step": 1,
+ "AclCategories": "Hash, Fast, Write",
+ "KeySpecifications": [
+ {
+ "BeginSearch": {
+ "TypeDiscriminator": "BeginSearchIndex",
+ "Index": 1
+ },
+ "FindKeys": {
+ "TypeDiscriminator": "FindKeysRange",
+ "LastKey": 0,
+ "KeyStep": 1,
+ "Limit": 0
+ },
+ "Flags": "RW, Update"
+ }
+ ]
+ },
+ {
+ "Command": "HPEXPIREAT",
+ "Name": "HPEXPIREAT",
+ "Arity": -6,
+ "Flags": "DenyOom, Fast, Write",
+ "FirstKey": 1,
+ "LastKey": 1,
+ "Step": 1,
+ "AclCategories": "Hash, Fast, Write",
+ "KeySpecifications": [
+ {
+ "BeginSearch": {
+ "TypeDiscriminator": "BeginSearchIndex",
+ "Index": 1
+ },
+ "FindKeys": {
+ "TypeDiscriminator": "FindKeysRange",
+ "LastKey": 0,
+ "KeyStep": 1,
+ "Limit": 0
+ },
+ "Flags": "RW, Update"
+ }
+ ]
+ },
+ {
+ "Command": "HPEXPIRETIME",
+ "Name": "HPEXPIRETIME",
+ "Arity": -5,
+ "Flags": "Fast, ReadOnly",
+ "FirstKey": 1,
+ "LastKey": 1,
+ "Step": 1,
+ "AclCategories": "Hash, Fast, Read",
+ "KeySpecifications": [
+ {
+ "BeginSearch": {
+ "TypeDiscriminator": "BeginSearchIndex",
+ "Index": 1
+ },
+ "FindKeys": {
+ "TypeDiscriminator": "FindKeysRange",
+ "LastKey": 0,
+ "KeyStep": 1,
+ "Limit": 0
+ },
+ "Flags": "RO, Access"
+ }
+ ]
+ },
+ {
+ "Command": "HPTTL",
+ "Name": "HPTTL",
+ "Arity": -5,
+ "Flags": "Fast, ReadOnly",
+ "FirstKey": 1,
+ "LastKey": 1,
+ "Step": 1,
+ "AclCategories": "Hash, Fast, Read",
+ "KeySpecifications": [
+ {
+ "BeginSearch": {
+ "TypeDiscriminator": "BeginSearchIndex",
+ "Index": 1
+ },
+ "FindKeys": {
+ "TypeDiscriminator": "FindKeysRange",
+ "LastKey": 0,
+ "KeyStep": 1,
+ "Limit": 0
+ },
+ "Flags": "RO, Access"
+ }
+ ]
+ },
{
"Command": "HRANDFIELD",
"Name": "HRANDFIELD",
@@ -1940,6 +2165,31 @@
}
]
},
+ {
+ "Command": "HTTL",
+ "Name": "HTTL",
+ "Arity": -5,
+ "Flags": "Fast, ReadOnly",
+ "FirstKey": 1,
+ "LastKey": 1,
+ "Step": 1,
+ "AclCategories": "Hash, Fast, Read",
+ "KeySpecifications": [
+ {
+ "BeginSearch": {
+ "TypeDiscriminator": "BeginSearchIndex",
+ "Index": 1
+ },
+ "FindKeys": {
+ "TypeDiscriminator": "FindKeysRange",
+ "LastKey": 0,
+ "KeyStep": 1,
+ "Limit": 0
+ },
+ "Flags": "RO, Access"
+ }
+ ]
+ },
{
"Command": "HVALS",
"Name": "HVALS",
diff --git a/libs/server/API/GarnetApiObjectCommands.cs b/libs/server/API/GarnetApiObjectCommands.cs
index 42cf34f17d..f9eaece463 100644
--- a/libs/server/API/GarnetApiObjectCommands.cs
+++ b/libs/server/API/GarnetApiObjectCommands.cs
@@ -496,10 +496,25 @@ public GarnetStatus HashIncrement(byte[] key, ArgSlice input, out ObjectOutputHe
public GarnetStatus HashIncrement(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter)
=> storageSession.HashIncrement(key, ref input, ref outputFooter, ref objectContext);
+ /// <inheritdoc />
+ public GarnetStatus HashExpire(ArgSlice key, long expireAt, bool isMilliseconds, ExpireOption expireOption, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter)
+ => storageSession.HashExpire(key, expireAt, isMilliseconds, expireOption, ref input, ref outputFooter, ref objectContext);
+
+ /// <inheritdoc />
+ public GarnetStatus HashPersist(ArgSlice key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter)
+ => storageSession.HashPersist(key, ref input, ref outputFooter, ref objectContext);
+
///
public GarnetStatus HashScan(ArgSlice key, long cursor, string match, int count, out ArgSlice[] items)
=> storageSession.ObjectScan(GarnetObjectType.Hash, key, cursor, match, count, out items, ref objectContext);
+ /// <inheritdoc />
+ public GarnetStatus HashTimeToLive(ArgSlice key, bool isMilliseconds, bool isTimestamp, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter)
+ => storageSession.HashTimeToLive(key, isMilliseconds, isTimestamp, ref input, ref outputFooter, ref objectContext);
+
+ public GarnetStatus HashCollect(ReadOnlySpan<ArgSlice> keys, ref ObjectInput input)
+ => storageSession.HashCollect(keys, ref input, ref objectContext);
+
#endregion
}
diff --git a/libs/server/API/GarnetWatchApi.cs b/libs/server/API/GarnetWatchApi.cs
index 2cec273074..083c237771 100644
--- a/libs/server/API/GarnetWatchApi.cs
+++ b/libs/server/API/GarnetWatchApi.cs
@@ -472,6 +472,13 @@ public GarnetStatus HashScan(ArgSlice key, long cursor, string match, int count,
return garnetApi.HashScan(key, cursor, match, count, out items);
}
+ /// <inheritdoc />
+ public GarnetStatus HashTimeToLive(ArgSlice key, bool isMilliseconds, bool isTimestamp, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter)
+ {
+ garnetApi.WATCH(key, StoreType.Object);
+ return garnetApi.HashTimeToLive(key, isMilliseconds, isTimestamp, ref input, ref outputFooter);
+ }
+
#endregion
#region Bitmap Methods
diff --git a/libs/server/API/IGarnetApi.cs b/libs/server/API/IGarnetApi.cs
index 25a028e19a..1ebf472b0b 100644
--- a/libs/server/API/IGarnetApi.cs
+++ b/libs/server/API/IGarnetApi.cs
@@ -995,6 +995,33 @@ public interface IGarnetApi : IGarnetReadApi, IGarnetAdvancedApi
///
GarnetStatus HashIncrement(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter);
+ /// <summary>
+ /// Sets an expiration time on a hash field.
+ /// </summary>
+ /// <param name="key">The key of the hash.</param>
+ /// <param name="expireAt">The expiration time in Unix timestamp format.</param>
+ /// <param name="expireOption">The expiration option to apply.</param>
+ /// <param name="input">The input object containing additional parameters.</param>
+ /// <param name="outputFooter">The output object to store the result.</param>
+ /// <returns>The status of the operation.</returns>
+ GarnetStatus HashExpire(ArgSlice key, long expireAt, bool isMilliseconds, ExpireOption expireOption, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter);
+
+ /// <summary>
+ /// Persists the specified hash key, removing any expiration time set on it.
+ /// </summary>
+ /// <param name="key">The key of the hash to persist.</param>
+ /// <param name="input">The input object containing additional parameters.</param>
+ /// <param name="outputFooter">The output object to store the result.</param>
+ /// <returns>The status of the operation.</returns>
+ GarnetStatus HashPersist(ArgSlice key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter);
+
+ /// <summary>
+ /// Delete already expired fields from the hash.
+ /// </summary>
+ /// <param name="keys">The keys of the hash.</param>
+ /// <returns>The status of the operation.</returns>
+ GarnetStatus HashCollect(ReadOnlySpan<ArgSlice> keys, ref ObjectInput input);
+
#endregion
#region BitMaps Methods
@@ -1661,6 +1688,17 @@ public interface IGarnetReadApi
///
GarnetStatus HashScan(ArgSlice key, long cursor, string match, int count, out ArgSlice[] items);
+ /// <summary>
+ /// Returns the time to live for a hash key.
+ /// </summary>
+ /// <param name="key">The key of the hash.</param>
+ /// <param name="isMilliseconds">Indicates if the time to live is in milliseconds.</param>
+ /// <param name="isTimestamp">Indicates if the time to live is a timestamp.</param>
+ /// <param name="input">The input object containing additional parameters.</param>
+ /// <param name="outputFooter">The output object to store the result.</param>
+ /// <returns>The status of the operation.</returns>
+ GarnetStatus HashTimeToLive(ArgSlice key, bool isMilliseconds, bool isTimestamp, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter);
+
#endregion
#region Bitmaps Methods
diff --git a/libs/server/Objects/Hash/HashObject.cs b/libs/server/Objects/Hash/HashObject.cs
index bfa3a8b410..f9d1fbc95a 100644
--- a/libs/server/Objects/Hash/HashObject.cs
+++ b/libs/server/Objects/Hash/HashObject.cs
@@ -5,6 +5,8 @@
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
+using System.Linq;
+using System.Runtime.CompilerServices;
using Garnet.common;
using Tsavorite.core;
@@ -17,6 +19,10 @@ namespace Garnet.server
///
public enum HashOperation : byte
{
+ HCOLLECT,
+ HEXPIRE,
+ HTTL,
+ HPERSIST,
HGET,
HMGET,
HSET,
@@ -42,6 +48,11 @@ public enum HashOperation : byte
public unsafe partial class HashObject : GarnetObjectBase
{
readonly Dictionary<byte[], byte[]> hash;
+ Dictionary<byte[], long> expirationTimes;
+ PriorityQueue<byte[], long> expirationQueue;
+
+ // Bit #31 of the serialized key length is used to denote if key has expiration (1) or not (0)
+ private const int ExpirationBitMask = 1 << 31;
///
/// Constructor
@@ -63,9 +74,29 @@ public HashObject(BinaryReader reader)
int count = reader.ReadInt32();
for (int i = 0; i < count; i++)
{
- var item = reader.ReadBytes(reader.ReadInt32());
+ var keyLength = reader.ReadInt32();
+ var hasExpiration = (keyLength & ExpirationBitMask) != 0;
+ keyLength &= ~ExpirationBitMask;
+ var item = reader.ReadBytes(keyLength);
var value = reader.ReadBytes(reader.ReadInt32());
- hash.Add(item, value);
+
+ if (hasExpiration)
+ {
+ var expiration = reader.ReadInt64();
+ var isExpired = expiration < DateTimeOffset.UtcNow.Ticks;
+ if (!isExpired)
+ {
+ hash.Add(item, value);
+ InitializeExpirationStructures();
+ expirationTimes.Add(item, expiration);
+ expirationQueue.Enqueue(item, expiration);
+ UpdateExpirationSize(item, true);
+ }
+ }
+ else
+ {
+ hash.Add(item, value);
+ }
this.UpdateSize(item, value);
}
@@ -74,10 +105,12 @@ public HashObject(BinaryReader reader)
///
/// Copy constructor
///
- public HashObject(Dictionary<byte[], byte[]> hash, long expiration, long size)
+ public HashObject(Dictionary<byte[], byte[]> hash, Dictionary<byte[], long> expirationTimes, PriorityQueue<byte[], long> expirationQueue, long expiration, long size)
: base(expiration, size)
{
this.hash = hash;
+ this.expirationTimes = expirationTimes;
+ this.expirationQueue = expirationQueue;
}
///
@@ -88,16 +121,30 @@ public override void DoSerialize(BinaryWriter writer)
{
base.DoSerialize(writer);
- int count = hash.Count;
+ DeleteExpiredItems();
+
+ int count = hash.Count; // Since expired items are already deleted, no need to worry about expiring items
writer.Write(count);
foreach (var kvp in hash)
{
+ if (expirationTimes is not null && expirationTimes.TryGetValue(kvp.Key, out var expiration))
+ {
+ writer.Write(kvp.Key.Length | ExpirationBitMask);
+ writer.Write(kvp.Key);
+ writer.Write(kvp.Value.Length);
+ writer.Write(kvp.Value);
+ writer.Write(expiration);
+ count--;
+ continue;
+ }
+
writer.Write(kvp.Key.Length);
writer.Write(kvp.Key);
writer.Write(kvp.Value.Length);
writer.Write(kvp.Value);
count--;
}
+
Debug.Assert(count == 0);
}
@@ -105,7 +152,7 @@ public override void DoSerialize(BinaryWriter writer)
public override void Dispose() { }
///
- public override GarnetObjectBase Clone() => new HashObject(hash, Expiration, Size);
+ public override GarnetObjectBase Clone() => new HashObject(hash, expirationTimes, expirationQueue, Expiration, Size);
///
public override unsafe bool Operate(ref ObjectInput input, ref SpanByteAndMemory output, out long sizeChange, out bool removeKey)
@@ -152,6 +199,15 @@ public override unsafe bool Operate(ref ObjectInput input, ref SpanByteAndMemory
case HashOperation.HEXISTS:
HashExists(ref input, _output);
break;
+ case HashOperation.HEXPIRE:
+ HashExpire(ref input, ref output);
+ break;
+ case HashOperation.HTTL:
+ HashTimeToLive(ref input, ref output);
+ break;
+ case HashOperation.HPERSIST:
+ HashPersist(ref input, ref output);
+ break;
case HashOperation.HKEYS:
HashGetKeysOrValues(ref input, ref output);
break;
@@ -170,6 +226,9 @@ public override unsafe bool Operate(ref ObjectInput input, ref SpanByteAndMemory
case HashOperation.HRANDFIELD:
HashRandomField(ref input, ref output);
break;
+ case HashOperation.HCOLLECT:
+ HashCollect(ref input, _output);
+ break;
case HashOperation.HSCAN:
if (ObjectUtils.ReadScanInput(ref input, ref output, out var cursorInput, out var pattern,
out var patternLength, out var limitCount, out bool isNoValue, out var error))
@@ -202,6 +261,38 @@ private void UpdateSize(ReadOnlySpan<byte> key, ReadOnlySpan<byte> value, bool a
Debug.Assert(this.Size >= MemoryUtils.DictionaryOverhead);
}
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void InitializeExpirationStructures()
+ {
+ if (expirationTimes is null)
+ {
+ expirationTimes = new Dictionary<byte[], long>(ByteArrayComparer.Instance);
+ expirationQueue = new PriorityQueue<byte[], long>();
+ this.Size += MemoryUtils.DictionaryOverhead + MemoryUtils.PriorityQueueOverhead;
+ }
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void UpdateExpirationSize(ReadOnlySpan<byte> key, bool add = true)
+ {
+ // Account for dictionary entry and priority queue entry
+ var size = IntPtr.Size + sizeof(long) + MemoryUtils.DictionaryEntryOverhead
+ + IntPtr.Size + sizeof(long) + MemoryUtils.PriorityQueueEntryOverhead;
+ this.Size += add ? size : -size;
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void CleanupExpirationStructures()
+ {
+ if (expirationTimes.Count == 0)
+ {
+ this.Size -= (IntPtr.Size + sizeof(long) + MemoryUtils.PriorityQueueOverhead) * expirationQueue.Count;
+ this.Size -= MemoryUtils.DictionaryOverhead + MemoryUtils.PriorityQueueOverhead;
+ expirationTimes = null;
+ expirationQueue = null;
+ }
+ }
+
///
public override unsafe void Scan(long start, out List<byte[]> items, out long cursor, int count = 10, byte* pattern = default, int patternLength = 0, bool isNoValue = false)
{
@@ -217,8 +308,15 @@ public override unsafe void Scan(long start, out List<byte[]> items, out long cu
// Hashset has key and value, so count is multiplied by 2
count = isNoValue ? count : count * 2;
int index = 0;
+ var expiredKeysCount = 0;
foreach (var item in hash)
{
+ if (IsExpired(item.Key))
+ {
+ expiredKeysCount++;
+ continue;
+ }
+
if (index < start)
{
index++;
@@ -256,8 +354,246 @@ public override unsafe void Scan(long start, out List items, out long cu
}
// Indicates end of collection has been reached.
- if (cursor == hash.Count)
+ if (cursor + expiredKeysCount == hash.Count)
cursor = 0;
}
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private bool IsExpired(byte[] key) => expirationTimes is not null && expirationTimes.TryGetValue(key, out var expiration) && expiration < DateTimeOffset.UtcNow.Ticks;
+
+ private void DeleteExpiredItems()
+ {
+ if (expirationTimes is null)
+ return;
+
+ while (expirationQueue.TryPeek(out var key, out var expiration) && expiration < DateTimeOffset.UtcNow.Ticks)
+ {
+ // expirationTimes and expirationQueue can get out of sync when the user updates the expiration time of a key that already has a TTL.
+ // PriorityQueue doesn't support updating an entry's priority, so we enqueue the new expiration and treat expirationTimes as the source of truth.
+ if (expirationTimes.TryGetValue(key, out var actualExpiration) && actualExpiration == expiration)
+ {
+ expirationTimes.Remove(key);
+ expirationQueue.Dequeue();
+ UpdateExpirationSize(key, false);
+ if (hash.TryGetValue(key, out var value))
+ {
+ hash.Remove(key);
+ UpdateSize(key, value, false);
+ }
+ }
+ else
+ {
+ expirationQueue.Dequeue();
+ this.Size -= MemoryUtils.PriorityQueueEntryOverhead + IntPtr.Size + sizeof(long);
+ }
+ }
+
+ CleanupExpirationStructures();
+ }
+
+ private bool TryGetValue(byte[] key, out byte[] value)
+ {
+ value = default;
+ if (IsExpired(key))
+ {
+ return false;
+ }
+ return hash.TryGetValue(key, out value);
+ }
+
+ private bool Remove(byte[] key, out byte[] value)
+ {
+ DeleteExpiredItems();
+ var result = hash.Remove(key, out value);
+ if (result)
+ {
+ UpdateSize(key, value, false);
+ }
+ return result;
+ }
+
+ private int Count()
+ {
+ if (expirationTimes is not null)
+ {
+ var expiredKeysCount = 0;
+ foreach (var item in expirationTimes)
+ {
+ if (IsExpired(item.Key))
+ {
+ expiredKeysCount++;
+ }
+ }
+
+ return hash.Count - expiredKeysCount;
+ }
+
+ return hash.Count;
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private bool HasExpirableItems()
+ {
+ return expirationTimes is not null;
+ }
+
+ private bool ContainsKey(byte[] key)
+ {
+ var result = hash.ContainsKey(key);
+ if (result && IsExpired(key))
+ {
+ return false;
+ }
+
+ return result;
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void Add(byte[] key, byte[] value)
+ {
+ DeleteExpiredItems();
+ hash.Add(key, value);
+ UpdateSize(key, value);
+ }
+
+ private void Set(byte[] key, byte[] value)
+ {
+ DeleteExpiredItems();
+ hash[key] = value;
+ // NOTE(review): this adjustment computes RoundUp(value.Length) - RoundUp(value.Length) == 0; to account for the replacement it should subtract the *old* value's rounded length — confirm intent.
+ this.Size += Utility.RoundUp(value.Length, IntPtr.Size) -
+ Utility.RoundUp(value.Length, IntPtr.Size);
+
+ // To persist the key, if it has an expiration
+ if (expirationTimes is not null && expirationTimes.TryGetValue(key, out var currentExpiration))
+ {
+ expirationTimes.Remove(key);
+ this.Size -= IntPtr.Size + sizeof(long) + MemoryUtils.DictionaryEntryOverhead;
+ CleanupExpirationStructures();
+ }
+ }
+
+ private void SetWithoutPersist(byte[] key, byte[] value)
+ {
+ DeleteExpiredItems();
+ hash[key] = value;
+ // NOTE(review): as in Set(), this computes RoundUp(value.Length) - RoundUp(value.Length) == 0; the replaced value's rounded length should be subtracted instead — confirm intent.
+ this.Size += Utility.RoundUp(value.Length, IntPtr.Size) -
+ Utility.RoundUp(value.Length, IntPtr.Size);
+ }
+
+ private int SetExpiration(byte[] key, long expiration, ExpireOption expireOption)
+ {
+ if (!ContainsKey(key))
+ {
+ return -2;
+ }
+
+ if (expiration <= DateTimeOffset.UtcNow.Ticks)
+ {
+ Remove(key, out _);
+ return 2;
+ }
+
+ InitializeExpirationStructures();
+
+ if (expirationTimes.TryGetValue(key, out var currentExpiration))
+ {
+ if (expireOption.HasFlag(ExpireOption.NX))
+ {
+ return 0;
+ }
+
+ if (expireOption.HasFlag(ExpireOption.GT) && expiration <= currentExpiration)
+ {
+ return 0;
+ }
+
+ if (expireOption.HasFlag(ExpireOption.LT) && expiration >= currentExpiration)
+ {
+ return 0;
+ }
+
+ expirationTimes[key] = expiration;
+ expirationQueue.Enqueue(key, expiration);
+ // Size of dictionary entry already accounted for as the key already exists
+ this.Size += IntPtr.Size + sizeof(long) + MemoryUtils.PriorityQueueEntryOverhead;
+ }
+ else
+ {
+ if (expireOption.HasFlag(ExpireOption.XX))
+ {
+ return 0;
+ }
+
+ if (expireOption.HasFlag(ExpireOption.GT))
+ {
+ return 0;
+ }
+
+ expirationTimes[key] = expiration;
+ expirationQueue.Enqueue(key, expiration);
+ UpdateExpirationSize(key);
+ }
+
+ return 1;
+ }
+
+ private int Persist(byte[] key)
+ {
+ if (!ContainsKey(key))
+ {
+ return -2;
+ }
+
+ if (expirationTimes is not null && expirationTimes.TryGetValue(key, out var currentExpiration))
+ {
+ expirationTimes.Remove(key);
+ this.Size -= IntPtr.Size + sizeof(long) + MemoryUtils.DictionaryEntryOverhead;
+ CleanupExpirationStructures();
+ return 1;
+ }
+
+ return -1;
+ }
+
+ private long GetExpiration(byte[] key)
+ {
+ if (!ContainsKey(key))
+ {
+ return -2;
+ }
+
+ if (expirationTimes.TryGetValue(key, out var expiration))
+ {
+ return expiration;
+ }
+
+ return -1;
+ }
+
+ private KeyValuePair ElementAt(int index)
+ {
+ if (HasExpirableItems())
+ {
+ var currIndex = 0;
+ foreach (var item in hash)
+ {
+ if (IsExpired(item.Key))
+ {
+ continue;
+ }
+
+ if (currIndex++ == index)
+ {
+ return item;
+ }
+ }
+
+ throw new ArgumentOutOfRangeException("index is outside the bounds of the source sequence.");
+ }
+
+ return hash.ElementAt(index);
+ }
}
}
\ No newline at end of file
diff --git a/libs/server/Objects/Hash/HashObjectImpl.cs b/libs/server/Objects/Hash/HashObjectImpl.cs
index 674aebfd08..14f6a84a41 100644
--- a/libs/server/Objects/Hash/HashObjectImpl.cs
+++ b/libs/server/Objects/Hash/HashObjectImpl.cs
@@ -33,7 +33,7 @@ private void HashGet(ref ObjectInput input, ref SpanByteAndMemory output)
{
var key = input.parseState.GetArgSliceByRef(0).SpanByte.ToByteArray();
- if (hash.TryGetValue(key, out var hashValue))
+ if (TryGetValue(key, out var hashValue))
{
while (!RespWriteUtils.WriteBulkString(hashValue, ref curr, end))
ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
@@ -75,7 +75,7 @@ private void HashMultipleGet(ref ObjectInput input, ref SpanByteAndMemory output
{
var key = input.parseState.GetArgSliceByRef(i).SpanByte.ToByteArray();
- if (hash.TryGetValue(key, out var hashValue))
+ if (TryGetValue(key, out var hashValue))
{
while (!RespWriteUtils.WriteBulkString(hashValue, ref curr, end))
ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
@@ -115,17 +115,24 @@ private void HashGetAll(ref ObjectInput input, ref SpanByteAndMemory output)
{
if (respProtocolVersion < 3)
{
- while (!RespWriteUtils.WriteArrayLength(hash.Count * 2, ref curr, end))
+ while (!RespWriteUtils.WriteArrayLength(Count() * 2, ref curr, end))
ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
}
else
{
- while (!RespWriteUtils.WriteMapLength(hash.Count, ref curr, end))
+ while (!RespWriteUtils.WriteMapLength(Count(), ref curr, end))
ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
}
+ var isExpirable = HasExpirableItems();
+
foreach (var item in hash)
{
+ if (isExpirable && IsExpired(item.Key))
+ {
+ continue;
+ }
+
while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end))
ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end))
@@ -151,17 +158,16 @@ private void HashDelete(ref ObjectInput input, byte* output)
{
var key = input.parseState.GetArgSliceByRef(i).SpanByte.ToByteArray();
- if (hash.Remove(key, out var hashValue))
+ if (Remove(key, out var hashValue))
{
_output->result1++;
- this.UpdateSize(key, hashValue, false);
}
}
}
private void HashLength(byte* output)
{
- ((ObjectOutputHeader*)output)->result1 = hash.Count;
+ ((ObjectOutputHeader*)output)->result1 = Count();
}
private void HashStrLength(ref ObjectInput input, byte* output)
@@ -170,7 +176,7 @@ private void HashStrLength(ref ObjectInput input, byte* output)
*_output = default;
var key = input.parseState.GetArgSliceByRef(0).SpanByte.ToByteArray();
- _output->result1 = hash.TryGetValue(key, out var hashValue) ? hashValue.Length : 0;
+ _output->result1 = TryGetValue(key, out var hashValue) ? hashValue.Length : 0;
}
private void HashExists(ref ObjectInput input, byte* output)
@@ -179,7 +185,7 @@ private void HashExists(ref ObjectInput input, byte* output)
*_output = default;
var field = input.parseState.GetArgSliceByRef(0).SpanByte.ToByteArray();
- _output->result1 = hash.ContainsKey(field) ? 1 : 0;
+ _output->result1 = ContainsKey(field) ? 1 : 0;
}
private void HashRandomField(ref ObjectInput input, ref SpanByteAndMemory output)
@@ -204,11 +210,21 @@ private void HashRandomField(ref ObjectInput input, ref SpanByteAndMemory output
{
if (includedCount)
{
- if (countParameter > 0 && countParameter > hash.Count)
- countParameter = hash.Count;
+ var count = Count();
+
+ if (count == 0) // This can happen because of expiration, but the RMW operation hasn't been applied yet
+ {
+ while (!RespWriteUtils.WriteEmptyArray(ref curr, end))
+ ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
+ _output.result1 = 0;
+ return;
+ }
+
+ if (countParameter > 0 && countParameter > count)
+ countParameter = count;
var absCount = Math.Abs(countParameter);
- var indexes = RandomUtils.PickKRandomIndexes(hash.Count, absCount, seed, countParameter > 0);
+ var indexes = RandomUtils.PickKRandomIndexes(count, absCount, seed, countParameter > 0);
// Write the size of the array reply
while (!RespWriteUtils.WriteArrayLength(withValues ? absCount * 2 : absCount, ref curr, end))
@@ -216,7 +232,7 @@ private void HashRandomField(ref ObjectInput input, ref SpanByteAndMemory output
foreach (var index in indexes)
{
- var pair = hash.ElementAt(index);
+ var pair = ElementAt(index);
while (!RespWriteUtils.WriteBulkString(pair.Key, ref curr, end))
ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
@@ -232,8 +248,17 @@ private void HashRandomField(ref ObjectInput input, ref SpanByteAndMemory output
else // No count parameter is present, we just return a random field
{
// Write a bulk string value of a random field from the hash value stored at key.
- var index = RandomUtils.PickRandomIndex(hash.Count, seed);
- var pair = hash.ElementAt(index);
+ var count = Count();
+ if (count == 0) // This can happen because of expiration, but the RMW operation hasn't been applied yet
+ {
+ while (!RespWriteUtils.WriteNull(ref curr, end))
+ ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
+ _output.result1 = 0;
+ return;
+ }
+
+ var index = RandomUtils.PickRandomIndex(count, seed);
+ var pair = ElementAt(index);
while (!RespWriteUtils.WriteBulkString(pair.Key, ref curr, end))
ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
countDone = 1;
@@ -262,26 +287,31 @@ private void HashSet(ref ObjectInput input, byte* output)
var key = input.parseState.GetArgSliceByRef(i).SpanByte.ToByteArray();
var value = input.parseState.GetArgSliceByRef(i + 1).SpanByte.ToByteArray();
- if (!hash.TryGetValue(key, out var hashValue))
+ if (!TryGetValue(key, out var hashValue))
{
- hash.Add(key, value);
- this.UpdateSize(key, value);
+ Add(key, value);
_output->result1++;
}
- else if ((hop == HashOperation.HSET || hop == HashOperation.HMSET) && hashValue != default &&
- !hashValue.AsSpan().SequenceEqual(value))
+ else if ((hop == HashOperation.HSET || hop == HashOperation.HMSET) && hashValue != default)
{
- hash[key] = value;
- // Skip overhead as existing item is getting replaced.
- this.Size += Utility.RoundUp(value.Length, IntPtr.Size) -
- Utility.RoundUp(hashValue.Length, IntPtr.Size);
+ Set(key, value);
}
}
}
+ private void HashCollect(ref ObjectInput input, byte* output)
+ {
+ var _output = (ObjectOutputHeader*)output;
+ *_output = default;
+
+ DeleteExpiredItems();
+
+ _output->result1 = 1;
+ }
+
private void HashGetKeysOrValues(ref ObjectInput input, ref SpanByteAndMemory output)
{
- var count = hash.Count;
+ var count = Count();
var op = input.header.HashOp;
var isMemory = false;
@@ -297,8 +327,15 @@ private void HashGetKeysOrValues(ref ObjectInput input, ref SpanByteAndMemory ou
while (!RespWriteUtils.WriteArrayLength(count, ref curr, end))
ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
+ var isExpirable = HasExpirableItems();
+
foreach (var item in hash)
{
+ if (isExpirable && IsExpired(item.Key))
+ {
+ continue;
+ }
+
if (HashOperation.HKEYS == op)
{
while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end))
@@ -343,7 +380,7 @@ private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output)
var key = input.parseState.GetArgSliceByRef(0).SpanByte.ToByteArray();
var incrSlice = input.parseState.GetArgSliceByRef(1);
- var valueExists = hash.TryGetValue(key, out var value);
+ var valueExists = TryGetValue(key, out var value);
if (op == HashOperation.HINCRBY)
{
if (!NumUtils.TryParse(incrSlice.ReadOnlySpan, out int incr))
@@ -376,15 +413,12 @@ private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output)
resultSpan = resultSpan.Slice(0, bytesWritten);
resultBytes = resultSpan.ToArray();
- hash[key] = resultBytes;
- Size += Utility.RoundUp(resultBytes.Length, IntPtr.Size) -
- Utility.RoundUp(value.Length, IntPtr.Size);
+ SetWithoutPersist(key, resultBytes);
}
else
{
resultBytes = incrSlice.SpanByte.ToByteArray();
- hash.Add(key, resultBytes);
- UpdateSize(key, resultBytes);
+ Add(key, resultBytes);
}
while (!RespWriteUtils.WriteIntegerFromBytes(resultBytes, ref curr, end))
@@ -417,15 +451,12 @@ private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output)
result += incr;
resultBytes = Encoding.ASCII.GetBytes(result.ToString(CultureInfo.InvariantCulture));
- hash[key] = resultBytes;
- Size += Utility.RoundUp(resultBytes.Length, IntPtr.Size) -
- Utility.RoundUp(value.Length, IntPtr.Size);
+ SetWithoutPersist(key, resultBytes);
}
else
{
resultBytes = incrSlice.SpanByte.ToByteArray();
- hash.Add(key, resultBytes);
- UpdateSize(key, resultBytes);
+ Add(key, resultBytes);
}
while (!RespWriteUtils.WriteBulkString(resultBytes, ref curr, end))
@@ -444,5 +475,138 @@ private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output)
output.Length = (int)(curr - ptr);
}
}
+
+ private void HashExpire(ref ObjectInput input, ref SpanByteAndMemory output)
+ {
+ var isMemory = false;
+ MemoryHandle ptrHandle = default;
+ var ptr = output.SpanByte.ToPointer();
+
+ var curr = ptr;
+ var end = curr + output.Length;
+
+ ObjectOutputHeader _output = default;
+ try
+ {
+ DeleteExpiredItems();
+
+ var expireOption = (ExpireOption)input.arg1;
+ var expiration = input.parseState.GetLong(0);
+ var numFields = input.parseState.Count - 1;
+ while (!RespWriteUtils.WriteArrayLength(numFields, ref curr, end))
+ ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
+
+ foreach (var item in input.parseState.Parameters.Slice(1))
+ {
+ var result = SetExpiration(item.ToArray(), expiration, expireOption);
+ while (!RespWriteUtils.WriteInteger(result, ref curr, end))
+ ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
+ _output.result1++;
+ }
+ }
+ finally
+ {
+ while (!RespWriteUtils.WriteDirect(ref _output, ref curr, end))
+ ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
+
+ if (isMemory) ptrHandle.Dispose();
+ output.Length = (int)(curr - ptr);
+ }
+ }
+
+ private void HashTimeToLive(ref ObjectInput input, ref SpanByteAndMemory output)
+ {
+ var isMemory = false;
+ MemoryHandle ptrHandle = default;
+ var ptr = output.SpanByte.ToPointer();
+
+ var curr = ptr;
+ var end = curr + output.Length;
+
+ ObjectOutputHeader _output = default;
+ try
+ {
+ DeleteExpiredItems();
+
+ var isMilliseconds = input.arg1 == 1;
+ var isTimestamp = input.arg2 == 1;
+ var numFields = input.parseState.Count;
+ while (!RespWriteUtils.WriteArrayLength(numFields, ref curr, end))
+ ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
+
+ foreach (var item in input.parseState.Parameters)
+ {
+ var result = GetExpiration(item.ToArray());
+
+ if (result >= 0)
+ {
+ if (isTimestamp && isMilliseconds)
+ {
+ result = ConvertUtils.UnixTimeInMillisecondsFromTicks(result);
+ }
+ else if (isTimestamp && !isMilliseconds)
+ {
+ result = ConvertUtils.UnixTimeInSecondsFromTicks(result);
+ }
+ else if (!isTimestamp && isMilliseconds)
+ {
+ result = ConvertUtils.MillisecondsFromDiffUtcNowTicks(result);
+ }
+ else if (!isTimestamp && !isMilliseconds)
+ {
+ result = ConvertUtils.SecondsFromDiffUtcNowTicks(result);
+ }
+ }
+
+ while (!RespWriteUtils.WriteInteger(result, ref curr, end))
+ ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
+ _output.result1++;
+ }
+ }
+ finally
+ {
+ while (!RespWriteUtils.WriteDirect(ref _output, ref curr, end))
+ ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
+
+ if (isMemory) ptrHandle.Dispose();
+ output.Length = (int)(curr - ptr);
+ }
+ }
+
+ private void HashPersist(ref ObjectInput input, ref SpanByteAndMemory output)
+ {
+ var isMemory = false;
+ MemoryHandle ptrHandle = default;
+ var ptr = output.SpanByte.ToPointer();
+
+ var curr = ptr;
+ var end = curr + output.Length;
+
+ ObjectOutputHeader _output = default;
+ try
+ {
+ DeleteExpiredItems();
+
+ var numFields = input.parseState.Count;
+ while (!RespWriteUtils.WriteArrayLength(numFields, ref curr, end))
+ ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
+
+ foreach (var item in input.parseState.Parameters)
+ {
+ var result = Persist(item.ToArray());
+ while (!RespWriteUtils.WriteInteger(result, ref curr, end))
+ ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
+ _output.result1++;
+ }
+ }
+ finally
+ {
+ while (!RespWriteUtils.WriteDirect(ref _output, ref curr, end))
+ ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end);
+
+ if (isMemory) ptrHandle.Dispose();
+ output.Length = (int)(curr - ptr);
+ }
+ }
}
}
\ No newline at end of file
diff --git a/libs/server/Objects/Types/GarnetObject.cs b/libs/server/Objects/Types/GarnetObject.cs
index f5366dffbb..7474d547e7 100644
--- a/libs/server/Objects/Types/GarnetObject.cs
+++ b/libs/server/Objects/Types/GarnetObject.cs
@@ -66,6 +66,12 @@ internal static bool NeedToCreate(RespInputHeader header)
SetOperation.SPOP => false,
_ => true,
},
+ GarnetObjectType.Hash => header.HashOp switch
+ {
+ HashOperation.HEXPIRE => false,
+ HashOperation.HCOLLECT => false,
+ _ => true,
+ },
GarnetObjectType.Expire => false,
GarnetObjectType.PExpire => false,
GarnetObjectType.Persist => false,
diff --git a/libs/server/Resp/AdminCommands.cs b/libs/server/Resp/AdminCommands.cs
index 923e3dbb5d..16aa01e797 100644
--- a/libs/server/Resp/AdminCommands.cs
+++ b/libs/server/Resp/AdminCommands.cs
@@ -18,7 +18,7 @@ namespace Garnet.server
///
internal sealed unsafe partial class RespServerSession : ServerSessionBase
{
- private void ProcessAdminCommands(RespCommand command)
+ private void ProcessAdminCommands(RespCommand command, ref TGarnetApi storageApi) where TGarnetApi : IGarnetApi
{
/*
* WARNING: Here is safe to add @slow commands (check how containsSlowCommand is used).
@@ -49,6 +49,7 @@ RespCommand.MIGRATE or
RespCommand.BGSAVE => NetworkBGSAVE(),
RespCommand.COMMITAOF => NetworkCOMMITAOF(),
RespCommand.FORCEGC => NetworkFORCEGC(),
+ RespCommand.HCOLLECT => NetworkHCOLLECT(ref storageApi),
RespCommand.MONITOR => NetworkMonitor(),
RespCommand.ACL_DELUSER => NetworkAclDelUser(),
RespCommand.ACL_LIST => NetworkAclList(),
@@ -567,6 +568,36 @@ private bool NetworkFORCEGC()
return true;
}
+ private bool NetworkHCOLLECT(ref TGarnetApi storageApi)
+ where TGarnetApi : IGarnetApi
+ {
+ if (parseState.Count < 1)
+ {
+ return AbortWithWrongNumberOfArguments(nameof(RespCommand.HCOLLECT));
+ }
+
+ var keys = parseState.Parameters;
+
+ var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HCOLLECT };
+ var input = new ObjectInput(header);
+
+ var status = storageApi.HashCollect(keys, ref input);
+
+ switch (status)
+ {
+ case GarnetStatus.OK:
+ while (!RespWriteUtils.WriteDirect(CmdStrings.RESP_OK, ref dcurr, dend))
+ SendAndReset();
+ break;
+ default:
+ while (!RespWriteUtils.WriteError(CmdStrings.RESP_ERR_HCOLLECT_ALREADY_IN_PROGRESS, ref dcurr, dend))
+ SendAndReset();
+ break;
+ }
+
+ return true;
+ }
+
private bool NetworkProcessClusterCommand(RespCommand command)
{
if (clusterSession == null)
diff --git a/libs/server/Resp/ClientCommands.cs b/libs/server/Resp/ClientCommands.cs
index 2d1a7889d6..ec5c46d6e2 100644
--- a/libs/server/Resp/ClientCommands.cs
+++ b/libs/server/Resp/ClientCommands.cs
@@ -273,7 +273,7 @@ private bool NetworkCLIENTKILL()
{
if (!ParseUtils.TryReadLong(ref value, out var idParsed))
{
- return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericParamShouldBeGreaterThanZero, "client-id")));
+ return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrShouldBeGreaterThanZero, "client-id")));
}
if (id is not null)
diff --git a/libs/server/Resp/CmdStrings.cs b/libs/server/Resp/CmdStrings.cs
index 22141be4d4..8f15f898e9 100644
--- a/libs/server/Resp/CmdStrings.cs
+++ b/libs/server/Resp/CmdStrings.cs
@@ -105,6 +105,7 @@ static partial class CmdStrings
public static ReadOnlySpan MAXLEN => "MAXLEN"u8;
public static ReadOnlySpan maxlen => "maxlen"u8;
public static ReadOnlySpan PUBSUB => "PUBSUB"u8;
+ public static ReadOnlySpan HCOLLECT => "HCOLLECT"u8;
public static ReadOnlySpan CHANNELS => "CHANNELS"u8;
public static ReadOnlySpan NUMPAT => "NUMPAT"u8;
public static ReadOnlySpan NUMSUB => "NUMSUB"u8;
@@ -125,6 +126,7 @@ static partial class CmdStrings
public static ReadOnlySpan WEIGHTS => "WEIGHTS"u8;
public static ReadOnlySpan AGGREGATE => "AGGREGATE"u8;
public static ReadOnlySpan SUM => "SUM"u8;
+ public static ReadOnlySpan FIELDS => "FIELDS"u8;
///
/// Response strings
@@ -215,6 +217,8 @@ static partial class CmdStrings
public static ReadOnlySpan RESP_ERR_INCR_SUPPORTS_ONLY_SINGLE_PAIR => "ERR INCR option supports a single increment-element pair"u8;
public static ReadOnlySpan RESP_ERR_INVALID_BITFIELD_TYPE => "ERR Invalid bitfield type. Use something like i16 u8. Note that u64 is not supported but i64 is"u8;
public static ReadOnlySpan RESP_ERR_SCRIPT_FLUSH_OPTIONS => "ERR SCRIPT FLUSH only support SYNC|ASYNC option"u8;
+ public static ReadOnlySpan RESP_ERR_INVALID_EXPIRE_TIME => "ERR invalid expire time, must be >= 0"u8;
+ public static ReadOnlySpan RESP_ERR_HCOLLECT_ALREADY_IN_PROGRESS => "ERR HCOLLECT scan already in progress"u8;
///
/// Response string templates
@@ -226,11 +230,13 @@ static partial class CmdStrings
public const string GenericErrWrongNumArgsTxn =
"ERR Invalid number of parameters to stored proc {0}, expected {1}, actual {2}";
public const string GenericSyntaxErrorOption = "ERR Syntax error in {0} option '{1}'";
- public const string GenericParamShouldBeGreaterThanZero = "ERR {0} should be greater than 0";
+ public const string GenericParamShouldBeGreaterThanZero = "ERR Parameter `{0}` should be greater than 0";
public const string GenericErrNotAFloat = "ERR {0} value is not a valid float";
public const string GenericErrCantBeNegative = "ERR {0} can't be negative";
public const string GenericErrAtLeastOneKey = "ERR at least 1 input key is needed for '{0}' command";
public const string GenericErrShouldBeGreaterThanZero = "ERR {0} should be greater than 0";
+ public const string GenericErrMandatoryMissing = "Mandatory argument {0} is missing or not at the right position";
+ public const string GenericErrMustMatchNoOfArgs = "The `{0}` parameter must match the number of arguments";
public const string GenericUnknownClientType = "ERR Unknown client type '{0}'";
public const string GenericErrDuplicateFilter = "ERR Filter '{0}' defined multiple times";
public const string GenericPubSubCommandDisabled = "ERR {0} is disabled, enable it with --pubsub option.";
diff --git a/libs/server/Resp/Objects/HashCommands.cs b/libs/server/Resp/Objects/HashCommands.cs
index 818ae9bc9f..687429f3b8 100644
--- a/libs/server/Resp/Objects/HashCommands.cs
+++ b/libs/server/Resp/Objects/HashCommands.cs
@@ -2,6 +2,8 @@
// Licensed under the MIT license.
using System;
+using System.Diagnostics;
+using System.Text;
using Garnet.common;
using Tsavorite.core;
@@ -569,5 +571,261 @@ private unsafe bool HashIncrement(RespCommand command, ref TGarnetAp
}
return true;
}
+
+ ///
+ /// Sets an expiration time for a field in the hash stored at key.
+ ///
+ ///
+ ///
+ ///
+ ///
+ private unsafe bool HashExpire(RespCommand command, ref TGarnetApi storageApi)
+ where TGarnetApi : IGarnetApi
+ {
+ if (storeWrapper.itemBroker == null)
+ throw new GarnetException("Object store is disabled");
+
+ if (parseState.Count <= 4)
+ {
+ return AbortWithWrongNumberOfArguments(command.ToString());
+ }
+
+ var key = parseState.GetArgSliceByRef(0);
+
+ long expireAt = 0;
+ var isMilliseconds = false;
+ if (!parseState.TryGetLong(1, out expireAt))
+ {
+ return AbortWithErrorMessage(CmdStrings.RESP_ERR_GENERIC_VALUE_IS_NOT_INTEGER);
+ }
+
+ if (expireAt < 0)
+ {
+ return AbortWithErrorMessage(CmdStrings.RESP_ERR_INVALID_EXPIRE_TIME);
+ }
+
+ switch (command)
+ {
+ case RespCommand.HEXPIRE:
+ expireAt = DateTimeOffset.UtcNow.ToUnixTimeSeconds() + expireAt;
+ isMilliseconds = false;
+ break;
+ case RespCommand.HPEXPIRE:
+ expireAt = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() + expireAt;
+ isMilliseconds = true;
+ break;
+ case RespCommand.HPEXPIREAT:
+ isMilliseconds = true;
+ break;
+ default: // RespCommand.HEXPIREAT
+ break;
+ }
+
+ var currIdx = 2;
+ if (parseState.TryGetExpireOption(currIdx, out var expireOption))
+ {
+ currIdx++; // If expire option is present, move to next argument else continue with the current argument
+ }
+
+ var fieldOption = parseState.GetArgSliceByRef(currIdx++);
+ if (!fieldOption.ReadOnlySpan.EqualsUpperCaseSpanIgnoringCase(CmdStrings.FIELDS))
+ {
+ return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrMandatoryMissing, "FIELDS")));
+ }
+
+ if (!parseState.TryGetInt(currIdx++, out var numFields))
+ {
+ return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericParamShouldBeGreaterThanZero, "numFields")));
+ }
+
+ if (parseState.Count != currIdx + numFields)
+ {
+ return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrMustMatchNoOfArgs, "numFields")));
+ }
+
+ var fieldsParseState = parseState.Slice(currIdx, numFields);
+
+ // Prepare input
+ var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HEXPIRE };
+ var input = new ObjectInput(header, ref fieldsParseState);
+
+ var outputFooter = new GarnetObjectStoreOutput { spanByteAndMemory = new SpanByteAndMemory(dcurr, (int)(dend - dcurr)) };
+
+ var status = storageApi.HashExpire(key, expireAt, isMilliseconds, expireOption, ref input, ref outputFooter);
+
+ switch (status)
+ {
+ case GarnetStatus.WRONGTYPE:
+ while (!RespWriteUtils.WriteError(CmdStrings.RESP_ERR_WRONG_TYPE, ref dcurr, dend))
+ SendAndReset();
+ break;
+ case GarnetStatus.NOTFOUND:
+ while (!RespWriteUtils.WriteArrayLength(numFields, ref dcurr, dend))
+ SendAndReset();
+ for (var i = 0; i < numFields; i++)
+ {
+ while (!RespWriteUtils.WriteInteger(-2, ref dcurr, dend))
+ SendAndReset();
+ }
+ break;
+ default:
+ ProcessOutputWithHeader(outputFooter.spanByteAndMemory);
+ break;
+ }
+
+ return true;
+ }
+
+ ///
+ /// Returns the time to live (TTL) for the specified fields in the hash stored at the given key.
+ ///
+ /// The type of the storage API.
+ /// The RESP command indicating the type of TTL operation.
+ /// The storage API instance to interact with the underlying storage.
+ /// True if the operation was successful; otherwise, false.
+ /// Thrown when the object store is disabled.
+ private unsafe bool HashTimeToLive(RespCommand command, ref TGarnetApi storageApi)
+ where TGarnetApi : IGarnetApi
+ {
+ if (storeWrapper.itemBroker == null)
+ throw new GarnetException("Object store is disabled");
+
+ if (parseState.Count <= 3)
+ {
+ return AbortWithWrongNumberOfArguments(command.ToString());
+ }
+
+ var key = parseState.GetArgSliceByRef(0);
+
+ var fieldOption = parseState.GetArgSliceByRef(1);
+ if (!fieldOption.ReadOnlySpan.EqualsUpperCaseSpanIgnoringCase(CmdStrings.FIELDS))
+ {
+ return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrMandatoryMissing, "FIELDS")));
+ }
+
+ if (!parseState.TryGetInt(2, out var numFields))
+ {
+ return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericParamShouldBeGreaterThanZero, "numFields")));
+ }
+
+ if (parseState.Count != 3 + numFields)
+ {
+ return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrMustMatchNoOfArgs, "numFields")));
+ }
+
+ var isMilliseconds = false;
+ var isTimestamp = false;
+ switch (command)
+ {
+ case RespCommand.HPTTL:
+ isMilliseconds = true;
+ isTimestamp = false;
+ break;
+ case RespCommand.HEXPIRETIME:
+ isMilliseconds = false;
+ isTimestamp = true;
+ break;
+ case RespCommand.HPEXPIRETIME:
+ isMilliseconds = true;
+ isTimestamp = true;
+ break;
+ default: // RespCommand.HTTL
+ break;
+ }
+
+ var fieldsParseState = parseState.Slice(3, numFields);
+
+ // Prepare input
+ var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HTTL };
+ var input = new ObjectInput(header, ref fieldsParseState);
+
+ var outputFooter = new GarnetObjectStoreOutput { spanByteAndMemory = new SpanByteAndMemory(dcurr, (int)(dend - dcurr)) };
+
+ var status = storageApi.HashTimeToLive(key, isMilliseconds, isTimestamp, ref input, ref outputFooter);
+
+ switch (status)
+ {
+ case GarnetStatus.WRONGTYPE:
+ while (!RespWriteUtils.WriteError(CmdStrings.RESP_ERR_WRONG_TYPE, ref dcurr, dend))
+ SendAndReset();
+ break;
+ case GarnetStatus.NOTFOUND:
+ while (!RespWriteUtils.WriteArrayLength(numFields, ref dcurr, dend))
+ SendAndReset();
+ for (var i = 0; i < numFields; i++)
+ {
+ while (!RespWriteUtils.WriteInteger(-2, ref dcurr, dend))
+ SendAndReset();
+ }
+ break;
+ default:
+ ProcessOutputWithHeader(outputFooter.spanByteAndMemory);
+ break;
+ }
+
+ return true;
+ }
+
+ private unsafe bool HashPersist(ref TGarnetApi storageApi)
+ where TGarnetApi : IGarnetApi
+ {
+ if (storeWrapper.itemBroker == null)
+ throw new GarnetException("Object store is disabled");
+
+ if (parseState.Count <= 3)
+ {
+ return AbortWithWrongNumberOfArguments(nameof(RespCommand.HPERSIST));
+ }
+
+ var key = parseState.GetArgSliceByRef(0);
+
+ var fieldOption = parseState.GetArgSliceByRef(1);
+ if (!fieldOption.ReadOnlySpan.EqualsUpperCaseSpanIgnoringCase(CmdStrings.FIELDS))
+ {
+ return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrMandatoryMissing, "FIELDS")));
+ }
+
+ if (!parseState.TryGetInt(2, out var numFields))
+ {
+ return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericParamShouldBeGreaterThanZero, "numFields")));
+ }
+
+ if (parseState.Count != 3 + numFields)
+ {
+ return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrMustMatchNoOfArgs, "numFields")));
+ }
+
+ var fieldsParseState = parseState.Slice(3, numFields);
+
+ // Prepare input
+ var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HPERSIST };
+ var input = new ObjectInput(header, ref fieldsParseState);
+
+ var outputFooter = new GarnetObjectStoreOutput { spanByteAndMemory = new SpanByteAndMemory(dcurr, (int)(dend - dcurr)) };
+
+ var status = storageApi.HashPersist(key, ref input, ref outputFooter);
+
+ switch (status)
+ {
+ case GarnetStatus.WRONGTYPE:
+ while (!RespWriteUtils.WriteError(CmdStrings.RESP_ERR_WRONG_TYPE, ref dcurr, dend))
+ SendAndReset();
+ break;
+ case GarnetStatus.NOTFOUND:
+ while (!RespWriteUtils.WriteArrayLength(numFields, ref dcurr, dend))
+ SendAndReset();
+ for (var i = 0; i < numFields; i++)
+ {
+ while (!RespWriteUtils.WriteInteger(-2, ref dcurr, dend))
+ SendAndReset();
+ }
+ break;
+ default:
+ ProcessOutputWithHeader(outputFooter.spanByteAndMemory);
+ break;
+ }
+
+ return true;
+ }
}
}
\ No newline at end of file
diff --git a/libs/server/Resp/Objects/ListCommands.cs b/libs/server/Resp/Objects/ListCommands.cs
index a1cf1087cc..f7bb079656 100644
--- a/libs/server/Resp/Objects/ListCommands.cs
+++ b/libs/server/Resp/Objects/ListCommands.cs
@@ -196,7 +196,7 @@ private unsafe bool ListPopMultiple(ref TGarnetApi storageApi)
// Read count of keys
if (!parseState.TryGetInt(currTokenId++, out var numKeys))
{
- var err = string.Format(CmdStrings.GenericParamShouldBeGreaterThanZero, "numkeys");
+ var err = string.Format(CmdStrings.GenericErrShouldBeGreaterThanZero, "numkeys");
return AbortWithErrorMessage(Encoding.ASCII.GetBytes(err));
}
@@ -237,7 +237,7 @@ private unsafe bool ListPopMultiple(ref TGarnetApi storageApi)
// Read count
if (!parseState.TryGetInt(currTokenId, out popCount))
{
- var err = string.Format(CmdStrings.GenericParamShouldBeGreaterThanZero, "count");
+ var err = string.Format(CmdStrings.GenericErrShouldBeGreaterThanZero, "count");
return AbortWithErrorMessage(Encoding.ASCII.GetBytes(err));
}
}
diff --git a/libs/server/Resp/Parser/RespCommand.cs b/libs/server/Resp/Parser/RespCommand.cs
index 9e706a5859..8ced2becb1 100644
--- a/libs/server/Resp/Parser/RespCommand.cs
+++ b/libs/server/Resp/Parser/RespCommand.cs
@@ -45,6 +45,10 @@ public enum RespCommand : ushort
HSTRLEN,
HVALS,
KEYS,
+ HTTL,
+ HPTTL,
+ HEXPIRETIME,
+ HPEXPIRETIME,
LINDEX,
LLEN,
LPOS,
@@ -106,7 +110,13 @@ public enum RespCommand : ushort
GETDEL,
GETEX,
GETSET,
+ HCOLLECT,
HDEL,
+ HEXPIRE,
+ HPEXPIRE,
+ HEXPIREAT,
+ HPEXPIREAT,
+ HPERSIST,
HINCRBY,
HINCRBYFLOAT,
HMSET,
@@ -802,6 +812,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan
{
return RespCommand.HLEN;
}
+ else if (*(ulong*)(ptr + 2) == MemoryMarshal.Read("\r\nHTTL\r\n"u8))
+ {
+ return RespCommand.HTTL;
+ }
break;
case 'K':
@@ -976,6 +990,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan
{
return RespCommand.HSCAN;
}
+ else if (*(ulong*)(ptr + 3) == MemoryMarshal.Read("\nHPTTL\r\n"u8))
+ {
+ return RespCommand.HPTTL;
+ }
break;
case 'L':
@@ -1267,6 +1285,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan
{
return RespCommand.HEXISTS;
}
+ else if (*(ulong*)(ptr + 4) == MemoryMarshal.Read("HEXPIRE\r"u8) && *(byte*)(ptr + 12) == '\n')
+ {
+ return RespCommand.HEXPIRE;
+ }
else if (*(ulong*)(ptr + 4) == MemoryMarshal.Read("HINCRBY\r"u8) && *(byte*)(ptr + 12) == '\n')
{
return RespCommand.HINCRBY;
@@ -1351,6 +1373,14 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan
{
return RespCommand.EXPIREAT;
}
+ else if (*(ulong*)(ptr + 4) == MemoryMarshal.Read("HPEXPIRE"u8) && *(ushort*)(ptr + 12) == MemoryMarshal.Read("\r\n"u8))
+ {
+ return RespCommand.HPEXPIRE;
+ }
+ else if (*(ulong*)(ptr + 4) == MemoryMarshal.Read("HPERSIST"u8) && *(ushort*)(ptr + 12) == MemoryMarshal.Read("\r\n"u8))
+ {
+ return RespCommand.HPERSIST;
+ }
break;
case 9:
if (*(ulong*)(ptr + 4) == MemoryMarshal.Read("SUBSCRIB"u8) && *(uint*)(ptr + 11) == MemoryMarshal.Read("BE\r\n"u8))
@@ -1381,6 +1411,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan
{
return RespCommand.PEXPIREAT;
}
+ else if (*(ulong*)(ptr + 4) == MemoryMarshal.Read("HEXPIREA"u8) && *(uint*)(ptr + 11) == MemoryMarshal.Read("AT\r\n"u8))
+ {
+ return RespCommand.HEXPIREAT;
+ }
break;
}
@@ -1446,6 +1480,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan
{
return RespCommand.ZINTERCARD;
}
+ else if (*(ulong*)(ptr + 1) == MemoryMarshal.Read("10\r\nHPEX"u8) && *(ulong*)(ptr + 9) == MemoryMarshal.Read("PIREAT\r\n"u8))
+ {
+ return RespCommand.HPEXPIREAT;
+ }
break;
case 11:
@@ -1493,6 +1531,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan
{
return RespCommand.ZUNIONSTORE;
}
+ else if (*(ulong*)(ptr + 2) == MemoryMarshal.Read("1\r\nHEXPI"u8) && *(ulong*)(ptr + 10) == MemoryMarshal.Read("RETIME\r\n"u8))
+ {
+ return RespCommand.HEXPIRETIME;
+ }
break;
case 12:
@@ -1504,6 +1546,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan
{
return RespCommand.HINCRBYFLOAT;
}
+ else if (*(ulong*)(ptr + 3) == MemoryMarshal.Read("\r\nHPEXPI"u8) && *(ulong*)(ptr + 11) == MemoryMarshal.Read("RETIME\r\n"u8))
+ {
+ return RespCommand.HPEXPIRETIME;
+ }
break;
case 13:
@@ -2114,6 +2160,10 @@ private RespCommand SlowParseCommand(ref int count, ref ReadOnlySpan speci
return RespCommand.NONE;
}
}
+ else if (command.SequenceEqual(CmdStrings.HCOLLECT))
+ {
+ return RespCommand.HCOLLECT;
+ }
else
{
// Custom commands should have never been set when we reach this point
diff --git a/libs/server/Resp/RespServerSession.cs b/libs/server/Resp/RespServerSession.cs
index f72be029bb..722ed01a0d 100644
--- a/libs/server/Resp/RespServerSession.cs
+++ b/libs/server/Resp/RespServerSession.cs
@@ -683,6 +683,15 @@ private bool ProcessArrayCommands(RespCommand cmd, ref TGarnetApi st
RespCommand.HVALS => HashKeys(cmd, ref storageApi),
RespCommand.HINCRBY => HashIncrement(cmd, ref storageApi),
RespCommand.HINCRBYFLOAT => HashIncrement(cmd, ref storageApi),
+ RespCommand.HEXPIRE => HashExpire(cmd, ref storageApi),
+ RespCommand.HPEXPIRE => HashExpire(cmd, ref storageApi),
+ RespCommand.HEXPIREAT => HashExpire(cmd, ref storageApi),
+ RespCommand.HPEXPIREAT => HashExpire(cmd, ref storageApi),
+ RespCommand.HTTL => HashTimeToLive(cmd, ref storageApi),
+ RespCommand.HPTTL => HashTimeToLive(cmd, ref storageApi),
+ RespCommand.HEXPIRETIME => HashTimeToLive(cmd, ref storageApi),
+ RespCommand.HPEXPIRETIME => HashTimeToLive(cmd, ref storageApi),
+ RespCommand.HPERSIST => HashPersist(ref storageApi),
RespCommand.HSETNX => HashSet(cmd, ref storageApi),
RespCommand.HRANDFIELD => HashRandomField(cmd, ref storageApi),
RespCommand.HSCAN => ObjectScan(GarnetObjectType.Hash, ref storageApi),
@@ -757,7 +766,7 @@ private bool ProcessOtherCommands(RespCommand command, ref TGarnetAp
RespCommand.EVAL => TryEVAL(),
RespCommand.EVALSHA => TryEVALSHA(),
- _ => Process(command)
+ _ => Process(command, ref storageApi)
};
bool NetworkCLIENTID()
@@ -805,9 +814,9 @@ bool NetworkCustomProcedure()
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
- bool Process(RespCommand command)
+ bool Process(RespCommand command, ref TGarnetApi storageApi)
{
- ProcessAdminCommands(command);
+ ProcessAdminCommands(command, ref storageApi);
return true;
}
diff --git a/libs/server/Servers/GarnetServerOptions.cs b/libs/server/Servers/GarnetServerOptions.cs
index 0093cfd8e0..92effae6d8 100644
--- a/libs/server/Servers/GarnetServerOptions.cs
+++ b/libs/server/Servers/GarnetServerOptions.cs
@@ -136,6 +136,11 @@ public class GarnetServerOptions : ServerOptions
///
public int CompactionFrequencySecs = 0;
+ ///
+ /// Hash collection frequency in seconds. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation.
+ ///
+ public int HashCollectFrequencySecs = 0;
+
///
/// Hybrid log compaction type.
/// None - no compaction.
diff --git a/libs/server/Storage/Session/Common/MemoryUtils.cs b/libs/server/Storage/Session/Common/MemoryUtils.cs
index bcbf3e0579..7d76ad3a8e 100644
--- a/libs/server/Storage/Session/Common/MemoryUtils.cs
+++ b/libs/server/Storage/Session/Common/MemoryUtils.cs
@@ -38,6 +38,12 @@ public static class MemoryUtils
/// .Net object avg. overhead for holding a hash set entry
public const int HashSetEntryOverhead = 40;
+ /// .Net object overhead for priority queue
+ public const int PriorityQueueOverhead = 80;
+
+ /// .Net object avg. overhead for holding a priority queue entry
+ public const int PriorityQueueEntryOverhead = 48;
+
internal static long CalculateKeyValueSize(byte[] key, IGarnetObject value)
{
// Round up key size to account for alignment during allocation
diff --git a/libs/server/Storage/Session/ObjectStore/HashOps.cs b/libs/server/Storage/Session/ObjectStore/HashOps.cs
index aa61cee47e..cc59b7acbd 100644
--- a/libs/server/Storage/Session/ObjectStore/HashOps.cs
+++ b/libs/server/Storage/Session/ObjectStore/HashOps.cs
@@ -2,6 +2,7 @@
// Licensed under the MIT license.
using System;
+using Garnet.common;
using Tsavorite.core;
namespace Garnet.server
@@ -14,6 +15,8 @@ namespace Garnet.server
///
sealed partial class StorageSession : IDisposable
{
+ private SingleWriterMultiReaderLock _hcollectTaskLock;
+
///
/// HashSet: Sets the specified fields to their respective values in the hash stored at key.
/// Values of specified fields that exist in the hash are overwritten.
@@ -537,5 +540,127 @@ public GarnetStatus HashIncrement(byte[] key, ArgSlice input, ou
public GarnetStatus HashIncrement(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext)
where TObjectContext : ITsavoriteContext
=> RMWObjectStoreOperationWithOutput(key, ref input, ref objectContext, ref outputFooter);
+
+ ///
+ /// Sets the expiration time for the specified key.
+ ///
+ /// The type of the object context.
+ /// The key for which to set the expiration time.
+ /// The expiration time in ticks.
+ /// Indicates whether the expiration time is in milliseconds.
+ /// The expiration option to use.
+ /// The input object containing the operation details.
+ /// The output footer object to store the result.
+ /// The object context for the operation.
+ /// The status of the operation.
+ public GarnetStatus HashExpire(ArgSlice key, long expireAt, bool isMilliseconds, ExpireOption expireOption, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext)
+ where TObjectContext : ITsavoriteContext
+ {
+ var expireAtUtc = isMilliseconds ? ConvertUtils.UnixTimestampInMillisecondsToTicks(expireAt) : ConvertUtils.UnixTimestampInSecondsToTicks(expireAt);
+ var expiryLength = NumUtils.NumDigitsInLong(expireAtUtc);
+ var expirySlice = scratchBufferManager.CreateArgSlice(expiryLength);
+ var expirySpan = expirySlice.Span;
+ NumUtils.LongToSpanByte(expireAtUtc, expirySpan);
+
+ parseState.Initialize(1 + input.parseState.Count);
+ parseState.SetArgument(0, expirySlice);
+ parseState.SetArguments(1, input.parseState.Parameters);
+
+ var innerInput = new ObjectInput(input.header, ref parseState, startIdx: 0, arg1: (int)expireOption);
+
+ return RMWObjectStoreOperationWithOutput(key.ToArray(), ref innerInput, ref objectContext, ref outputFooter);
+ }
+
+ ///
+ /// Returns the time-to-live (TTL) of a hash key.
+ ///
+ /// The type of the object context.
+ /// The key of the hash.
+ /// Indicates whether the TTL is in milliseconds.
+ /// Indicates whether the TTL is a timestamp.
+ /// The input object containing the operation details.
+ /// The output footer object to store the result.
+ /// The object context for the operation.
+ /// The status of the operation.
+ public GarnetStatus HashTimeToLive(ArgSlice key, bool isMilliseconds, bool isTimestamp, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext)
+ where TObjectContext : ITsavoriteContext
+ {
+ var innerInput = new ObjectInput(input.header, ref input.parseState, arg1: isMilliseconds ? 1 : 0, arg2: isTimestamp ? 1 : 0);
+
+ return ReadObjectStoreOperationWithOutput(key.ToArray(), ref innerInput, ref objectContext, ref outputFooter);
+ }
+
+ ///
+ /// Removes the expiration time from a hash key, making it persistent.
+ ///
+ /// The type of the object context.
+ /// The key of the hash.
+ /// The input object containing the operation details.
+ /// The output footer object to store the result.
+ /// The object context for the operation.
+ /// The status of the operation.
+ public GarnetStatus HashPersist(ArgSlice key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext)
+ where TObjectContext : ITsavoriteContext
+ => RMWObjectStoreOperationWithOutput(key.ToArray(), ref input, ref objectContext, ref outputFooter);
+
+ ///
+ /// Collects hash keys and performs a specified operation on them.
+ ///
+ /// The type of the object context.
+ /// The keys to collect.
+ /// The input object containing the operation details.
+ /// The object context for the operation.
+ /// The status of the operation.
+ ///
+ /// If the first key is "*", all hash keys are scanned in batches and the operation is performed on each key.
+ /// Otherwise, the operation is performed on the specified keys.
+ ///
+ public GarnetStatus HashCollect(ReadOnlySpan keys, ref ObjectInput input, ref TObjectContext objectContext)
+ where TObjectContext : ITsavoriteContext
+ {
+ if (!_hcollectTaskLock.TryWriteLock())
+ {
+ return GarnetStatus.NOTFOUND;
+ }
+
+ try
+ {
+ if (keys[0].ReadOnlySpan.SequenceEqual("*"u8))
+ {
+ long cursor = 0;
+ long storeCursor = 0;
+
+ // Scan all hash keys in batches
+ do
+ {
+ if (!DbScan(keys[0], true, cursor, out storeCursor, out var hashKeys, 100, CmdStrings.HASH))
+ {
+ return GarnetStatus.OK;
+ }
+
+ // Process each hash key
+ foreach (var hashKey in hashKeys)
+ {
+ RMWObjectStoreOperation(hashKey, ref input, out _, ref objectContext);
+ }
+
+ cursor = storeCursor;
+ } while (storeCursor != 0);
+
+ return GarnetStatus.OK;
+ }
+
+ foreach (var key in keys)
+ {
+ RMWObjectStoreOperation(key.ToArray(), ref input, out _, ref objectContext);
+ }
+
+ return GarnetStatus.OK;
+ }
+ finally
+ {
+ _hcollectTaskLock.WriteUnlock();
+ }
+ }
}
}
\ No newline at end of file
diff --git a/libs/server/StoreWrapper.cs b/libs/server/StoreWrapper.cs
index 428003f335..c2aa9b4be6 100644
--- a/libs/server/StoreWrapper.cs
+++ b/libs/server/StoreWrapper.cs
@@ -17,6 +17,7 @@
namespace Garnet.server
{
+ using static System.Reflection.Metadata.BlobBuilder; // NOTE(review): accidental IDE auto-import — nothing here uses BlobBuilder; remove before merge
using MainStoreAllocator = SpanByteAllocator>;
using MainStoreFunctions = StoreFunctions;
@@ -415,6 +416,49 @@ async Task CompactionTask(int compactionFrequencySecs, CancellationToken token =
}
}
+ async Task HashCollectTask(int hashCollectFrequencySecs, CancellationToken token = default)
+ {
+ Debug.Assert(hashCollectFrequencySecs > 0);
+ try
+ {
+ var scratchBufferManager = new ScratchBufferManager();
+ using var storageSession = new StorageSession(this, scratchBufferManager, null, null, logger);
+
+ while (true)
+ {
+ if (token.IsCancellationRequested) return;
+
+ if (objectStore is null)
+ {
+ logger?.LogWarning("HashCollectFrequencySecs option is configured but Object store is disabled. Stopping the background hash collect task.");
+ return;
+ }
+
+ ExecuteHashCollect(scratchBufferManager, storageSession);
+
+ await Task.Delay(TimeSpan.FromSeconds(hashCollectFrequencySecs), token);
+ }
+ }
+ catch (TaskCanceledException ex) when (token.IsCancellationRequested)
+ {
+ logger?.LogError(ex, "HashCollectTask exception received for background hash collect task.");
+ }
+ catch (Exception ex)
+ {
+ logger?.LogCritical(ex, "Unknown exception received for background hash collect task. Hash collect task won't be resumed.");
+ }
+
+ static void ExecuteHashCollect(ScratchBufferManager scratchBufferManager, StorageSession storageSession)
+ {
+ var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HCOLLECT };
+ var input = new ObjectInput(header);
+
+ ReadOnlySpan key = [ArgSlice.FromPinnedSpan("*"u8)];
+ storageSession.HashCollect(key, ref input, ref storageSession.objectStoreBasicContext);
+ scratchBufferManager.Reset();
+ }
+ }
+
void DoCompaction()
{
// Periodic compaction -> no need to compact before checkpointing
@@ -569,6 +613,11 @@ internal void Start()
Task.Run(async () => await CompactionTask(serverOptions.CompactionFrequencySecs, ctsCommit.Token));
}
+ if (serverOptions.HashCollectFrequencySecs > 0)
+ {
+ Task.Run(async () => await HashCollectTask(serverOptions.HashCollectFrequencySecs, ctsCommit.Token));
+ }
+
if (serverOptions.AdjustedIndexMaxCacheLines > 0 || serverOptions.AdjustedObjectStoreIndexMaxCacheLines > 0)
{
Task.Run(() => IndexAutoGrowTask(ctsCommit.Token));
diff --git a/playground/CommandInfoUpdater/SupportedCommand.cs b/playground/CommandInfoUpdater/SupportedCommand.cs
index b0cbe3dd21..474e7cc61d 100644
--- a/playground/CommandInfoUpdater/SupportedCommand.cs
+++ b/playground/CommandInfoUpdater/SupportedCommand.cs
@@ -136,9 +136,19 @@ public class SupportedCommand
new("GETDEL", RespCommand.GETDEL),
new("GETRANGE", RespCommand.GETRANGE),
new("GETSET", RespCommand.GETSET),
+ new("HCOLLECT", RespCommand.HCOLLECT),
new("HDEL", RespCommand.HDEL),
new("HELLO", RespCommand.HELLO),
new("HEXISTS", RespCommand.HEXISTS),
+ new("HEXPIRE", RespCommand.HEXPIRE),
+ new("HPEXPIRE", RespCommand.HPEXPIRE),
+ new("HEXPIREAT", RespCommand.HEXPIREAT),
+ new("HPEXPIREAT", RespCommand.HPEXPIREAT),
+ new("HTTL", RespCommand.HTTL),
+ new("HPTTL", RespCommand.HPTTL),
+ new("HEXPIRETIME", RespCommand.HEXPIRETIME),
+ new("HPEXPIRETIME", RespCommand.HPEXPIRETIME),
+ new("HPERSIST", RespCommand.HPERSIST),
new("HGET", RespCommand.HGET),
new("HGETALL", RespCommand.HGETALL),
new("HINCRBY", RespCommand.HINCRBY),
diff --git a/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs b/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs
index 659a993f17..4c0b82dd15 100644
--- a/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs
+++ b/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs
@@ -2442,5 +2442,184 @@ public override string[] GetSingleSlotRequest()
public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException();
}
+
+ internal class HEXPIRE : BaseCommand
+ {
+ public override bool IsArrayCommand => false;
+ public override bool ArrayResponse => true;
+ public override string Command => nameof(HEXPIRE);
+
+ public override string[] GetSingleSlotRequest()
+ {
+ var ssk = GetSingleSlotKeys;
+ return [ssk[0], "3", "FIELDS", "1", "field1"];
+ }
+
+ public override string[] GetCrossSlotRequest() => throw new NotImplementedException();
+
+ public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException();
+ }
+
+ internal class HPEXPIRE : BaseCommand
+ {
+ public override bool IsArrayCommand => false;
+ public override bool ArrayResponse => true;
+ public override string Command => nameof(HPEXPIRE);
+
+ public override string[] GetSingleSlotRequest()
+ {
+ var ssk = GetSingleSlotKeys;
+ return [ssk[0], "3000", "FIELDS", "1", "field1"];
+ }
+
+ public override string[] GetCrossSlotRequest() => throw new NotImplementedException();
+
+ public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException();
+ }
+
+ internal class HEXPIREAT : BaseCommand
+ {
+ public override bool IsArrayCommand => false;
+ public override bool ArrayResponse => true;
+ public override string Command => nameof(HEXPIREAT);
+
+ public override string[] GetSingleSlotRequest()
+ {
+ var timestamp = DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeSeconds().ToString();
+ var ssk = GetSingleSlotKeys;
+ return [ssk[0], timestamp, "FIELDS", "1", "field1"];
+ }
+
+ public override string[] GetCrossSlotRequest() => throw new NotImplementedException();
+
+ public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException();
+ }
+
+ internal class HPEXPIREAT : BaseCommand
+ {
+ public override bool IsArrayCommand => false;
+ public override bool ArrayResponse => true;
+ public override string Command => nameof(HPEXPIREAT);
+
+ public override string[] GetSingleSlotRequest()
+ {
+ var timestamp = DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeMilliseconds().ToString();
+ var ssk = GetSingleSlotKeys;
+ return [ssk[0], timestamp, "FIELDS", "1", "field1"];
+ }
+
+ public override string[] GetCrossSlotRequest() => throw new NotImplementedException();
+
+ public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException();
+ }
+
+ internal class HTTL : BaseCommand
+ {
+ public override bool IsArrayCommand => false;
+ public override bool ArrayResponse => true;
+ public override string Command => nameof(HTTL);
+
+ public override string[] GetSingleSlotRequest()
+ {
+ var ssk = GetSingleSlotKeys;
+ return [ssk[0], "FIELDS", "1", "field1"];
+ }
+
+ public override string[] GetCrossSlotRequest() => throw new NotImplementedException();
+
+ public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException();
+ }
+
+ internal class HPTTL : BaseCommand
+ {
+ public override bool IsArrayCommand => false;
+ public override bool ArrayResponse => true;
+ public override string Command => nameof(HPTTL);
+
+ public override string[] GetSingleSlotRequest()
+ {
+ var ssk = GetSingleSlotKeys;
+ return [ssk[0], "FIELDS", "1", "field1"];
+ }
+
+ public override string[] GetCrossSlotRequest() => throw new NotImplementedException();
+
+ public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException();
+ }
+
+ internal class HEXPIRETIME : BaseCommand
+ {
+ public override bool IsArrayCommand => false;
+ public override bool ArrayResponse => true;
+ public override string Command => nameof(HEXPIRETIME);
+
+ public override string[] GetSingleSlotRequest()
+ {
+ var ssk = GetSingleSlotKeys;
+ return [ssk[0], "FIELDS", "1", "field1"];
+ }
+
+ public override string[] GetCrossSlotRequest() => throw new NotImplementedException();
+
+ public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException();
+ }
+
+ internal class HPEXPIRETIME : BaseCommand
+ {
+ public override bool IsArrayCommand => false;
+ public override bool ArrayResponse => true;
+ public override string Command => nameof(HPEXPIRETIME);
+
+ public override string[] GetSingleSlotRequest()
+ {
+ var ssk = GetSingleSlotKeys;
+ return [ssk[0], "FIELDS", "1", "field1"];
+ }
+
+ public override string[] GetCrossSlotRequest() => throw new NotImplementedException();
+
+ public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException();
+ }
+
+ internal class HPERSIST : BaseCommand
+ {
+ public override bool IsArrayCommand => false;
+ public override bool ArrayResponse => true;
+ public override string Command => nameof(HPERSIST);
+
+ public override string[] GetSingleSlotRequest()
+ {
+ var ssk = GetSingleSlotKeys;
+ return [ssk[0], "FIELDS", "1", "field1"];
+ }
+
+ public override string[] GetCrossSlotRequest() => throw new NotImplementedException();
+
+ public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException();
+ }
+
+ internal class HCOLLECT : BaseCommand
+ {
+ public override bool IsArrayCommand => false;
+ public override bool ArrayResponse => false;
+ public override string Command => nameof(HCOLLECT);
+
+ public override string[] GetSingleSlotRequest()
+ {
+ var ssk = GetSingleSlotKeys;
+ return [ssk[0]];
+ }
+
+ public override string[] GetCrossSlotRequest() => throw new NotImplementedException();
+
+ public override ArraySegment[] SetupSingleSlotRequest()
+ {
+ var ssk = GetSingleSlotKeys;
+ var setup = new ArraySegment[1];
+ setup[0] = new ArraySegment(["HSET", ssk[0], "a", "1", "b", "2", "c", "3"]);
+ return setup;
+ }
+ }
+
#endregion
}
\ No newline at end of file
diff --git a/test/Garnet.test.cluster/RedirectTests/ClusterSlotVerificationTests.cs b/test/Garnet.test.cluster/RedirectTests/ClusterSlotVerificationTests.cs
index a7912b8f84..b7031bf255 100644
--- a/test/Garnet.test.cluster/RedirectTests/ClusterSlotVerificationTests.cs
+++ b/test/Garnet.test.cluster/RedirectTests/ClusterSlotVerificationTests.cs
@@ -127,6 +127,16 @@ public class ClusterSlotVerificationTests
new HEXISTS(),
new HKEYS(),
new HINCRBY(),
+ new HEXPIRE(),
+ new HPEXPIRE(),
+ new HEXPIREAT(),
+ new HPEXPIREAT(),
+ new HTTL(),
+ new HPTTL(),
+ new HEXPIRETIME(),
+ new HPEXPIRETIME(),
+ new HPERSIST(),
+ new HCOLLECT(),
new CLUSTERGETPROC(),
new CLUSTERSETPROC(),
new WATCH(),
@@ -316,6 +326,16 @@ public virtual void OneTimeTearDown()
[TestCase("HEXISTS")]
[TestCase("HKEYS")]
[TestCase("HINCRBY")]
+ [TestCase("HEXPIRE")]
+ [TestCase("HPEXPIRE")]
+ [TestCase("HEXPIREAT")]
+ [TestCase("HPEXPIREAT")]
+ [TestCase("HTTL")]
+ [TestCase("HPTTL")]
+ [TestCase("HEXPIRETIME")]
+ [TestCase("HPEXPIRETIME")]
+ [TestCase("HPERSIST")]
+ [TestCase("HCOLLECT")]
[TestCase("CLUSTERGETPROC")]
[TestCase("CLUSTERSETPROC")]
[TestCase("WATCH")]
@@ -469,6 +489,16 @@ void GarnetClientSessionClusterDown(BaseCommand command)
[TestCase("HEXISTS")]
[TestCase("HKEYS")]
[TestCase("HINCRBY")]
+ [TestCase("HEXPIRE")]
+ [TestCase("HPEXPIRE")]
+ [TestCase("HEXPIREAT")]
+ [TestCase("HPEXPIREAT")]
+ [TestCase("HTTL")]
+ [TestCase("HPTTL")]
+ [TestCase("HEXPIRETIME")]
+ [TestCase("HPEXPIRETIME")]
+ [TestCase("HPERSIST")]
+ [TestCase("HCOLLECT")]
[TestCase("CLUSTERGETPROC")]
[TestCase("CLUSTERSETPROC")]
[TestCase("WATCHMS")]
@@ -631,6 +661,16 @@ void GarnetClientSessionOK(BaseCommand command)
[TestCase("HEXISTS")]
[TestCase("HKEYS")]
[TestCase("HINCRBY")]
+ [TestCase("HEXPIRE")]
+ [TestCase("HPEXPIRE")]
+ [TestCase("HEXPIREAT")]
+ [TestCase("HPEXPIREAT")]
+ [TestCase("HTTL")]
+ [TestCase("HPTTL")]
+ [TestCase("HEXPIRETIME")]
+ [TestCase("HPEXPIRETIME")]
+ [TestCase("HPERSIST")]
+ [TestCase("HCOLLECT")]
[TestCase("CLUSTERGETPROC")]
[TestCase("CLUSTERSETPROC")]
[TestCase("WATCHMS")]
@@ -784,6 +824,16 @@ void GarnetClientSessionCrossslotTest(BaseCommand command)
[TestCase("HEXISTS")]
[TestCase("HKEYS")]
[TestCase("HINCRBY")]
+ [TestCase("HEXPIRE")]
+ [TestCase("HPEXPIRE")]
+ [TestCase("HEXPIREAT")]
+ [TestCase("HPEXPIREAT")]
+ [TestCase("HTTL")]
+ [TestCase("HPTTL")]
+ [TestCase("HEXPIRETIME")]
+ [TestCase("HPEXPIRETIME")]
+ [TestCase("HPERSIST")]
+ [TestCase("HCOLLECT")]
[TestCase("CLUSTERGETPROC")]
[TestCase("CLUSTERSETPROC")]
[TestCase("WATCHMS")]
@@ -945,6 +995,16 @@ void GarnetClientSessionMOVEDTest(BaseCommand command)
[TestCase("HEXISTS")]
[TestCase("HKEYS")]
[TestCase("HINCRBY")]
+ [TestCase("HEXPIRE")]
+ [TestCase("HPEXPIRE")]
+ [TestCase("HEXPIREAT")]
+ [TestCase("HPEXPIREAT")]
+ [TestCase("HTTL")]
+ [TestCase("HPTTL")]
+ [TestCase("HEXPIRETIME")]
+ [TestCase("HPEXPIRETIME")]
+ [TestCase("HPERSIST")]
+ [TestCase("HCOLLECT")]
[TestCase("CLUSTERGETPROC")]
[TestCase("CLUSTERSETPROC")]
[TestCase("WATCHMS")]
@@ -1124,6 +1184,16 @@ void GarnetClientSessionASKTest(BaseCommand command)
[TestCase("HEXISTS")]
[TestCase("HKEYS")]
[TestCase("HINCRBY")]
+ [TestCase("HEXPIRE")]
+ [TestCase("HPEXPIRE")]
+ [TestCase("HEXPIREAT")]
+ [TestCase("HPEXPIREAT")]
+ [TestCase("HTTL")]
+ [TestCase("HPTTL")]
+ [TestCase("HEXPIRETIME")]
+ [TestCase("HPEXPIRETIME")]
+ [TestCase("HPERSIST")]
+ [TestCase("HCOLLECT")]
[TestCase("CLUSTERGETPROC")]
[TestCase("CLUSTERSETPROC")]
[TestCase("WATCHMS")]
diff --git a/test/Garnet.test/Resp/ACL/RespCommandTests.cs b/test/Garnet.test/Resp/ACL/RespCommandTests.cs
index bf47124bdb..fd22d7a8cb 100644
--- a/test/Garnet.test/Resp/ACL/RespCommandTests.cs
+++ b/test/Garnet.test/Resp/ACL/RespCommandTests.cs
@@ -3185,6 +3185,165 @@ static async Task DoSubStringAsync(GarnetClient client)
}
}
+ [Test]
+ public async Task HExpireACLsAsync()
+ {
+ await CheckCommandsAsync(
+ "HEXPIRE",
+ [DoHExpireAsync]
+ );
+
+ static async Task DoHExpireAsync(GarnetClient client)
+ {
+ var val = await client.ExecuteForStringArrayResultAsync("HEXPIRE", ["foo", "1", "FIELDS", "1", "bar"]);
+ ClassicAssert.AreEqual(1, val.Length);
+ ClassicAssert.AreEqual("-2", val[0]);
+ }
+ }
+
+ [Test]
+ public async Task HPExpireACLsAsync()
+ {
+ await CheckCommandsAsync(
+ "HPEXPIRE",
+ [DoHPExpireAsync]
+ );
+
+ static async Task DoHPExpireAsync(GarnetClient client)
+ {
+ var val = await client.ExecuteForStringArrayResultAsync("HPEXPIRE", ["foo", "1", "FIELDS", "1", "bar"]);
+ ClassicAssert.AreEqual(1, val.Length);
+ ClassicAssert.AreEqual("-2", val[0]);
+ }
+ }
+
+ [Test]
+ public async Task HExpireAtACLsAsync()
+ {
+ await CheckCommandsAsync(
+ "HEXPIREAT",
+ [DoHExpireAtAsync]
+ );
+
+ static async Task DoHExpireAtAsync(GarnetClient client)
+ {
+ var val = await client.ExecuteForStringArrayResultAsync("HEXPIREAT", ["foo", DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeSeconds().ToString(), "FIELDS", "1", "bar"]);
+ ClassicAssert.AreEqual(1, val.Length);
+ ClassicAssert.AreEqual("-2", val[0]);
+ }
+ }
+
+ [Test]
+ public async Task HPExpireAtACLsAsync()
+ {
+ await CheckCommandsAsync(
+ "HPEXPIREAT",
+ [DoHPExpireAtAsync]
+ );
+
+ static async Task DoHPExpireAtAsync(GarnetClient client)
+ {
+ var val = await client.ExecuteForStringArrayResultAsync("HPEXPIREAT", ["foo", DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeMilliseconds().ToString(), "FIELDS", "1", "bar"]);
+ ClassicAssert.AreEqual(1, val.Length);
+ ClassicAssert.AreEqual("-2", val[0]);
+ }
+ }
+
+ [Test]
+ public async Task HExpireTimeACLsAsync()
+ {
+ await CheckCommandsAsync(
+ "HEXPIRETIME",
+ [DoHExpireTimeAsync]
+ );
+
+ static async Task DoHExpireTimeAsync(GarnetClient client)
+ {
+ var val = await client.ExecuteForStringArrayResultAsync("HEXPIRETIME", ["foo", "FIELDS", "1", "bar"]);
+ ClassicAssert.AreEqual(1, val.Length);
+ ClassicAssert.AreEqual("-2", val[0]);
+ }
+ }
+
+ [Test]
+ public async Task HPExpireTimeACLsAsync()
+ {
+ await CheckCommandsAsync(
+ "HPEXPIRETIME",
+ [DoHPExpireTimeAsync]
+ );
+
+ static async Task DoHPExpireTimeAsync(GarnetClient client)
+ {
+ var val = await client.ExecuteForStringArrayResultAsync("HPEXPIRETIME", ["foo", "FIELDS", "1", "bar"]);
+ ClassicAssert.AreEqual(1, val.Length);
+ ClassicAssert.AreEqual("-2", val[0]);
+ }
+ }
+
+ [Test]
+ public async Task HTTLACLsAsync()
+ {
+ await CheckCommandsAsync(
+ "HTTL",
+ [DoHETTLAsync]
+ );
+
+ static async Task DoHETTLAsync(GarnetClient client)
+ {
+ var val = await client.ExecuteForStringArrayResultAsync("HTTL", ["foo", "FIELDS", "1", "bar"]);
+ ClassicAssert.AreEqual(1, val.Length);
+ ClassicAssert.AreEqual("-2", val[0]);
+ }
+ }
+
+ [Test]
+ public async Task HPTTLACLsAsync()
+ {
+ await CheckCommandsAsync(
+ "HPTTL",
+ [DoHPETTLAsync]
+ );
+
+ static async Task DoHPETTLAsync(GarnetClient client)
+ {
+ var val = await client.ExecuteForStringArrayResultAsync("HPTTL", ["foo", "FIELDS", "1", "bar"]);
+ ClassicAssert.AreEqual(1, val.Length);
+ ClassicAssert.AreEqual("-2", val[0]);
+ }
+ }
+
+ [Test]
+ public async Task HPersistACLsAsync()
+ {
+ await CheckCommandsAsync(
+ "HPERSIST",
+ [DoHPersistAsync]
+ );
+
+ static async Task DoHPersistAsync(GarnetClient client)
+ {
+ var val = await client.ExecuteForStringArrayResultAsync("HPERSIST", ["foo", "FIELDS", "1", "bar"]);
+ ClassicAssert.AreEqual(1, val.Length);
+ ClassicAssert.AreEqual("-2", val[0]);
+ }
+ }
+
+ [Test]
+ public async Task HCollectACLsAsync()
+ {
+ await CheckCommandsAsync(
+ "HCOLLECT",
+ [DoHCollectAsync]
+ );
+
+ static async Task DoHCollectAsync(GarnetClient client)
+ {
+ var val = await client.ExecuteForStringResultAsync("HCOLLECT", ["foo"]);
+ ClassicAssert.AreEqual("OK", val);
+ }
+ }
+
[Test]
public async Task HDelACLsAsync()
{
diff --git a/test/Garnet.test/RespHashTests.cs b/test/Garnet.test/RespHashTests.cs
index e4a7b9b8d2..c8f5f01e56 100644
--- a/test/Garnet.test/RespHashTests.cs
+++ b/test/Garnet.test/RespHashTests.cs
@@ -7,6 +7,7 @@
using System.Threading.Tasks;
using Garnet.server;
using NUnit.Framework;
+using NUnit.Framework.Interfaces;
using NUnit.Framework.Legacy;
using StackExchange.Redis;
@@ -23,7 +24,7 @@ public class RespHashTests
public void Setup()
{
TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true);
- server = TestUtils.CreateGarnetServer(TestUtils.MethodTestDir, lowMemory: true);
+ server = TestUtils.CreateGarnetServer(TestUtils.MethodTestDir, enableReadCache: true, enableObjectStoreReadCache: true, lowMemory: true);
server.Start();
}
@@ -57,6 +58,33 @@ public void CanSetAndGetOnePair()
ClassicAssert.AreEqual("Tsavorite", r);
}
+ [Test]
+ public async Task CanSetAndGetOnePairWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+ db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite")]);
+ db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100));
+ string r = db.HashGet("user:user1", "Title");
+ ClassicAssert.AreEqual("Tsavorite", r);
+ await Task.Delay(200);
+ r = db.HashGet("user:user1", "Title");
+ ClassicAssert.IsNull(r);
+ }
+
+ [Test]
+ public async Task CanSetWithExpireAndRemoveExpireByCallingSetAgain()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+ db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite")]);
+ db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100));
+ db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite")]);
+ await Task.Delay(200);
+ string r = db.HashGet("user:user1", "Title");
+ ClassicAssert.AreEqual("Tsavorite", r);
+ }
+
[Test]
public void CanSetAndGetOnePairLarge()
{
@@ -102,8 +130,6 @@ public void CanSetAndGetMultiplePairs()
ClassicAssert.AreEqual("2021", result[1].ToString());
}
-
-
[Test]
public void CanDelSingleField()
{
@@ -116,6 +142,19 @@ public void CanDelSingleField()
ClassicAssert.AreEqual("2021", resultGet);
}
+ [Test]
+ public void CanDelWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+ db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite"), new HashEntry("Year", "2021")]);
+ db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100));
+ var result = db.HashDelete(new RedisKey("user:user1"), new RedisValue("Title"));
+ ClassicAssert.AreEqual(true, result);
+ string resultGet = db.HashGet("user:user1", "Year");
+ ClassicAssert.AreEqual("2021", resultGet);
+ }
+
[Test]
public void CanDeleteMultipleFields()
@@ -153,6 +192,23 @@ public void CanDoHLen()
ClassicAssert.AreEqual(3, result);
}
+ [Test]
+ public async Task CanDoHLenWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+ db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite"), new HashEntry("Year", "2021"), new HashEntry("Company", "Acme")]);
+ db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100));
+ var result = db.HashLength("user:user1");
+ ClassicAssert.AreEqual(3, result);
+ await Task.Delay(150);
+ result = db.HashLength("user:user1");
+ ClassicAssert.AreEqual(2, result);
+ db.HashSet("user:user1", [new HashEntry("Year", "new2021")]); // Trigger deletion of expired field
+ result = db.HashLength("user:user1");
+ ClassicAssert.AreEqual(2, result);
+ }
+
[Test]
public void CanDoGetAll()
{
@@ -167,6 +223,33 @@ public void CanDoGetAll()
ClassicAssert.IsTrue(hashEntries.OrderBy(e => e.Name).SequenceEqual(result.OrderBy(r => r.Name)));
}
+ [Test]
+ public async Task CanDoGetAllWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+ HashEntry[] hashEntries =
+ [new HashEntry("Title", "Tsavorite"), new HashEntry("Year", "2021"), new HashEntry("Company", "Acme")];
+ db.HashSet("user:user1", hashEntries);
+ db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100));
+
+ var result = db.HashGetAll("user:user1");
+ ClassicAssert.AreEqual(hashEntries.Length, result.Length);
+ ClassicAssert.AreEqual(hashEntries.Length, result.Select(r => r.Name).Distinct().Count());
+ ClassicAssert.IsTrue(hashEntries.OrderBy(e => e.Name).SequenceEqual(result.OrderBy(r => r.Name)));
+
+ await Task.Delay(200);
+
+ result = db.HashGetAll("user:user1");
+ ClassicAssert.AreEqual(hashEntries.Length - 1, result.Length);
+ ClassicAssert.IsTrue(hashEntries.Skip(1).OrderBy(e => e.Name).SequenceEqual(result.OrderBy(r => r.Name)));
+
+ db.HashSet("user:user1", [new HashEntry("Year", "new2021")]); // Trigger deletion of expired field
+ result = db.HashGetAll("user:user1");
+ ClassicAssert.AreEqual(hashEntries.Length - 1, result.Length);
+ ClassicAssert.IsTrue(hashEntries.Skip(1).Select(x => x.Value == "2021" ? new HashEntry(x.Name, "new2021") : x).OrderBy(e => e.Name).SequenceEqual(result.OrderBy(r => r.Name)));
+ }
+
[Test]
public void CanDoHExists()
{
@@ -180,6 +263,27 @@ public void CanDoHExists()
ClassicAssert.AreEqual(false, result);
}
+ [Test]
+ public async Task CanDoHExistsWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+ db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite"), new HashEntry("Year", "2021"), new HashEntry("Company", "Acme")]);
+ db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100));
+
+ var result = db.HashExists(new RedisKey("user:user1"), "Title");
+ ClassicAssert.IsTrue(result);
+
+ await Task.Delay(200);
+
+ result = db.HashExists(new RedisKey("user:user1"), "Title");
+ ClassicAssert.IsFalse(result);
+
+ db.HashSet("user:user1", [new HashEntry("Year", "new2021")]); // Trigger deletion of expired field
+ result = db.HashExists(new RedisKey("user:user1"), "Title");
+ ClassicAssert.IsFalse(result);
+ }
+
[Test]
public void CanDoHStrLen()
{
@@ -194,6 +298,27 @@ public void CanDoHStrLen()
ClassicAssert.AreEqual(0, r, 0);
}
+ [Test]
+ public async Task CanDoHStrLenWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+ db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite")]);
+ db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100));
+
+ long r = db.HashStringLength("user:user1", "Title");
+ ClassicAssert.AreEqual(9, r);
+
+ await Task.Delay(200);
+
+ r = db.HashStringLength("user:user1", "Title");
+ ClassicAssert.AreEqual(0, r);
+
+ db.HashSet("user:user1", [new HashEntry("Year", "new2021")]); // Trigger deletion of expired field
+ r = db.HashStringLength("user:user1", "Title");
+ ClassicAssert.AreEqual(0, r);
+ }
+
[Test]
public void CanDoHKeys()
{
@@ -208,6 +333,34 @@ public void CanDoHKeys()
ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Company")));
}
+ [Test]
+ public async Task CanDoHKeysWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+ db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite"), new HashEntry("Year", "2021"), new HashEntry("Company", "Acme")]);
+ db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100));
+ var result = db.HashKeys("user:user1");
+ ClassicAssert.AreEqual(3, result.Length);
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Title")));
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Year")));
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Company")));
+
+ await Task.Delay(200);
+
+ result = db.HashKeys("user:user1");
+ ClassicAssert.AreEqual(2, result.Length);
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Year")));
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Company")));
+
+ db.HashSet("user:user1", [new HashEntry("Year", "new2021")]); // Trigger deletion of expired field
+
+ result = db.HashKeys("user:user1");
+ ClassicAssert.AreEqual(2, result.Length);
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Year")));
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Company")));
+ }
+
[Test]
public void CanDoHVals()
@@ -223,6 +376,34 @@ public void CanDoHVals()
ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Acme")));
}
+ [Test]
+ public async Task CanDoHValsWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+ db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite"), new HashEntry("Year", "2021"), new HashEntry("Company", "Acme")]);
+ db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100));
+ var result = db.HashValues("user:user1");
+ ClassicAssert.AreEqual(3, result.Length);
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Tsavorite")));
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("2021")));
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Acme")));
+
+ await Task.Delay(200);
+
+ result = db.HashValues("user:user1");
+ ClassicAssert.AreEqual(2, result.Length);
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("2021")));
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Acme")));
+
+ db.HashSet("user:user1", [new HashEntry("Year", "new2021")]); // Trigger deletion of expired field
+
+ result = db.HashValues("user:user1");
+ ClassicAssert.AreEqual(2, result.Length);
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("new2021")));
+ ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Acme")));
+ }
+
[Test]
public void CanDoHIncrBy()
@@ -241,6 +422,22 @@ public void CanDoHIncrBy()
ClassicAssert.AreEqual(4, ((int?)getResult));
}
+ [Test]
+ public async Task CanDoHIncrByWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+ db.HashSet("user:user1", [new HashEntry("Field1", "StringValue"), new HashEntry("Field2", "1")]);
+ db.HashFieldExpire("user:user1", ["Field2"], TimeSpan.FromMilliseconds(100));
+ var result = db.HashIncrement(new RedisKey("user:user1"), new RedisValue("Field2"), -4);
+ ClassicAssert.AreEqual(-3, result);
+
+ await Task.Delay(200);
+
+ result = db.HashIncrement(new RedisKey("user:user1"), new RedisValue("Field2"), -4);
+ ClassicAssert.AreEqual(-4, result);
+ }
+
[Test]
public void CanDoHIncrByLTM()
{
@@ -283,6 +480,22 @@ public void CheckHashIncrementDoublePrecision()
ClassicAssert.AreEqual(3.3333333333, result, 1e-15);
}
+ [Test]
+ public async Task CheckHashIncrementDoublePrecisionWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+ db.HashSet("user:user1", [new HashEntry("Field1", "1.1111111111")]);
+ db.HashFieldExpire("user:user1", ["Field1"], TimeSpan.FromMilliseconds(100));
+ var result = db.HashIncrement(new RedisKey("user:user1"), new RedisValue("Field1"), 2.2222222222);
+ ClassicAssert.AreEqual(3.3333333333, result, 1e-15);
+
+ await Task.Delay(200);
+
+ result = db.HashIncrement(new RedisKey("user:user1"), new RedisValue("Field1"), 2.2222222222);
+ ClassicAssert.AreEqual(2.2222222222, result, 1e-15);
+ }
+
[Test]
public void CanDoHSETNXCommand()
{
@@ -296,6 +509,21 @@ public void CanDoHSETNXCommand()
ClassicAssert.AreEqual("Hello", result);
}
+ [Test]
+ public async Task CanDoHSETNXCommandWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+ db.HashSet(new RedisKey("user:user1"), new RedisValue("Field"), new RedisValue("Hello"));
+ db.HashFieldExpire("user:user1", ["Field"], TimeSpan.FromMilliseconds(100));
+ db.HashSet(new RedisKey("user:user1"), new RedisValue("Field"), new RedisValue("Hello"), When.NotExists);
+
+ await Task.Delay(200);
+
+ string result = db.HashGet("user:user1", "Field");
+ ClassicAssert.IsNull(result); // SetNX should not reset the expiration
+ }
+
[Test]
public void CanDoRandomField()
{
@@ -362,6 +590,45 @@ public void CanDoRandomField()
ClassicAssert.IsTrue(fieldsWithValues.All(e => hashDict.ContainsKey(e.Name) && hashDict[e.Name] == e.Value));
}
+ [Test]
+ public async Task CanDoRandomFieldWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+
+ var hashKey = new RedisKey("user:user1");
+ HashEntry[] hashEntries = [new HashEntry("Title", "Tsavorite")];
+ db.HashSet(hashKey, hashEntries);
+ db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100));
+ string field = db.HashRandomField(hashKey);
+ ClassicAssert.AreEqual(field, "Title");
+
+ await Task.Delay(200);
+
+ field = db.HashRandomField(hashKey);
+ ClassicAssert.IsNull(field);
+ }
+
+ [Test]
+ public async Task CanDoRandomFieldsWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+
+ var hashKey = new RedisKey("user:user1");
+ HashEntry[] hashEntries = [new HashEntry("Title", "Tsavorite")];
+ db.HashSet(hashKey, hashEntries);
+ db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100));
+ var field = db.HashRandomFields(hashKey, 10).Select(x => (string)x).ToArray();
+ ClassicAssert.AreEqual(field.Length, 1);
+ ClassicAssert.AreEqual("Title", field[0]);
+
+ await Task.Delay(200);
+
+ field = db.HashRandomFields(hashKey, 10).Select(x => (string)x).ToArray();
+ ClassicAssert.AreEqual(field.Length, 0);
+ }
+
[Test]
public void HashRandomFieldEmptyHash()
{
@@ -435,6 +702,27 @@ public void CanDoHashScan()
CollectionAssert.AreEquivalent(new[] { "email", "email1" }, fieldsStr);
}
+ [Test]
+ public async Task CanDoHashScanWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+
+ db.HashSet("user:user789", [new HashEntry("email", "email@example.com"), new HashEntry("email1", "email1@example.com"), new HashEntry("email2", "email2@example.com"), new HashEntry("email3", "email3@example.com"), new HashEntry("age", "25")]);
+ db.HashFieldExpire("user:user789", ["email"], TimeSpan.FromMilliseconds(100));
+
+ var members = db.HashScan("user:user789", "email*");
+ ClassicAssert.IsTrue(((IScanningCursor)members).Cursor == 0);
+ ClassicAssert.IsTrue(members.Count() == 4, "HSCAN with MATCH failed.");
+
+ await Task.Delay(200);
+
+ // HSCAN with match
+ members = db.HashScan("user:user789", "email*");
+ ClassicAssert.IsTrue(((IScanningCursor)members).Cursor == 0);
+ ClassicAssert.IsTrue(members.Count() == 3, "HSCAN with MATCH failed.");
+ }
+
[Test]
public void CanDoHashScanWithCursor()
@@ -517,6 +805,32 @@ public async Task CanDoHMGET()
#nullable disable
}
+ [Test]
+ public async Task CanDoHMGETWithExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+
+ db.HashSet("user:user789", [new HashEntry("email", "email@example.com"), new HashEntry("email1", "email1@example.com"), new HashEntry("email2", "email2@example.com"), new HashEntry("email3", "email3@example.com"), new HashEntry("age", "25")]);
+ db.HashFieldExpire("user:user789", ["email"], TimeSpan.FromMilliseconds(100));
+
+ var members = (string[])db.Execute("HMGET", "user:user789", "email", "email1");
+ ClassicAssert.AreEqual("email@example.com", members[0]);
+ ClassicAssert.AreEqual("email1@example.com", members[1]);
+
+ await Task.Delay(200);
+
+ members = (string[])db.Execute("HMGET", "user:user789", "email", "email1");
+ ClassicAssert.IsNull(members[0]);
+ ClassicAssert.AreEqual("email1@example.com", members[1]);
+
+ db.HashSet("user:user789", [new HashEntry("email2", "newemail2@example.com")]); // Trigger deletion of expired field
+
+ members = (string[])db.Execute("HMGET", "user:user789", "email", "email1");
+ ClassicAssert.IsNull(members[0]);
+ ClassicAssert.AreEqual("email1@example.com", members[1]);
+ }
+
[Test]
public async Task CanDoHGETALL()
@@ -693,7 +1007,282 @@ public void CheckHashOperationsOnWrongTypeObjectSE()
RespTestsUtils.CheckCommandOnWrongTypeObjectSE(() => db.HashStringLength(keys[0], hashFields[0][0]));
}
- #endregion
+ [Test]
+ public async Task CanDoHashExpire()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+ db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world"), new HashEntry("field3", "value3"), new HashEntry("field4", "value4"), new HashEntry("field5", "value5"), new HashEntry("field6", "value6")]);
+
+ var result = db.Execute("HEXPIRE", "myhash", "3", "FIELDS", "3", "field1", "field5", "nonexistfield");
+ var results = (RedisResult[])result;
+ ClassicAssert.AreEqual(3, results.Length);
+ ClassicAssert.AreEqual(1, (long)results[0]);
+ ClassicAssert.AreEqual(1, (long)results[1]);
+ ClassicAssert.AreEqual(-2, (long)results[2]);
+
+ result = db.Execute("HPEXPIRE", "myhash", "3000", "FIELDS", "2", "field2", "nonexistfield");
+ results = (RedisResult[])result;
+ ClassicAssert.AreEqual(2, results.Length);
+ ClassicAssert.AreEqual(1, (long)results[0]);
+ ClassicAssert.AreEqual(-2, (long)results[1]);
+
+ result = db.Execute("HEXPIREAT", "myhash", DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeSeconds().ToString(), "FIELDS", "2", "field3", "nonexistfield");
+ results = (RedisResult[])result;
+ ClassicAssert.AreEqual(2, results.Length);
+ ClassicAssert.AreEqual(1, (long)results[0]);
+ ClassicAssert.AreEqual(-2, (long)results[1]);
+
+ result = db.Execute("HPEXPIREAT", "myhash", DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeMilliseconds().ToString(), "FIELDS", "2", "field4", "nonexistfield");
+ results = (RedisResult[])result;
+ ClassicAssert.AreEqual(2, results.Length);
+ ClassicAssert.AreEqual(1, (long)results[0]);
+ ClassicAssert.AreEqual(-2, (long)results[1]);
+
+ var ttl = (RedisResult[])db.Execute("HTTL", "myhash", "FIELDS", "2", "field1", "nonexistfield");
+ ClassicAssert.AreEqual(2, ttl.Length);
+ ClassicAssert.LessOrEqual((long)ttl[0], 3);
+ ClassicAssert.Greater((long)ttl[0], 1);
+ ClassicAssert.AreEqual(-2, (long)ttl[1]);
+
+ ttl = (RedisResult[])db.Execute("HPTTL", "myhash", "FIELDS", "2", "field1", "nonexistfield");
+ ClassicAssert.AreEqual(2, ttl.Length);
+ ClassicAssert.LessOrEqual((long)ttl[0], 3000);
+ ClassicAssert.Greater((long)ttl[0], 1000);
+ ClassicAssert.AreEqual(-2, (long)ttl[1]);
+
+ ttl = (RedisResult[])db.Execute("HEXPIRETIME", "myhash", "FIELDS", "2", "field1", "nonexistfield");
+ ClassicAssert.AreEqual(2, ttl.Length);
+ ClassicAssert.LessOrEqual((long)ttl[0], DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeSeconds());
+ ClassicAssert.Greater((long)ttl[0], DateTimeOffset.UtcNow.AddSeconds(1).ToUnixTimeSeconds());
+ ClassicAssert.AreEqual(-2, (long)ttl[1]);
+
+ ttl = (RedisResult[])db.Execute("HPEXPIRETIME", "myhash", "FIELDS", "2", "field1", "nonexistfield");
+ ClassicAssert.AreEqual(2, ttl.Length);
+ ClassicAssert.LessOrEqual((long)ttl[0], DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeMilliseconds());
+ ClassicAssert.Greater((long)ttl[0], DateTimeOffset.UtcNow.AddSeconds(1).ToUnixTimeMilliseconds());
+ ClassicAssert.AreEqual(-2, (long)ttl[1]);
+
+ results = (RedisResult[])db.Execute("HPERSIST", "myhash", "FIELDS", "3", "field5", "field6", "nonexistfield");
+ ClassicAssert.AreEqual(3, results.Length);
+ ClassicAssert.AreEqual(1, (long)results[0]); // 1 the expiration was removed.
+ ClassicAssert.AreEqual(-1, (long)results[1]); // -1 if the field exists but has no associated expiration set.
+ ClassicAssert.AreEqual(-2, (long)results[2]);
+
+ await Task.Delay(3500);
+
+ var items = db.HashGetAll("myhash");
+ ClassicAssert.AreEqual(2, items.Length);
+ ClassicAssert.AreEqual("field5", items[0].Name.ToString());
+ ClassicAssert.AreEqual("value5", items[0].Value.ToString());
+ ClassicAssert.AreEqual("field6", items[1].Name.ToString());
+ ClassicAssert.AreEqual("value6", items[1].Value.ToString());
+
+ result = db.Execute("HEXPIRE", "myhash", "0", "FIELDS", "1", "field5");
+ results = (RedisResult[])result;
+ ClassicAssert.AreEqual(1, results.Length);
+ ClassicAssert.AreEqual(2, (long)results[0]);
+
+ result = db.Execute("HEXPIREAT", "myhash", DateTimeOffset.UtcNow.AddSeconds(-1).ToUnixTimeSeconds().ToString(), "FIELDS", "1", "field6");
+ results = (RedisResult[])result;
+ ClassicAssert.AreEqual(1, results.Length);
+ ClassicAssert.AreEqual(2, (long)results[0]);
+
+ items = db.HashGetAll("myhash");
+ ClassicAssert.AreEqual(0, items.Length);
+ }
+
+ [Test]
+ public async Task CanDoHashExpireLTM()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig(allowAdmin: true));
+ var db = redis.GetDatabase(0);
+ var server = redis.GetServer(TestUtils.Address, TestUtils.Port);
+
+ string[] smallExpireKeys = ["user:user0", "user:user1"];
+ string[] largeExpireKeys = ["user:user2", "user:user3"];
+
+ foreach (var key in smallExpireKeys)
+ {
+ db.HashSet(key, [new HashEntry("Field1", "StringValue"), new HashEntry("Field2", "1")]);
+ db.Execute("HEXPIRE", key, "2", "FIELDS", "1", "Field1");
+ }
+
+ foreach (var key in largeExpireKeys)
+ {
+ db.HashSet(key, [new HashEntry("Field1", "StringValue"), new HashEntry("Field2", "1")]);
+ db.Execute("HEXPIRE", key, "4", "FIELDS", "1", "Field1");
+ }
+
+ // Create LTM (larger than memory) DB by inserting 100 keys
+ for (int i = 4; i < 100; i++)
+ {
+ var key = "user:user" + i;
+ db.HashSet(key, [new HashEntry("Field1", "StringValue"), new HashEntry("Field2", "1")]);
+ }
+
+ var info = TestUtils.GetStoreAddressInfo(server, includeReadCache: true, isObjectStore: true);
+ // Ensure data has spilled to disk
+ ClassicAssert.Greater(info.HeadAddress, info.BeginAddress);
+
+ await Task.Delay(2000);
+
+ var result = db.HashExists(smallExpireKeys[0], "Field1");
+ ClassicAssert.IsFalse(result);
+ result = db.HashExists(smallExpireKeys[1], "Field1");
+ ClassicAssert.IsFalse(result);
+ result = db.HashExists(largeExpireKeys[0], "Field1");
+ ClassicAssert.IsTrue(result);
+ result = db.HashExists(largeExpireKeys[1], "Field1");
+ ClassicAssert.IsTrue(result);
+ var ttl = db.HashFieldGetTimeToLive(largeExpireKeys[0], ["Field1"]);
+ ClassicAssert.AreEqual(ttl.Length, 1);
+ ClassicAssert.Greater(ttl[0], 0);
+ ClassicAssert.LessOrEqual(ttl[0], 2000);
+ ttl = db.HashFieldGetTimeToLive(largeExpireKeys[1], ["Field1"]);
+ ClassicAssert.AreEqual(ttl.Length, 1);
+ ClassicAssert.Greater(ttl[0], 0);
+ ClassicAssert.LessOrEqual(ttl[0], 2000);
+
+ await Task.Delay(2000);
+
+ result = db.HashExists(largeExpireKeys[0], "Field1");
+ ClassicAssert.IsFalse(result);
+ result = db.HashExists(largeExpireKeys[1], "Field1");
+ ClassicAssert.IsFalse(result);
+
+ var data = db.HashGetAll("user:user4");
+ ClassicAssert.AreEqual(2, data.Length);
+ ClassicAssert.AreEqual("Field1", data[0].Name.ToString());
+ ClassicAssert.AreEqual("StringValue", data[0].Value.ToString());
+ ClassicAssert.AreEqual("Field2", data[1].Name.ToString());
+ ClassicAssert.AreEqual("1", data[1].Value.ToString());
+ }
+
+ [Test]
+ public void CanDoHashExpireWithNonExistKey()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+
+ var result = db.Execute("HEXPIRE", "myhash", "3", "FIELDS", "1", "field1");
+ var results = (RedisResult[])result;
+ ClassicAssert.AreEqual(1, results.Length);
+ ClassicAssert.AreEqual(-2, (long)results[0]);
+ }
+
+ [Test]
+ public async Task CanDoHashCollect()
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig(allowAdmin: true));
+ var db = redis.GetDatabase(0);
+ var server = redis.GetServers().First();
+ db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world"), new HashEntry("field3", "value3"), new HashEntry("field4", "value4"), new HashEntry("field5", "value5"), new HashEntry("field6", "value6")]);
+
+ var result = db.Execute("HEXPIRE", "myhash", "1", "FIELDS", "2", "field1", "field2");
+ var results = (RedisResult[])result;
+ ClassicAssert.AreEqual(2, results.Length);
+ ClassicAssert.AreEqual(1, (long)results[0]);
+ ClassicAssert.AreEqual(1, (long)results[1]);
+
+ result = db.Execute("HEXPIRE", "myhash", "3", "FIELDS", "2", "field3", "field4");
+ results = (RedisResult[])result;
+ ClassicAssert.AreEqual(2, results.Length);
+ ClassicAssert.AreEqual(1, (long)results[0]);
+ ClassicAssert.AreEqual(1, (long)results[1]);
+
+ var originalMemory = (long)db.Execute("MEMORY", "USAGE", "myhash");
+
+ await Task.Delay(1200);
+
+ var newMemory = (long)db.Execute("MEMORY", "USAGE", "myhash");
+ ClassicAssert.AreEqual(newMemory, originalMemory);
+
+ var collectResult = (string)db.Execute("HCOLLECT", "myhash");
+ ClassicAssert.AreEqual("OK", collectResult);
+
+ newMemory = (long)db.Execute("MEMORY", "USAGE", "myhash");
+ ClassicAssert.Less(newMemory, originalMemory);
+ originalMemory = newMemory;
+
+ await Task.Delay(2200);
+
+ newMemory = (long)db.Execute("MEMORY", "USAGE", "myhash");
+ ClassicAssert.AreEqual(newMemory, originalMemory);
+
+ collectResult = (string)db.Execute("HCOLLECT", "*");
+ ClassicAssert.AreEqual("OK", collectResult);
+
+ newMemory = (long)db.Execute("MEMORY", "USAGE", "myhash");
+ ClassicAssert.Less(newMemory, originalMemory);
+ }
+
+ [Test]
+ [TestCase("HEXPIRE", "NX", Description = "Set expiry only when no expiration exists")]
+ [TestCase("HEXPIRE", "XX", Description = "Set expiry only when expiration exists")]
+ [TestCase("HEXPIRE", "GT", Description = "Set expiry only when new TTL is greater")]
+ [TestCase("HEXPIRE", "LT", Description = "Set expiry only when new TTL is less")]
+ [TestCase("HPEXPIRE", "NX", Description = "Set expiry only when no expiration exists")]
+ [TestCase("HPEXPIRE", "XX", Description = "Set expiry only when expiration exists")]
+ [TestCase("HPEXPIRE", "GT", Description = "Set expiry only when new TTL is greater")]
+ [TestCase("HPEXPIRE", "LT", Description = "Set expiry only when new TTL is less")]
+ [TestCase("HEXPIREAT", "NX", Description = "Set expiry only when no expiration exists")]
+ [TestCase("HEXPIREAT", "XX", Description = "Set expiry only when expiration exists")]
+ [TestCase("HEXPIREAT", "GT", Description = "Set expiry only when new TTL is greater")]
+ [TestCase("HEXPIREAT", "LT", Description = "Set expiry only when new TTL is less")]
+ [TestCase("HPEXPIREAT", "NX", Description = "Set expiry only when no expiration exists")]
+ [TestCase("HPEXPIREAT", "XX", Description = "Set expiry only when expiration exists")]
+ [TestCase("HPEXPIREAT", "GT", Description = "Set expiry only when new TTL is greater")]
+ [TestCase("HPEXPIREAT", "LT", Description = "Set expiry only when new TTL is less")]
+ public void CanDoHashExpireWithOptions(string command, string option)
+ {
+ using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig());
+ var db = redis.GetDatabase(0);
+
+ db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world"), new HashEntry("field3", "welcome"), new HashEntry("field4", "back")]);
+
+ (var expireTimeField1, var expireTimeField3, var newExpireTimeField) = command switch
+ {
+ "HEXPIRE" => ("1", "3", "2"),
+ "HPEXPIRE" => ("1000", "3000", "2000"),
+ "HEXPIREAT" => (DateTimeOffset.UtcNow.AddSeconds(1).ToUnixTimeSeconds().ToString(), DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeSeconds().ToString(), DateTimeOffset.UtcNow.AddSeconds(2).ToUnixTimeSeconds().ToString()),
+ "HPEXPIREAT" => (DateTimeOffset.UtcNow.AddSeconds(1).ToUnixTimeMilliseconds().ToString(), DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeMilliseconds().ToString(), DateTimeOffset.UtcNow.AddSeconds(2).ToUnixTimeMilliseconds().ToString()),
+ _ => throw new ArgumentException("Invalid command")
+ };
+
+ // Set initial TTLs on field1 and field3 (field2 is left without one)
+ db.Execute(command, "myhash", expireTimeField1, "FIELDS", "1", "field1");
+ db.Execute(command, "myhash", expireTimeField3, "FIELDS", "1", "field3");
+
+ // Try setting TTL with option
+ var result = (RedisResult[])db.Execute(command, "myhash", newExpireTimeField, option, "FIELDS", "3", "field1", "field2", "field3");
+
+ switch (option)
+ {
+ case "NX":
+ ClassicAssert.AreEqual(0, (long)result[0]); // field1 has TTL
+ ClassicAssert.AreEqual(1, (long)result[1]); // field2 no TTL
+ ClassicAssert.AreEqual(0, (long)result[2]); // field3 has TTL
+ break;
+ case "XX":
+ ClassicAssert.AreEqual(1, (long)result[0]); // field1 has TTL
+ ClassicAssert.AreEqual(0, (long)result[1]); // field2 no TTL
+ ClassicAssert.AreEqual(1, (long)result[2]); // field3 has TTL
+ break;
+ case "GT":
+ ClassicAssert.AreEqual(1, (long)result[0]); // 20 > 10
+ ClassicAssert.AreEqual(0, (long)result[1]); // no TTL = infinite
+ ClassicAssert.AreEqual(0, (long)result[2]); // 20 !> 30
+ break;
+ case "LT":
+ ClassicAssert.AreEqual(0, (long)result[0]); // 20 !< 10
+ ClassicAssert.AreEqual(1, (long)result[1]); // no TTL = infinite
+ ClassicAssert.AreEqual(1, (long)result[2]); // 20 < 30
+ break;
+ }
+ }
+
+ #endregion
#region LightClientTests
diff --git a/website/docs/commands/api-compatibility.md b/website/docs/commands/api-compatibility.md
index 3e2e1f3d36..a5eb1adc4c 100644
--- a/website/docs/commands/api-compatibility.md
+++ b/website/docs/commands/api-compatibility.md
@@ -163,9 +163,9 @@ Note that this list is subject to change as we continue to expand our API comman
| | [GEOSEARCHSTORE](data-structures.md#geosearchstore) | ➕ | Partially Implemented |
| **HASH** | [HDEL](data-structures.md#hdel) | ➕ | |
| | [HEXISTS](data-structures.md#hexists) | ➕ | |
-| | HEXPIRE | ➖ | |
-| | HEXPIREAT | ➖ | |
-| | HEXPIRETIME | ➖ | |
+| | [HEXPIRE](data-structures.md#hexpire) | ➕ | |
+| | [HEXPIREAT](data-structures.md#hexpireat) | ➕ | |
+| | [HEXPIRETIME](data-structures.md#hexpiretime) | ➕ | |
| | [HGET](data-structures.md#hget) | ➕ | |
| | [HGETALL](data-structures.md#hgetall) | ➕ | |
| | [HINCRBY](data-structures.md#hincrby) | ➕ | |
@@ -174,17 +174,17 @@ Note that this list is subject to change as we continue to expand our API comman
| | [HLEN](data-structures.md#hlen) | ➕ | |
| | [HMGET](data-structures.md#hmget) | ➕ | |
| | [HMSET](data-structures.md#hmset) | ➕ | (Deprecated) |
-| | HPERSIST | ➖ | |
-| | HPEXPIRE | ➖ | |
-| | HPEXPIREAT | ➖ | |
-| | HPEXPIRETIME | ➖ | |
-| | HPTTL | ➖ | |
+| | [HPERSIST](data-structures.md#hpersist) | ➕ | |
+| | [HPEXPIRE](data-structures.md#hpexpire) | ➕ | |
+| | [HPEXPIREAT](data-structures.md#hpexpireat) | ➕ | |
+| | [HPEXPIRETIME](data-structures.md#hpexpiretime) | ➕ | |
+| | [HPTTL](data-structures.md#hpttl) | ➕ | |
| | [HRANDFIELD](data-structures.md#hrandfield) | ➕ | |
| | [HSCAN](data-structures.md#hscan) | ➕ | |
| | [HSET](data-structures.md#hset) | ➕ | |
| | [HSETNX](data-structures.md#hsetnx) | ➕ | |
| | [HSTRLEN](data-structures.md#hstrlen) | ➕ | |
-| | HTTL | ➖ | |
+| | [HTTL](data-structures.md#httl) | ➕ | |
| | [HVALS](data-structures.md#hvals) | ➕ | |
| **HYPERLOGLOG** | [PFADD](analytics.md#pfadd) | ➕ | |
| | [PFCOUNT](analytics.md#pfcount) | ➕ | |
diff --git a/website/docs/commands/data-structures.md b/website/docs/commands/data-structures.md
index fce8c59ceb..e0106fe51e 100644
--- a/website/docs/commands/data-structures.md
+++ b/website/docs/commands/data-structures.md
@@ -218,6 +218,241 @@ Returns all values in the hash stored at **key**.
---
+### HEXPIRE
+
+#### Syntax
+
+```bash
+ HEXPIRE key seconds [NX | XX | GT | LT] FIELDS numfields field [field ...]
+```
+
+Sets a timeout on one or more fields of a hash key. After the timeout has expired, the fields will automatically be deleted. The timeout is specified in seconds.
+
+The command supports several options to control when the expiration should be set:
+
+* **NX:** Only set expiry on fields that have no existing expiry
+* **XX:** Only set expiry on fields that already have an expiry set
+* **GT:** Only set expiry when it's greater than the current expiry
+* **LT:** Only set expiry when it's less than the current expiry
+
+The **NX**, **XX**, **GT**, and **LT** options are mutually exclusive.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* 1 if the timeout was set
+* 0 if the field doesn't exist
+* -1 if timeout was not set due to condition not being met
+
+---
+
+### HEXPIREAT
+
+#### Syntax
+
+```bash
+ HEXPIREAT key unix-time-seconds [NX | XX | GT | LT] FIELDS numfields field [field ...]
+```
+
+Sets an absolute expiration time (Unix timestamp in seconds) for one or more hash fields. After the timestamp has passed, the fields will automatically be deleted.
+
+The command supports several options to control when the expiration should be set:
+
+* **NX:** Only set expiry on fields that have no existing expiry
+* **XX:** Only set expiry on fields that already have an expiry set
+* **GT:** Only set expiry when it's greater than the current expiry
+* **LT:** Only set expiry when it's less than the current expiry
+
+The **NX**, **XX**, **GT**, and **LT** options are mutually exclusive.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* 1 if the timeout was set
+* 0 if the field doesn't exist
+* -1 if timeout was not set due to condition not being met
+
+---
+
+### HPEXPIRE
+
+#### Syntax
+
+```bash
+ HPEXPIRE key milliseconds [NX | XX | GT | LT] FIELDS numfields field [field ...]
+```
+
+Similar to HEXPIRE but the timeout is specified in milliseconds instead of seconds.
+
+The command supports several options to control when the expiration should be set:
+
+* **NX:** Only set expiry on fields that have no existing expiry
+* **XX:** Only set expiry on fields that already have an expiry set
+* **GT:** Only set expiry when it's greater than the current expiry
+* **LT:** Only set expiry when it's less than the current expiry
+
+The **NX**, **XX**, **GT**, and **LT** options are mutually exclusive.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* 1 if the timeout was set
+* 0 if the field doesn't exist
+* -1 if timeout was not set due to condition not being met
+
+---
+
+### HPEXPIREAT
+
+#### Syntax
+
+```bash
+ HPEXPIREAT key unix-time-milliseconds [NX | XX | GT | LT] FIELDS numfields field [field ...]
+```
+
+Similar to HEXPIREAT but uses Unix timestamp in milliseconds instead of seconds.
+
+The command supports several options to control when the expiration should be set:
+
+* **NX:** Only set expiry on fields that have no existing expiry
+* **XX:** Only set expiry on fields that already have an expiry set
+* **GT:** Only set expiry when it's greater than the current expiry
+* **LT:** Only set expiry when it's less than the current expiry
+
+The **NX**, **XX**, **GT**, and **LT** options are mutually exclusive.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* 1 if the timeout was set
+* 0 if the field doesn't exist
+* -1 if timeout was not set due to condition not being met
+
+---
+
+### HTTL
+
+#### Syntax
+
+```bash
+ HTTL key FIELDS numfields field [field ...]
+```
+
+Returns the remaining time to live in seconds for one or more hash fields that have a timeout set.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* TTL in seconds if the field exists and has an expiry set
+* -1 if the field exists but has no expiry set
+* -2 if the field does not exist
+
+---
+
+### HPTTL
+
+#### Syntax
+
+```bash
+ HPTTL key FIELDS numfields field [field ...]
+```
+
+Similar to HTTL but returns the remaining time to live in milliseconds instead of seconds.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* TTL in milliseconds if the field exists and has an expiry set
+* -1 if the field exists but has no expiry set
+* -2 if the field does not exist
+
+---
+
+### HEXPIRETIME
+
+#### Syntax
+
+```bash
+ HEXPIRETIME key FIELDS numfields field [field ...]
+```
+
+Returns the absolute Unix timestamp (in seconds) at which the specified hash fields will expire.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* Unix timestamp in seconds when the field will expire
+* -1 if the field exists but has no expiry set
+* -2 if the field does not exist
+
+---
+
+### HPEXPIRETIME
+
+#### Syntax
+
+```bash
+ HPEXPIRETIME key FIELDS numfields field [field ...]
+```
+
+Similar to HEXPIRETIME but returns the expiry timestamp in milliseconds instead of seconds.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* Unix timestamp in milliseconds when the field will expire
+* -1 if the field exists but has no expiry set
+* -2 if the field does not exist
+
+---
+
+### HPERSIST
+
+#### Syntax
+
+```bash
+ HPERSIST key FIELDS numfields field [field ...]
+```
+
+Removes the expiration from the specified hash fields, making them persistent.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* 1 if the timeout was removed
+* 0 if the field exists but has no timeout
+* -1 if the field does not exist
+
+---
+
+### HCOLLECT
+
+#### Syntax
+
+```bash
+ HCOLLECT key [key ...]
+```
+
+Manually triggers cleanup of expired fields from memory for a given hash key.
+
+Use `*` as the key to collect it from all hash keys.
+
+#### Resp Reply
+
+Simple reply: OK response
+Error reply: ERR HCOLLECT scan already in progress
+
+---
+
## List
### BLMOVE
diff --git a/website/docs/commands/garnet-specific.md b/website/docs/commands/garnet-specific.md
index d98b6bcfa0..88248feb0f 100644
--- a/website/docs/commands/garnet-specific.md
+++ b/website/docs/commands/garnet-specific.md
@@ -42,6 +42,25 @@ Simple string reply: OK.
---
+### HCOLLECT
+
+#### Syntax
+
+```bash
+ HCOLLECT key [key ...]
+```
+
+Manually triggers cleanup of expired fields from memory for a given hash key.
+
+Use `*` as the key to collect it from all hash keys.
+
+#### Resp Reply
+
+Simple reply: OK response
+Error reply: ERR HCOLLECT scan already in progress
+
+---
+
### COSCAN
#### Syntax
diff --git a/website/docs/getting-started/configuration.md b/website/docs/getting-started/configuration.md
index b66cf15f89..ac0d130154 100644
--- a/website/docs/getting-started/configuration.md
+++ b/website/docs/getting-started/configuration.md
@@ -119,6 +119,7 @@ For all available command line settings, run `GarnetServer.exe -h` or `GarnetSer
| **WaitForCommit** | ```--aof-commit-wait``` | ```bool``` | | Wait for AOF to flush the commit before returning results to client. Warning: will greatly increase operation latency. |
| **AofSizeLimit** | ```--aof-size-limit``` | ```string``` | Memory size | Maximum size of AOF (rounds down to power of 2) after which unsafe truncation will be applied. Left empty AOF will grow without bound unless a checkpoint is taken |
| **CompactionFrequencySecs** | ```--compaction-freq``` | ```int``` | Integer in range:
[0, MaxValue] | Background hybrid log compaction frequency in seconds. 0 = disabled (compaction performed before checkpointing instead) |
+| **HashCollectFrequencySecs** | ```--hcollect-freq``` | ```int``` | Integer in range:
[0, MaxValue] | Frequency in seconds for the background task to perform Hash collection. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation. Use the HCOLLECT API to collect on-demand. |
| **CompactionType** | ```--compaction-type``` | ```LogCompactionType``` | None, Shift, Scan, Lookup | Hybrid log compaction type. Value options: None - No compaction, Shift - shift begin address without compaction (data loss), Scan - scan old pages and move live records to tail (no data loss), Lookup - lookup each record in compaction range, for record liveness checking using hash chain (no data loss) |
| **CompactionForceDelete** | ```--compaction-force-delete``` | ```bool``` | | Forcefully delete the inactive segments immediately after the compaction strategy (type) is applied. If false, take a checkpoint to actually delete the older data files from disk. |
| **CompactionMaxSegments** | ```--compaction-max-segments``` | ```int``` | Integer in range:
[0, MaxValue] | Number of log segments created on disk before compaction triggers. |