From 4c955b3e9c6000f848523d2564cdadee5b14140b Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Mon, 9 Dec 2024 11:31:06 +0530 Subject: [PATCH 01/24] WIP of HEXPIRE --- libs/resources/RespCommandsDocs.json | 316 +++++++++++++++++++++++++++ libs/resources/RespCommandsInfo.json | 100 +++++++++ 2 files changed, 416 insertions(+) diff --git a/libs/resources/RespCommandsDocs.json b/libs/resources/RespCommandsDocs.json index 1c6b34e89f..bc5073e233 100644 --- a/libs/resources/RespCommandsDocs.json +++ b/libs/resources/RespCommandsDocs.json @@ -2790,6 +2790,164 @@ } ] }, + { + "Command": "HEXPIRE", + "Name": "HEXPIRE", + "Summary": "Set expiry for hash field using relative time to expire (seconds)", + "Group": "Hash", + "Complexity": "O(N) where N is the number of specified fields", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandKeyArgument", + "Name": "KEY", + "DisplayText": "key", + "Type": "Key", + "KeySpecIndex": 0 + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "SECONDS", + "DisplayText": "seconds", + "Type": "Integer" + }, + { + "TypeDiscriminator": "RespCommandContainerArgument", + "Name": "CONDITION", + "Type": "OneOf", + "ArgumentFlags": "Optional", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "NX", + "DisplayText": "nx", + "Type": "PureToken", + "Token": "NX" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "XX", + "DisplayText": "xx", + "Type": "PureToken", + "Token": "XX" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "GT", + "DisplayText": "gt", + "Type": "PureToken", + "Token": "GT" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "LT", + "DisplayText": "lt", + "Type": "PureToken", + "Token": "LT" + } + ] + }, + { + "TypeDiscriminator": "RespCommandContainerArgument", + "Name": "FIELDS", + "Type": "Block", + "Token": "FIELDS", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "NUMFIELDS", + "DisplayText": "numfields", + "Type": "Integer" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "FIELD", + "DisplayText": "field", + "Type": "String", + "ArgumentFlags": "Multiple" + } + ] + } + ] + }, + { + "Command": "HEXPIREAT", + "Name": "HEXPIREAT", + "Summary": "Set expiry for hash field using an absolute Unix timestamp (seconds)", + "Group": "Hash", + "Complexity": "O(N) where N is the number of specified fields", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandKeyArgument", + "Name": "KEY", + "DisplayText": "key", + "Type": "Key", + "KeySpecIndex": 0 + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "UNIX-TIME-SECONDS", + "DisplayText": "unix-time-seconds", + "Type": "UnixTime" + }, + { + "TypeDiscriminator": "RespCommandContainerArgument", + "Name": "CONDITION", + "Type": "OneOf", + "ArgumentFlags": "Optional", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "NX", + "DisplayText": "nx", + "Type": "PureToken", + "Token": "NX" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "XX", + "DisplayText": "xx", + "Type": "PureToken", + "Token": "XX" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "GT", + "DisplayText": "gt", + "Type": "PureToken", + "Token": "GT" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "LT", + "DisplayText": "lt", + "Type": "PureToken", + "Token": "LT" + } + ] + }, + { + "TypeDiscriminator": "RespCommandContainerArgument", + "Name": "FIELDS", + "Type": "Block", + 
"Token": "FIELDS", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "NUMFIELDS", + "DisplayText": "numfields", + "Type": "Integer" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "FIELD", + "DisplayText": "field", + "Type": "String", + "ArgumentFlags": "Multiple" + } + ] + } + ] + }, { "Command": "HGET", "Name": "HGET", @@ -2977,6 +3135,164 @@ } ] }, + { + "Command": "HPEXPIRE", + "Name": "HPEXPIRE", + "Summary": "Set expiry for hash field using relative time to expire (milliseconds)", + "Group": "Hash", + "Complexity": "O(N) where N is the number of specified fields", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandKeyArgument", + "Name": "KEY", + "DisplayText": "key", + "Type": "Key", + "KeySpecIndex": 0 + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "MILLISECONDS", + "DisplayText": "milliseconds", + "Type": "Integer" + }, + { + "TypeDiscriminator": "RespCommandContainerArgument", + "Name": "CONDITION", + "Type": "OneOf", + "ArgumentFlags": "Optional", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "NX", + "DisplayText": "nx", + "Type": "PureToken", + "Token": "NX" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "XX", + "DisplayText": "xx", + "Type": "PureToken", + "Token": "XX" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "GT", + "DisplayText": "gt", + "Type": "PureToken", + "Token": "GT" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "LT", + "DisplayText": "lt", + "Type": "PureToken", + "Token": "LT" + } + ] + }, + { + "TypeDiscriminator": "RespCommandContainerArgument", + "Name": "FIELDS", + "Type": "Block", + "Token": "FIELDS", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "NUMFIELDS", + "DisplayText": "numfields", + "Type": "Integer" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "FIELD", + "DisplayText": "field", + "Type": "String", + "ArgumentFlags": "Multiple" + } + ] + } + ] + }, + { + "Command": "HPEXPIREAT", + "Name": "HPEXPIREAT", + "Summary": "Set expiry for hash field using an absolute Unix timestamp (milliseconds)", + "Group": "Hash", + "Complexity": "O(N) where N is the number of specified fields", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandKeyArgument", + "Name": "KEY", + "DisplayText": "key", + "Type": "Key", + "KeySpecIndex": 0 + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "UNIX-TIME-MILLISECONDS", + "DisplayText": "unix-time-milliseconds", + "Type": "UnixTime" + }, + { + "TypeDiscriminator": "RespCommandContainerArgument", + "Name": "CONDITION", + "Type": "OneOf", + "ArgumentFlags": "Optional", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "NX", + "DisplayText": "nx", + "Type": "PureToken", + "Token": "NX" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "XX", + "DisplayText": "xx", + "Type": "PureToken", + "Token": "XX" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "GT", + "DisplayText": "gt", + "Type": "PureToken", + "Token": "GT" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "LT", + "DisplayText": "lt", + "Type": "PureToken", + "Token": "LT" + } + ] + }, + { + "TypeDiscriminator": "RespCommandContainerArgument", + "Name": "FIELDS", + "Type": "Block", + "Token": "FIELDS", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "NUMFIELDS", + "DisplayText": 
"numfields", + "Type": "Integer" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "FIELD", + "DisplayText": "field", + "Type": "String", + "ArgumentFlags": "Multiple" + } + ] + } + ] + }, { "Command": "HRANDFIELD", "Name": "HRANDFIELD", diff --git a/libs/resources/RespCommandsInfo.json b/libs/resources/RespCommandsInfo.json index ccaa73b7d2..826c6cf00f 100644 --- a/libs/resources/RespCommandsInfo.json +++ b/libs/resources/RespCommandsInfo.json @@ -1596,6 +1596,56 @@ } ] }, + { + "Command": "HEXPIRE", + "Name": "HEXPIRE", + "Arity": -6, + "Flags": "DenyOom, Fast, Write", + "FirstKey": 1, + "LastKey": 1, + "Step": 1, + "AclCategories": "Hash, Fast, Write", + "KeySpecifications": [ + { + "BeginSearch": { + "TypeDiscriminator": "BeginSearchIndex", + "Index": 1 + }, + "FindKeys": { + "TypeDiscriminator": "FindKeysRange", + "LastKey": 0, + "KeyStep": 1, + "Limit": 0 + }, + "Flags": "RW, Update" + } + ] + }, + { + "Command": "HEXPIREAT", + "Name": "HEXPIREAT", + "Arity": -6, + "Flags": "DenyOom, Fast, Write", + "FirstKey": 1, + "LastKey": 1, + "Step": 1, + "AclCategories": "Hash, Fast, Write", + "KeySpecifications": [ + { + "BeginSearch": { + "TypeDiscriminator": "BeginSearchIndex", + "Index": 1 + }, + "FindKeys": { + "TypeDiscriminator": "FindKeysRange", + "LastKey": 0, + "KeyStep": 1, + "Limit": 0 + }, + "Flags": "RW, Update" + } + ] + }, { "Command": "HGET", "Name": "HGET", @@ -1802,6 +1852,56 @@ } ] }, + { + "Command": "HPEXPIRE", + "Name": "HPEXPIRE", + "Arity": -6, + "Flags": "DenyOom, Fast, Write", + "FirstKey": 1, + "LastKey": 1, + "Step": 1, + "AclCategories": "Hash, Fast, Write", + "KeySpecifications": [ + { + "BeginSearch": { + "TypeDiscriminator": "BeginSearchIndex", + "Index": 1 + }, + "FindKeys": { + "TypeDiscriminator": "FindKeysRange", + "LastKey": 0, + "KeyStep": 1, + "Limit": 0 + }, + "Flags": "RW, Update" + } + ] + }, + { + "Command": "HPEXPIREAT", + "Name": "HPEXPIREAT", + "Arity": -6, + "Flags": "DenyOom, Fast, Write", + "FirstKey": 1, + "LastKey": 1, + "Step": 1, + "AclCategories": "Hash, Fast, Write", + "KeySpecifications": [ + { + "BeginSearch": { + "TypeDiscriminator": "BeginSearchIndex", + "Index": 1 + }, + "FindKeys": { + "TypeDiscriminator": "FindKeysRange", + "LastKey": 0, + "KeyStep": 1, + "Limit": 0 + }, + "Flags": "RW, Update" + } + ] + }, { "Command": "HRANDFIELD", "Name": "HRANDFIELD", From e7695e25104860385a1819df2af85251387291ae Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Mon, 9 Dec 2024 11:33:27 +0530 Subject: [PATCH 02/24] WIP of EXPIRE --- libs/server/API/GarnetApiObjectCommands.cs | 4 + libs/server/API/IGarnetApi.cs | 11 + libs/server/Objects/Hash/HashObject.cs | 236 +++++++++++++++++- libs/server/Objects/Hash/HashObjectImpl.cs | 89 +++++-- libs/server/Resp/ClientCommands.cs | 2 +- libs/server/Resp/CmdStrings.cs | 6 +- libs/server/Resp/Objects/HashCommands.cs | 142 +++++++++++ libs/server/Resp/Objects/ListCommands.cs | 4 +- libs/server/Resp/Parser/RespCommand.cs | 8 + libs/server/Resp/RespServerSession.cs | 4 + .../Storage/Session/ObjectStore/HashOps.cs | 32 +++ .../CommandInfoUpdater/SupportedCommand.cs | 4 + test/Garnet.test/RespHashTests.cs | 146 +++++++++++ 13 files changed, 656 insertions(+), 32 deletions(-) diff --git a/libs/server/API/GarnetApiObjectCommands.cs b/libs/server/API/GarnetApiObjectCommands.cs index 86bc00a04f..2cbdeb0b0c 100644 --- a/libs/server/API/GarnetApiObjectCommands.cs +++ b/libs/server/API/GarnetApiObjectCommands.cs @@ -469,6 +469,10 @@ public GarnetStatus HashIncrement(byte[] key, ArgSlice input, out 
ObjectOutputHe public GarnetStatus HashIncrement(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter) => storageSession.HashIncrement(key, ref input, ref outputFooter, ref objectContext); + /// + public GarnetStatus HashExpire(ArgSlice key, long expireAt, bool isMilliseconds, ExpireOption expireOption, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter) + => storageSession.HashExpire(key, expireAt, isMilliseconds, expireOption, ref input, ref outputFooter, ref objectContext); + /// public GarnetStatus HashScan(ArgSlice key, long cursor, string match, int count, out ArgSlice[] items) => storageSession.ObjectScan(GarnetObjectType.Hash, key, cursor, match, count, out items, ref objectContext); diff --git a/libs/server/API/IGarnetApi.cs b/libs/server/API/IGarnetApi.cs index b1676d1391..c5ab27bd69 100644 --- a/libs/server/API/IGarnetApi.cs +++ b/libs/server/API/IGarnetApi.cs @@ -952,6 +952,17 @@ public interface IGarnetApi : IGarnetReadApi, IGarnetAdvancedApi /// GarnetStatus HashIncrement(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter); + /// + /// Sets an expiration time on a hash field. + /// + /// The key of the hash. + /// The expiration time in Unix timestamp format. + /// The expiration option to apply. + /// The input object containing additional parameters. + /// The output object to store the result. + /// The status of the operation. + GarnetStatus HashExpire(ArgSlice key, long expireAt, bool isMilliseconds, ExpireOption expireOption, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter); + #endregion #region BitMaps Methods diff --git a/libs/server/Objects/Hash/HashObject.cs b/libs/server/Objects/Hash/HashObject.cs index bfa3a8b410..4ddc8a160e 100644 --- a/libs/server/Objects/Hash/HashObject.cs +++ b/libs/server/Objects/Hash/HashObject.cs @@ -5,6 +5,8 @@ using System.Collections.Generic; using System.Diagnostics; using System.IO; +using System.Linq; +using System.Runtime.CompilerServices; using Garnet.common; using Tsavorite.core; @@ -17,6 +19,8 @@ namespace Garnet.server /// public enum HashOperation : byte { + HCOLLECT, + HEXPIRE, HGET, HMGET, HSET, @@ -42,6 +46,8 @@ public enum HashOperation : byte public unsafe partial class HashObject : GarnetObjectBase { readonly Dictionary hash; + Dictionary expirationTimes; + PriorityQueue expirationQueue; /// /// Constructor @@ -58,6 +64,7 @@ public HashObject(long expiration = 0) public HashObject(BinaryReader reader) : base(reader, MemoryUtils.DictionaryOverhead) { + // TODO: Handle deserialization of expiration times hash = new Dictionary(ByteArrayComparer.Instance); int count = reader.ReadInt32(); @@ -69,15 +76,34 @@ public HashObject(BinaryReader reader) this.UpdateSize(item, value); } + + int expireCount = reader.ReadInt32(); + // TODO: Can we delete expired items during serialization and deserialization? 
+ if (expireCount > 0) + { + expirationTimes = new Dictionary(ByteArrayComparer.Instance); + expirationQueue = new PriorityQueue(); + for (int i = 0; i < count; i++) + { + var item = reader.ReadBytes(reader.ReadInt32()); + var value = reader.ReadInt64(); + expirationTimes.Add(item, value); + expirationQueue.Enqueue(item, value); + + // TODO: Update size + } + } } /// /// Copy constructor /// - public HashObject(Dictionary hash, long expiration, long size) + public HashObject(Dictionary hash, Dictionary expirationTimes, PriorityQueue expirationQueue, long expiration, long size) : base(expiration, size) { this.hash = hash; + this.expirationTimes = expirationTimes; + this.expirationQueue = expirationQueue; } /// @@ -98,6 +124,23 @@ public override void DoSerialize(BinaryWriter writer) writer.Write(kvp.Value); count--; } + + if (expirationTimes is not null) + { + // TODO: Can we delete expired items during serialization and deserialization? + writer.Write(expirationTimes.Count); + foreach (var kvp in expirationTimes) + { + writer.Write(kvp.Key.Length); + writer.Write(kvp.Key); + writer.Write(kvp.Value); + } + } + else + { + // TODO: This will break backward compatibility, Do we need to handle this? + writer.Write(0); + } Debug.Assert(count == 0); } @@ -105,7 +148,7 @@ public override void DoSerialize(BinaryWriter writer) public override void Dispose() { } /// - public override GarnetObjectBase Clone() => new HashObject(hash, Expiration, Size); + public override GarnetObjectBase Clone() => new HashObject(hash, expirationTimes, expirationQueue, Expiration, Size); /// public override unsafe bool Operate(ref ObjectInput input, ref SpanByteAndMemory output, out long sizeChange, out bool removeKey) @@ -152,6 +195,9 @@ public override unsafe bool Operate(ref ObjectInput input, ref SpanByteAndMemory case HashOperation.HEXISTS: HashExists(ref input, _output); break; + case HashOperation.HEXPIRE: + HashExpire(ref input, ref output); + break; case HashOperation.HKEYS: HashGetKeysOrValues(ref input, ref output); break; @@ -196,6 +242,7 @@ public override unsafe bool Operate(ref ObjectInput input, ref SpanByteAndMemory private void UpdateSize(ReadOnlySpan key, ReadOnlySpan value, bool add = true) { + // TODO: Should we consider the size of the key and value of the expire dictionary and queue? var size = Utility.RoundUp(key.Length, IntPtr.Size) + Utility.RoundUp(value.Length, IntPtr.Size) + (2 * MemoryUtils.ByteArrayOverhead) + MemoryUtils.DictionaryEntryOverhead; this.Size += add ? size : -size; @@ -217,8 +264,15 @@ public override unsafe void Scan(long start, out List items, out long cu // Hashset has key and value, so count is multiplied by 2 count = isNoValue ? count : count * 2; int index = 0; + var expiredKeysCount = 0; foreach (var item in hash) { + if (IsExpired(item.Key)) + { + expiredKeysCount++; + continue; + } + if (index < start) { index++; @@ -256,8 +310,184 @@ public override unsafe void Scan(long start, out List items, out long cu } // Indicates end of collection has been reached. 
- if (cursor == hash.Count) + if (cursor + expiredKeysCount == hash.Count) cursor = 0; } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private bool IsExpired(byte[] key) => expirationTimes is not null && expirationTimes.TryGetValue(key, out var expiration) && expiration < DateTimeOffset.UtcNow.Ticks; + + private void DeleteExpiredItems() + { + if (expirationTimes is null) + return; + + var hasValue = expirationQueue.TryPeek(out var key, out var expiration); + + if (!hasValue) + { + expirationTimes = null; + expirationQueue = null; + return; + } + + while (expiration < DateTimeOffset.UtcNow.Ticks) + { + expirationQueue.TryDequeue(out key, out _); + expirationTimes.Remove(key); + hash.Remove(key); + // TODO: Update size + hasValue = expirationQueue.TryPeek(out key, out expiration); + if (!hasValue) + { + expirationTimes = null; + expirationQueue = null; + break; + } + } + } + + private bool TryGetValue(byte[] key, out byte[] value) + { + value = default; + if (IsExpired(key)) + { + return false; + } + return hash.TryGetValue(key, out value); + } + + private bool Remove(byte[] key, out byte[] value) + { + DeleteExpiredItems(); + return hash.Remove(key, out value); + } + + private int Count() + { + if (expirationTimes is not null) + { + var expiredKeysCount = 0; + foreach (var item in expirationTimes) + { + if (IsExpired(item.Key)) + { + expiredKeysCount++; + } + } + + return hash.Count - expiredKeysCount; + } + + return hash.Count; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private bool HasExpirableItems() + { + return expirationTimes is not null; + } + + private bool ContainsKey(byte[] key) + { + var result = hash.ContainsKey(key); + if (result && IsExpired(key)) + { + return false; + } + + return result; + } + + private IEnumerable> AsEnumerable() + { + if (HasExpirableItems()) + { + // TODO: Check the performance of this implementation + return hash.Where(x => !IsExpired(x.Key)); + } + + return hash; + } + + private void Add(byte[] key, byte[] value) + { + DeleteExpiredItems(); + hash.Add(key, value); + } + + private void Set(byte[] key, byte[] value) + { + DeleteExpiredItems(); + hash[key] = value; + } + + private int SetExpire(byte[] key, long expiration, ExpireOption expireOption) + { + if (!ContainsKey(key)) + { + return -2; + } + + if (expiration <= DateTimeOffset.UtcNow.Ticks) + { + Remove(key, out _); + return 2; + } + + if (expirationTimes is null) + { + expirationTimes = new Dictionary(ByteArrayComparer.Instance); + expirationQueue = new PriorityQueue(); + } + + if (expirationTimes.TryGetValue(key, out var currentExpiration)) + { + if (expireOption.HasFlag(ExpireOption.NX)) + { + return 0; + } + + if (expireOption.HasFlag(ExpireOption.GT) && expiration <= currentExpiration) + { + return 0; + } + + if (expireOption.HasFlag(ExpireOption.LT) && expiration >= currentExpiration) + { + return 0; + } + } + else + { + if (expireOption.HasFlag(ExpireOption.XX)) + { + return 0; + } + } + + expirationTimes[key] = expiration; + expirationQueue.Enqueue(key, expiration); + return 1; + } + + private KeyValuePair ElementAt(int index) + { + if (HasExpirableItems()) + { + var currIndex = 0; + foreach (var item in AsEnumerable()) + { + if (currIndex == index) + { + return item; + } + } + + throw new ArgumentOutOfRangeException("index is outside the bounds of the source sequence."); + } + + return hash.ElementAt(index); + } } } \ No newline at end of file diff --git a/libs/server/Objects/Hash/HashObjectImpl.cs b/libs/server/Objects/Hash/HashObjectImpl.cs index 
674aebfd08..f75be3f570 100644 --- a/libs/server/Objects/Hash/HashObjectImpl.cs +++ b/libs/server/Objects/Hash/HashObjectImpl.cs @@ -33,7 +33,7 @@ private void HashGet(ref ObjectInput input, ref SpanByteAndMemory output) { var key = input.parseState.GetArgSliceByRef(0).SpanByte.ToByteArray(); - if (hash.TryGetValue(key, out var hashValue)) + if (TryGetValue(key, out var hashValue)) { while (!RespWriteUtils.WriteBulkString(hashValue, ref curr, end)) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); @@ -75,7 +75,7 @@ private void HashMultipleGet(ref ObjectInput input, ref SpanByteAndMemory output { var key = input.parseState.GetArgSliceByRef(i).SpanByte.ToByteArray(); - if (hash.TryGetValue(key, out var hashValue)) + if (TryGetValue(key, out var hashValue)) { while (!RespWriteUtils.WriteBulkString(hashValue, ref curr, end)) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); @@ -115,16 +115,16 @@ private void HashGetAll(ref ObjectInput input, ref SpanByteAndMemory output) { if (respProtocolVersion < 3) { - while (!RespWriteUtils.WriteArrayLength(hash.Count * 2, ref curr, end)) + while (!RespWriteUtils.WriteArrayLength(Count() * 2, ref curr, end)) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); } else { - while (!RespWriteUtils.WriteMapLength(hash.Count, ref curr, end)) + while (!RespWriteUtils.WriteMapLength(Count(), ref curr, end)) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); } - foreach (var item in hash) + foreach (var item in AsEnumerable()) { while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); @@ -151,7 +151,7 @@ private void HashDelete(ref ObjectInput input, byte* output) { var key = input.parseState.GetArgSliceByRef(i).SpanByte.ToByteArray(); - if (hash.Remove(key, out var hashValue)) + if (Remove(key, out var hashValue)) { _output->result1++; this.UpdateSize(key, hashValue, false); @@ -161,7 +161,7 @@ private void HashDelete(ref ObjectInput input, byte* output) private void HashLength(byte* output) { - ((ObjectOutputHeader*)output)->result1 = hash.Count; + ((ObjectOutputHeader*)output)->result1 = Count(); } private void HashStrLength(ref ObjectInput input, byte* output) @@ -170,7 +170,7 @@ private void HashStrLength(ref ObjectInput input, byte* output) *_output = default; var key = input.parseState.GetArgSliceByRef(0).SpanByte.ToByteArray(); - _output->result1 = hash.TryGetValue(key, out var hashValue) ? hashValue.Length : 0; + _output->result1 = TryGetValue(key, out var hashValue) ? hashValue.Length : 0; } private void HashExists(ref ObjectInput input, byte* output) @@ -179,7 +179,7 @@ private void HashExists(ref ObjectInput input, byte* output) *_output = default; var field = input.parseState.GetArgSliceByRef(0).SpanByte.ToByteArray(); - _output->result1 = hash.ContainsKey(field) ? 1 : 0; + _output->result1 = ContainsKey(field) ? 
1 : 0; } private void HashRandomField(ref ObjectInput input, ref SpanByteAndMemory output) @@ -204,11 +204,12 @@ private void HashRandomField(ref ObjectInput input, ref SpanByteAndMemory output { if (includedCount) { - if (countParameter > 0 && countParameter > hash.Count) - countParameter = hash.Count; + var count = Count(); + if (countParameter > 0 && countParameter > count) + countParameter = count; var absCount = Math.Abs(countParameter); - var indexes = RandomUtils.PickKRandomIndexes(hash.Count, absCount, seed, countParameter > 0); + var indexes = RandomUtils.PickKRandomIndexes(count, absCount, seed, countParameter > 0); // Write the size of the array reply while (!RespWriteUtils.WriteArrayLength(withValues ? absCount * 2 : absCount, ref curr, end)) @@ -216,7 +217,7 @@ private void HashRandomField(ref ObjectInput input, ref SpanByteAndMemory output foreach (var index in indexes) { - var pair = hash.ElementAt(index); + var pair = ElementAt(index); while (!RespWriteUtils.WriteBulkString(pair.Key, ref curr, end)) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); @@ -232,8 +233,8 @@ private void HashRandomField(ref ObjectInput input, ref SpanByteAndMemory output else // No count parameter is present, we just return a random field { // Write a bulk string value of a random field from the hash value stored at key. - var index = RandomUtils.PickRandomIndex(hash.Count, seed); - var pair = hash.ElementAt(index); + var index = RandomUtils.PickRandomIndex(Count(), seed); + var pair = ElementAt(index); while (!RespWriteUtils.WriteBulkString(pair.Key, ref curr, end)) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); countDone = 1; @@ -262,16 +263,16 @@ private void HashSet(ref ObjectInput input, byte* output) var key = input.parseState.GetArgSliceByRef(i).SpanByte.ToByteArray(); var value = input.parseState.GetArgSliceByRef(i + 1).SpanByte.ToByteArray(); - if (!hash.TryGetValue(key, out var hashValue)) + if (!TryGetValue(key, out var hashValue)) { - hash.Add(key, value); + Add(key, value); this.UpdateSize(key, value); _output->result1++; } else if ((hop == HashOperation.HSET || hop == HashOperation.HMSET) && hashValue != default && !hashValue.AsSpan().SequenceEqual(value)) { - hash[key] = value; + Set(key, value); // Skip overhead as existing item is getting replaced. 
this.Size += Utility.RoundUp(value.Length, IntPtr.Size) - Utility.RoundUp(hashValue.Length, IntPtr.Size); @@ -281,7 +282,7 @@ private void HashSet(ref ObjectInput input, byte* output) private void HashGetKeysOrValues(ref ObjectInput input, ref SpanByteAndMemory output) { - var count = hash.Count; + var count = Count(); var op = input.header.HashOp; var isMemory = false; @@ -297,7 +298,7 @@ private void HashGetKeysOrValues(ref ObjectInput input, ref SpanByteAndMemory ou while (!RespWriteUtils.WriteArrayLength(count, ref curr, end)) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - foreach (var item in hash) + foreach (var item in AsEnumerable()) { if (HashOperation.HKEYS == op) { @@ -343,7 +344,7 @@ private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output) var key = input.parseState.GetArgSliceByRef(0).SpanByte.ToByteArray(); var incrSlice = input.parseState.GetArgSliceByRef(1); - var valueExists = hash.TryGetValue(key, out var value); + var valueExists = TryGetValue(key, out var value); if (op == HashOperation.HINCRBY) { if (!NumUtils.TryParse(incrSlice.ReadOnlySpan, out int incr)) @@ -376,14 +377,14 @@ private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output) resultSpan = resultSpan.Slice(0, bytesWritten); resultBytes = resultSpan.ToArray(); - hash[key] = resultBytes; + Set(key, resultBytes); Size += Utility.RoundUp(resultBytes.Length, IntPtr.Size) - Utility.RoundUp(value.Length, IntPtr.Size); } else { resultBytes = incrSlice.SpanByte.ToByteArray(); - hash.Add(key, resultBytes); + Add(key, resultBytes); UpdateSize(key, resultBytes); } @@ -417,14 +418,14 @@ private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output) result += incr; resultBytes = Encoding.ASCII.GetBytes(result.ToString(CultureInfo.InvariantCulture)); - hash[key] = resultBytes; + Set(key, resultBytes); Size += Utility.RoundUp(resultBytes.Length, IntPtr.Size) - Utility.RoundUp(value.Length, IntPtr.Size); } else { resultBytes = incrSlice.SpanByte.ToByteArray(); - hash.Add(key, resultBytes); + Add(key, resultBytes); UpdateSize(key, resultBytes); } @@ -444,5 +445,43 @@ private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output) output.Length = (int)(curr - ptr); } } + + private void HashExpire(ref ObjectInput input, ref SpanByteAndMemory output) + { + var isMemory = false; + MemoryHandle ptrHandle = default; + var ptr = output.SpanByte.ToPointer(); + + var curr = ptr; + var end = curr + output.Length; + + ObjectOutputHeader _output = default; + try + { + var expireOption = (ExpireOption)input.arg1; + var expiration = input.parseState.GetLong(0); + var numFields = input.parseState.Count - 1; + while (!RespWriteUtils.WriteArrayLength(numFields, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + + foreach (var item in input.parseState.Parameters) + { + var result = SetExpire(item.ToArray(), expiration, expireOption); + while (!RespWriteUtils.WriteInteger(result, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + _output.result1++; + } + + DeleteExpiredItems(); + } + finally + { + while (!RespWriteUtils.WriteDirect(ref _output, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + + if (isMemory) ptrHandle.Dispose(); + output.Length = (int)(curr - ptr); + } + } } } \ No newline at end of file diff --git 
a/libs/server/Resp/ClientCommands.cs b/libs/server/Resp/ClientCommands.cs index 2d1a7889d6..ec5c46d6e2 100644 --- a/libs/server/Resp/ClientCommands.cs +++ b/libs/server/Resp/ClientCommands.cs @@ -273,7 +273,7 @@ private bool NetworkCLIENTKILL() { if (!ParseUtils.TryReadLong(ref value, out var idParsed)) { - return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericParamShouldBeGreaterThanZero, "client-id"))); + return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrShouldBeGreaterThanZero, "client-id"))); } if (id is not null) diff --git a/libs/server/Resp/CmdStrings.cs b/libs/server/Resp/CmdStrings.cs index e2c48bf690..7dc974b165 100644 --- a/libs/server/Resp/CmdStrings.cs +++ b/libs/server/Resp/CmdStrings.cs @@ -120,6 +120,7 @@ static partial class CmdStrings public static ReadOnlySpan BYLEX => "BYLEX"u8; public static ReadOnlySpan REV => "REV"u8; public static ReadOnlySpan LIMIT => "LIMIT"u8; + public static ReadOnlySpan FIELDS => "FIELDS"u8; /// /// Response strings @@ -209,6 +210,7 @@ static partial class CmdStrings public static ReadOnlySpan RESP_ERR_GT_LT_NX_NOT_COMPATIBLE => "ERR GT, LT, and/or NX options at the same time are not compatible"u8; public static ReadOnlySpan RESP_ERR_INCR_SUPPORTS_ONLY_SINGLE_PAIR => "ERR INCR option supports a single increment-element pair"u8; public static ReadOnlySpan RESP_ERR_INVALID_BITFIELD_TYPE => "ERR Invalid bitfield type. Use something like i16 u8. Note that u64 is not supported but i64 is"u8; + public static ReadOnlySpan RESP_ERR_INVALID_EXPIRE_TIME => "ERR invalid expire time, must be >= 0"u8; /// /// Response string templates @@ -220,9 +222,11 @@ static partial class CmdStrings public const string GenericErrWrongNumArgsTxn = "ERR Invalid number of parameters to stored proc {0}, expected {1}, actual {2}"; public const string GenericSyntaxErrorOption = "ERR Syntax error in {0} option '{1}'"; - public const string GenericParamShouldBeGreaterThanZero = "ERR {0} should be greater than 0"; + public const string GenericParamShouldBeGreaterThanZero = "ERR Parameter `{0}` should be greater than 0"; public const string GenericErrCantBeNegative = "ERR {0} can't be negative"; public const string GenericErrShouldBeGreaterThanZero = "ERR {0} should be greater than 0"; + public const string GenericErrMandatoryMissing = "Mandatory argument {0} is missing or not at the right position"; + public const string GenericErrMustMatchNoOfArgs = "The `{0}` parameter must match the number of arguments"; public const string GenericUnknownClientType = "ERR Unknown client type '{0}'"; public const string GenericErrDuplicateFilter = "ERR Filter '{0}' defined multiple times"; public const string GenericPubSubCommandDisabled = "ERR {0} is disabled, enable it with --pubsub option."; diff --git a/libs/server/Resp/Objects/HashCommands.cs b/libs/server/Resp/Objects/HashCommands.cs index 818ae9bc9f..d8e149c66e 100644 --- a/libs/server/Resp/Objects/HashCommands.cs +++ b/libs/server/Resp/Objects/HashCommands.cs @@ -2,6 +2,8 @@ // Licensed under the MIT license. using System; +using System.Diagnostics; +using System.Text; using Garnet.common; using Tsavorite.core; @@ -569,5 +571,145 @@ private unsafe bool HashIncrement(RespCommand command, ref TGarnetAp } return true; } + + /// + /// Sets an expiration time for a field in the hash stored at key. 
+ /// + /// + /// + /// + /// + private unsafe bool HashExpire(RespCommand command, ref TGarnetApi storageApi) + where TGarnetApi : IGarnetApi + { + if (storeWrapper.itemBroker == null) + throw new GarnetException("Object store is disabled"); + + if (parseState.Count <= 4) + { + return AbortWithWrongNumberOfArguments(nameof(RespCommand.HEXPIRE)); + } + + var key = parseState.GetArgSliceByRef(0); + + long expireAt = 0; + var isMilliseconds = false; + if (command == RespCommand.HEXPIRE || command == RespCommand.HPEXPIRE) + { + if (!parseState.TryGetInt(1, out var expireTime)) + { + return AbortWithErrorMessage(CmdStrings.RESP_ERR_GENERIC_VALUE_IS_NOT_INTEGER); + } + + if (expireTime < 0) + { + return AbortWithErrorMessage(CmdStrings.RESP_ERR_INVALID_EXPIRE_TIME); + } + expireAt = command == RespCommand.HEXPIRE ? DateTimeOffset.UtcNow.ToUnixTimeSeconds() + expireTime : DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() + expireTime; + isMilliseconds = command == RespCommand.HPEXPIRE; + } + else if (command == RespCommand.HEXPIREAT || command == RespCommand.HPEXPIREAT) + { + if (!parseState.TryGetLong(1, out expireAt)) + { + return AbortWithErrorMessage(CmdStrings.RESP_ERR_GENERIC_VALUE_IS_NOT_INTEGER); + } + if (expireAt < 0) + { + return AbortWithErrorMessage(CmdStrings.RESP_ERR_INVALID_EXPIRE_TIME); + } + isMilliseconds = command == RespCommand.HPEXPIREAT; + } + else + { + throw new UnreachableException("Can't reach this piece of code"); + } + + + var currIdx = 2; + if (parseState.TryGetExpireOption(currIdx, out var expireOption)) + { + currIdx++; // If expire option is present, move to next argument else continue with the current argument + } + + var fieldOption = parseState.GetArgSliceByRef(currIdx++); + if (!fieldOption.ReadOnlySpan.EqualsUpperCaseSpanIgnoringCase(CmdStrings.FIELDS)) + { + return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrMandatoryMissing, "FIELDS"))); + } + + if (!parseState.TryGetInt(currIdx++, out var numFields)) + { + return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericParamShouldBeGreaterThanZero, "numFields"))); + } + + if (parseState.Count != currIdx + numFields) + { + return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrMustMatchNoOfArgs, "numFields"))); + } + + var fieldsParseState = parseState.Slice(currIdx, numFields); + + // Prepare input + var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HEXPIRE }; + var input = new ObjectInput(header, ref fieldsParseState); + + var outputFooter = new GarnetObjectStoreOutput { spanByteAndMemory = new SpanByteAndMemory(dcurr, (int)(dend - dcurr)) }; + + var status = storageApi.HashExpire(key, expireAt, isMilliseconds, expireOption, ref input, ref outputFooter); + + switch (status) + { + case GarnetStatus.WRONGTYPE: + while (!RespWriteUtils.WriteError(CmdStrings.RESP_ERR_WRONG_TYPE, ref dcurr, dend)) + SendAndReset(); + break; + default: + ProcessOutputWithHeader(outputFooter.spanByteAndMemory); + break; + } + + return true; + } + + /// + /// Prunes expired entries from the hash stored at key. 
+ /// + /// + /// + /// + /// + /*private unsafe bool HashCollect(RespCommand command, ref TGarnetApi storageApi) + where TGarnetApi : IGarnetApi + { + if (parseState.Count != 1) + { + return AbortWithWrongNumberOfArguments("HCOLLECT"); + } + + var sbKey = parseState.GetArgSliceByRef(0).SpanByte; + var keyBytes = sbKey.ToByteArray(); + + // Prepare input + var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HCOLLECT }; + var input = new ObjectInput(header, ref parseState, startIdx: 1); + + var status = storageApi.HashCollect(keyBytes, ref input, out int output); + + switch (status) + { + case GarnetStatus.WRONGTYPE: + while (!RespWriteUtils.WriteError(CmdStrings.RESP_ERR_WRONG_TYPE, ref dcurr, dend)) + SendAndReset(); + break; + default: + // Returns number of fields got pruned + while (!RespWriteUtils.WriteInteger(output, ref dcurr, dend)) + SendAndReset(); + break; + } + + return true; + }*/ } } \ No newline at end of file diff --git a/libs/server/Resp/Objects/ListCommands.cs b/libs/server/Resp/Objects/ListCommands.cs index 9163d852d4..bebc9faaf4 100644 --- a/libs/server/Resp/Objects/ListCommands.cs +++ b/libs/server/Resp/Objects/ListCommands.cs @@ -196,7 +196,7 @@ private unsafe bool ListPopMultiple(ref TGarnetApi storageApi) // Read count of keys if (!parseState.TryGetInt(currTokenId++, out var numKeys)) { - var err = string.Format(CmdStrings.GenericParamShouldBeGreaterThanZero, "numkeys"); + var err = string.Format(CmdStrings.GenericErrShouldBeGreaterThanZero, "numkeys"); return AbortWithErrorMessage(Encoding.ASCII.GetBytes(err)); } @@ -237,7 +237,7 @@ private unsafe bool ListPopMultiple(ref TGarnetApi storageApi) // Read count if (!parseState.TryGetInt(currTokenId, out popCount)) { - var err = string.Format(CmdStrings.GenericParamShouldBeGreaterThanZero, "count"); + var err = string.Format(CmdStrings.GenericErrShouldBeGreaterThanZero, "count"); return AbortWithErrorMessage(Encoding.ASCII.GetBytes(err)); } } diff --git a/libs/server/Resp/Parser/RespCommand.cs b/libs/server/Resp/Parser/RespCommand.cs index 336c7d821c..896a52b722 100644 --- a/libs/server/Resp/Parser/RespCommand.cs +++ b/libs/server/Resp/Parser/RespCommand.cs @@ -104,6 +104,10 @@ public enum RespCommand : ushort GETEX, GETSET, HDEL, + HEXPIRE, + HPEXPIRE, + HEXPIREAT, + HPEXPIREAT, HINCRBY, HINCRBYFLOAT, HMSET, @@ -1208,6 +1212,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan { return RespCommand.HEXISTS; } + else if (*(ulong*)(ptr + 4) == MemoryMarshal.Read("HEXPIRE\r"u8) && *(byte*)(ptr + 12) == '\n') + { + return RespCommand.HEXPIRE; + } else if (*(ulong*)(ptr + 4) == MemoryMarshal.Read("HINCRBY\r"u8) && *(byte*)(ptr + 12) == '\n') { return RespCommand.HINCRBY; diff --git a/libs/server/Resp/RespServerSession.cs b/libs/server/Resp/RespServerSession.cs index f025e6cb3c..8ede1b8264 100644 --- a/libs/server/Resp/RespServerSession.cs +++ b/libs/server/Resp/RespServerSession.cs @@ -675,6 +675,10 @@ private bool ProcessArrayCommands(RespCommand cmd, ref TGarnetApi st RespCommand.HVALS => HashKeys(cmd, ref storageApi), RespCommand.HINCRBY => HashIncrement(cmd, ref storageApi), RespCommand.HINCRBYFLOAT => HashIncrement(cmd, ref storageApi), + RespCommand.HEXPIRE => HashExpire(cmd, ref storageApi), + RespCommand.HPEXPIRE => HashExpire(cmd, ref storageApi), + RespCommand.HEXPIREAT => HashExpire(cmd, ref storageApi), + RespCommand.HPEXPIREAT => HashExpire(cmd, ref storageApi), RespCommand.HSETNX => HashSet(cmd, ref storageApi), RespCommand.HRANDFIELD => 
HashRandomField(cmd, ref storageApi), RespCommand.HSCAN => ObjectScan(GarnetObjectType.Hash, ref storageApi), diff --git a/libs/server/Storage/Session/ObjectStore/HashOps.cs b/libs/server/Storage/Session/ObjectStore/HashOps.cs index aa61cee47e..fb7bb20678 100644 --- a/libs/server/Storage/Session/ObjectStore/HashOps.cs +++ b/libs/server/Storage/Session/ObjectStore/HashOps.cs @@ -2,6 +2,7 @@ // Licensed under the MIT license. using System; +using Garnet.common; using Tsavorite.core; namespace Garnet.server @@ -537,5 +538,36 @@ public GarnetStatus HashIncrement(byte[] key, ArgSlice input, ou public GarnetStatus HashIncrement(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) where TObjectContext : ITsavoriteContext => RMWObjectStoreOperationWithOutput(key, ref input, ref objectContext, ref outputFooter); + + /// + /// Sets the expiration time for the specified key. + /// + /// The type of the object context. + /// The key for which to set the expiration time. + /// The expiration time in ticks. + /// Indicates whether the expiration time is in milliseconds. + /// The expiration option to use. + /// The input object containing the operation details. + /// The output footer object to store the result. + /// The object context for the operation. + /// The status of the operation. + public GarnetStatus HashExpire(ArgSlice key, long expireAt, bool isMilliseconds, ExpireOption expireOption, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) + where TObjectContext : ITsavoriteContext + { + var expireAtUtc = isMilliseconds ? ConvertUtils.UnixTimestampInMillisecondsToTicks(expireAt) : ConvertUtils.UnixTimestampInSecondsToTicks(expireAt); + var expiryLength = NumUtils.NumDigitsInLong(expireAt); + var expirySlice = scratchBufferManager.CreateArgSlice(expiryLength); + var expirySpan = expirySlice.Span; + NumUtils.LongToSpanByte(expireAt, expirySpan); + + parseState.Initialize(1 + input.parseState.Count); + parseState.SetArgument(0, expirySlice); + parseState.SetArguments(1, input.parseState.Parameters); + + var innerInput = new ObjectInput(input.header, ref parseState, startIdx: 0, arg1: (int)expireOption); + + var status = RMWObjectStoreOperationWithOutput(key.ToArray(), ref innerInput, ref objectContext, ref outputFooter); + return GarnetStatus.OK; + } } } \ No newline at end of file diff --git a/playground/CommandInfoUpdater/SupportedCommand.cs b/playground/CommandInfoUpdater/SupportedCommand.cs index f57a69e4d3..ea76fe58c6 100644 --- a/playground/CommandInfoUpdater/SupportedCommand.cs +++ b/playground/CommandInfoUpdater/SupportedCommand.cs @@ -138,6 +138,10 @@ public class SupportedCommand new("HDEL", RespCommand.HDEL), new("HELLO", RespCommand.HELLO), new("HEXISTS", RespCommand.HEXISTS), + new("HEXPIRE", RespCommand.HEXPIRE), + new("HPEXPIRE", RespCommand.HPEXPIRE), + new("HEXPIREAT", RespCommand.HEXPIREAT), + new("HPEXPIREAT", RespCommand.HPEXPIREAT), new("HGET", RespCommand.HGET), new("HGETALL", RespCommand.HGETALL), new("HINCRBY", RespCommand.HINCRBY), diff --git a/test/Garnet.test/RespHashTests.cs b/test/Garnet.test/RespHashTests.cs index e4a7b9b8d2..463d88759d 100644 --- a/test/Garnet.test/RespHashTests.cs +++ b/test/Garnet.test/RespHashTests.cs @@ -693,6 +693,152 @@ public void CheckHashOperationsOnWrongTypeObjectSE() RespTestsUtils.CheckCommandOnWrongTypeObjectSE(() => db.HashStringLength(keys[0], hashFields[0][0])); } + [Test] + public void CanDoHashExpire() + { + using var redis = 
ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world")]); + + var result = db.Execute("HEXPIRE", "myhash", "10", "FIELDS", "2", "field1", "field2"); + var results = (RedisResult[])result; + ClassicAssert.AreEqual(2, results.Length); + ClassicAssert.AreEqual(1, (long)results[0]); // field1 success + ClassicAssert.AreEqual(1, (long)results[1]); // field2 success + + var ttl = (RedisResult[])db.Execute("HTTL", "myhash", "FIELDS", "2", "field1", "field2"); + ClassicAssert.AreEqual(2, ttl.Length); + ClassicAssert.IsTrue((long)ttl[0] <= 10); // field1 TTL + ClassicAssert.IsTrue((long)ttl[1] <= 10); // field2 TTL + } + + [Test] + [TestCase("NX", Description = "Set expiry only when no expiration exists")] + [TestCase("XX", Description = "Set expiry only when expiration exists")] + [TestCase("GT", Description = "Set expiry only when new TTL is greater")] + [TestCase("LT", Description = "Set expiry only when new TTL is less")] + public void CanDoHashExpireWithOptions(string option) + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + + db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world")]); + + // First set TTL for field1 only + db.Execute("HEXPIRE", "myhash", "20", "FIELDS", "1", "field1"); + + // Try setting TTL with option + var result = (RedisResult[])db.Execute("HEXPIRE", "myhash", "10", option, "FIELDS", "2", "field1", "field2"); + + switch (option) + { + case "NX": + ClassicAssert.AreEqual(0L, (long)result[0]); // field1 has TTL + ClassicAssert.AreEqual(1L, (long)result[1]); // field2 no TTL + break; + case "XX": + ClassicAssert.AreEqual(1L, (long)result[0]); // field1 has TTL + ClassicAssert.AreEqual(0L, (long)result[1]); // field2 no TTL + break; + case "GT": + ClassicAssert.AreEqual(0L, (long)result[0]); // 10 < 20 + ClassicAssert.AreEqual(1L, (long)result[1]); // no TTL = infinite + break; + case "LT": + ClassicAssert.AreEqual(1L, (long)result[0]); // 10 < 20 + ClassicAssert.AreEqual(0L, (long)result[1]); // no TTL = infinite + break; + } + } + + [Test] + public void CanDoHashExpireAt() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + + db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world")]); + + var futureTime = DateTimeOffset.UtcNow.AddSeconds(30).ToUnixTimeSeconds(); + var result = (RedisResult[])db.Execute("HEXPIREAT", "myhash", futureTime.ToString(), "FIELDS", "2", "field1", "field2"); + ClassicAssert.AreEqual(2, result.Length); + ClassicAssert.AreEqual(1L, (long)result[0]); + ClassicAssert.AreEqual(1L, (long)result[1]); + + var ttl = (RedisResult[])db.Execute("HTTL", "myhash", "FIELDS", "2", "field1", "field2"); + ClassicAssert.IsTrue((long)ttl[0] <= 30); + ClassicAssert.IsTrue((long)ttl[1] <= 30); + } + + [Test] + public void CanDoHashPreciseExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + + db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world")]); + + var result = (RedisResult[])db.Execute("HPEXPIRE", "myhash", "1000", "FIELDS", "2", "field1", "field2"); + ClassicAssert.AreEqual(2, result.Length); + ClassicAssert.AreEqual(1L, (long)result[0]); + ClassicAssert.AreEqual(1L, (long)result[1]); + + var pttl = (RedisResult[])db.Execute("HPTTL", "myhash", "FIELDS", "2", 
"field1", "field2"); + ClassicAssert.IsTrue((long)pttl[0] <= 1000); + ClassicAssert.IsTrue((long)pttl[1] <= 1000); + } + + [Test] + public void CanDoHashPreciseExpireAt() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + + db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world")]); + + var futureTimeMs = DateTimeOffset.UtcNow.AddSeconds(30).ToUnixTimeMilliseconds(); + var result = (RedisResult[])db.Execute("HPEXPIREAT", "myhash", futureTimeMs.ToString(), "FIELDS", "2", "field1", "field2"); + ClassicAssert.AreEqual(2, result.Length); + ClassicAssert.AreEqual(1L, (long)result[0]); + ClassicAssert.AreEqual(1L, (long)result[1]); + + var pttl = (RedisResult[])db.Execute("HPTTL", "myhash", "FIELDS", "2", "field1", "field2"); + ClassicAssert.IsTrue((long)pttl[0] <= 30000); + ClassicAssert.IsTrue((long)pttl[1] <= 30000); + } + + [Test] + public void TestHashExpireEdgeCases() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + + // Test with non-existent key + var result = (RedisResult[])db.Execute("HEXPIRE", "nonexistent", "10", "FIELDS", "1", "field1"); + ClassicAssert.AreEqual(1, result.Length); + ClassicAssert.AreEqual(-2L, (long)result[0]); // Key doesn't exist + + // Test with non-existent fields + db.HashSet("myhash", "field1", "hello"); + result = (RedisResult[])db.Execute("HEXPIRE", "myhash", "10", "FIELDS", "2", "field1", "nonexistent"); + ClassicAssert.AreEqual(2, result.Length); + ClassicAssert.AreEqual(1L, (long)result[0]); // Existing field + ClassicAssert.AreEqual(-2L, (long)result[1]); // Non-existent field + + // Test with zero TTL (should delete fields) + result = (RedisResult[])db.Execute("HEXPIRE", "myhash", "0", "FIELDS", "1", "field1"); + ClassicAssert.AreEqual(1, result.Length); + ClassicAssert.AreEqual(1L, (long)result[0]); + ClassicAssert.IsFalse(db.HashExists("myhash", "field1")); + + // Test with negative TTL (should delete fields) + db.HashSet("myhash", "field1", "hello"); + result = (RedisResult[])db.Execute("HEXPIRE", "myhash", "-1", "FIELDS", "1", "field1"); + ClassicAssert.AreEqual(1, result.Length); + ClassicAssert.AreEqual(1L, (long)result[0]); + ClassicAssert.IsFalse(db.HashExists("myhash", "field1")); + } + #endregion #region LightClientTests From 06e2d4c929ca2d51bc98105df031e8d8da19d8ea Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Mon, 9 Dec 2024 14:34:09 +0530 Subject: [PATCH 03/24] Initial impelementation of HEXIRE and family --- libs/resources/RespCommandsDocs.json | 185 ++++++++++++++++++ libs/resources/RespCommandsInfo.json | 125 ++++++++++++ libs/server/API/GarnetApiObjectCommands.cs | 8 + libs/server/API/GarnetWatchApi.cs | 7 + libs/server/API/IGarnetApi.cs | 20 ++ libs/server/Objects/Hash/HashObject.cs | 58 +++++- libs/server/Objects/Hash/HashObjectImpl.cs | 97 ++++++++- libs/server/Resp/Objects/HashCommands.cs | 173 ++++++++++++---- libs/server/Resp/Parser/RespCommand.cs | 37 ++++ libs/server/Resp/RespServerSession.cs | 5 + .../Storage/Session/ObjectStore/HashOps.cs | 39 +++- .../CommandInfoUpdater/SupportedCommand.cs | 5 + test/Garnet.test/RespHashTests.cs | 40 ++-- 13 files changed, 735 insertions(+), 64 deletions(-) diff --git a/libs/resources/RespCommandsDocs.json b/libs/resources/RespCommandsDocs.json index bc5073e233..153ef5bc62 100644 --- a/libs/resources/RespCommandsDocs.json +++ b/libs/resources/RespCommandsDocs.json @@ -2947,6 +2947,43 @@ ] } ] + }, + { + "Command": "HEXPIRETIME", + 
"Name": "HEXPIRETIME", + "Summary": "Returns the expiration time of a hash field as a Unix timestamp, in seconds.", + "Group": "Hash", + "Complexity": "O(N) where N is the number of specified fields", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandKeyArgument", + "Name": "KEY", + "DisplayText": "key", + "Type": "Key", + "KeySpecIndex": 0 + }, + { + "TypeDiscriminator": "RespCommandContainerArgument", + "Name": "FIELDS", + "Type": "Block", + "Token": "FIELDS", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "NUMFIELDS", + "DisplayText": "numfields", + "Type": "Integer" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "FIELD", + "DisplayText": "field", + "Type": "String", + "ArgumentFlags": "Multiple" + } + ] + } + ] }, { "Command": "HGET", @@ -3135,6 +3172,43 @@ } ] }, + { + "Command": "HPERSIST", + "Name": "HPERSIST", + "Summary": "Removes the expiration time for each specified field", + "Group": "Hash", + "Complexity": "O(N) where N is the number of specified fields", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandKeyArgument", + "Name": "KEY", + "DisplayText": "key", + "Type": "Key", + "KeySpecIndex": 0 + }, + { + "TypeDiscriminator": "RespCommandContainerArgument", + "Name": "FIELDS", + "Type": "Block", + "Token": "FIELDS", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "NUMFIELDS", + "DisplayText": "numfields", + "Type": "Integer" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "FIELD", + "DisplayText": "field", + "Type": "String", + "ArgumentFlags": "Multiple" + } + ] + } + ] + }, { "Command": "HPEXPIRE", "Name": "HPEXPIRE", @@ -3293,6 +3367,80 @@ } ] }, + { + "Command": "HPEXPIRETIME", + "Name": "HPEXPIRETIME", + "Summary": "Returns the expiration time of a hash field as a Unix timestamp, in msec.", + "Group": "Hash", + "Complexity": "O(N) where N is the number of specified fields", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandKeyArgument", + "Name": "KEY", + "DisplayText": "key", + "Type": "Key", + "KeySpecIndex": 0 + }, + { + "TypeDiscriminator": "RespCommandContainerArgument", + "Name": "FIELDS", + "Type": "Block", + "Token": "FIELDS", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "NUMFIELDS", + "DisplayText": "numfields", + "Type": "Integer" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "FIELD", + "DisplayText": "field", + "Type": "String", + "ArgumentFlags": "Multiple" + } + ] + } + ] + }, + { + "Command": "HPTTL", + "Name": "HPTTL", + "Summary": "Returns the TTL in milliseconds of a hash field.", + "Group": "Hash", + "Complexity": "O(N) where N is the number of specified fields", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandKeyArgument", + "Name": "KEY", + "DisplayText": "key", + "Type": "Key", + "KeySpecIndex": 0 + }, + { + "TypeDiscriminator": "RespCommandContainerArgument", + "Name": "FIELDS", + "Type": "Block", + "Token": "FIELDS", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "NUMFIELDS", + "DisplayText": "numfields", + "Type": "Integer" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "FIELD", + "DisplayText": "field", + "Type": "String", + "ArgumentFlags": "Multiple" + } + ] + } + ] + }, { "Command": "HRANDFIELD", "Name": "HRANDFIELD", @@ -3455,6 +3603,43 @@ } ] }, + { + "Command": "HTTL", + "Name": "HTTL", + "Summary": "Returns the TTL in seconds of a hash field.", + "Group": "Hash", + "Complexity": "O(N) 
where N is the number of specified fields", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandKeyArgument", + "Name": "KEY", + "DisplayText": "key", + "Type": "Key", + "KeySpecIndex": 0 + }, + { + "TypeDiscriminator": "RespCommandContainerArgument", + "Name": "FIELDS", + "Type": "Block", + "Token": "FIELDS", + "Arguments": [ + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "NUMFIELDS", + "DisplayText": "numfields", + "Type": "Integer" + }, + { + "TypeDiscriminator": "RespCommandBasicArgument", + "Name": "FIELD", + "DisplayText": "field", + "Type": "String", + "ArgumentFlags": "Multiple" + } + ] + } + ] + }, { "Command": "HVALS", "Name": "HVALS", diff --git a/libs/resources/RespCommandsInfo.json b/libs/resources/RespCommandsInfo.json index 826c6cf00f..ef99037dfc 100644 --- a/libs/resources/RespCommandsInfo.json +++ b/libs/resources/RespCommandsInfo.json @@ -1646,6 +1646,31 @@ } ] }, + { + "Command": "HEXPIRETIME", + "Name": "HEXPIRETIME", + "Arity": -5, + "Flags": "Fast, ReadOnly", + "FirstKey": 1, + "LastKey": 1, + "Step": 1, + "AclCategories": "Hash, Fast, Read", + "KeySpecifications": [ + { + "BeginSearch": { + "TypeDiscriminator": "BeginSearchIndex", + "Index": 1 + }, + "FindKeys": { + "TypeDiscriminator": "FindKeysRange", + "LastKey": 0, + "KeyStep": 1, + "Limit": 0 + }, + "Flags": "RO, Access" + } + ] + }, { "Command": "HGET", "Name": "HGET", @@ -1852,6 +1877,31 @@ } ] }, + { + "Command": "HPERSIST", + "Name": "HPERSIST", + "Arity": -5, + "Flags": "Fast, Write", + "FirstKey": 1, + "LastKey": 1, + "Step": 1, + "AclCategories": "Hash, Fast, Write", + "KeySpecifications": [ + { + "BeginSearch": { + "TypeDiscriminator": "BeginSearchIndex", + "Index": 1 + }, + "FindKeys": { + "TypeDiscriminator": "FindKeysRange", + "LastKey": 0, + "KeyStep": 1, + "Limit": 0 + }, + "Flags": "RW, Update" + } + ] + }, { "Command": "HPEXPIRE", "Name": "HPEXPIRE", @@ -1902,6 +1952,56 @@ } ] }, + { + "Command": "HPEXPIRETIME", + "Name": "HPEXPIRETIME", + "Arity": -5, + "Flags": "Fast, ReadOnly", + "FirstKey": 1, + "LastKey": 1, + "Step": 1, + "AclCategories": "Hash, Fast, Read", + "KeySpecifications": [ + { + "BeginSearch": { + "TypeDiscriminator": "BeginSearchIndex", + "Index": 1 + }, + "FindKeys": { + "TypeDiscriminator": "FindKeysRange", + "LastKey": 0, + "KeyStep": 1, + "Limit": 0 + }, + "Flags": "RO, Access" + } + ] + }, + { + "Command": "HPTTL", + "Name": "HPTTL", + "Arity": -5, + "Flags": "Fast, ReadOnly", + "FirstKey": 1, + "LastKey": 1, + "Step": 1, + "AclCategories": "Hash, Fast, Read", + "KeySpecifications": [ + { + "BeginSearch": { + "TypeDiscriminator": "BeginSearchIndex", + "Index": 1 + }, + "FindKeys": { + "TypeDiscriminator": "FindKeysRange", + "LastKey": 0, + "KeyStep": 1, + "Limit": 0 + }, + "Flags": "RO, Access" + } + ] + }, { "Command": "HRANDFIELD", "Name": "HRANDFIELD", @@ -2033,6 +2133,31 @@ } ] }, + { + "Command": "HTTL", + "Name": "HTTL", + "Arity": -5, + "Flags": "Fast, ReadOnly", + "FirstKey": 1, + "LastKey": 1, + "Step": 1, + "AclCategories": "Hash, Fast, Read", + "KeySpecifications": [ + { + "BeginSearch": { + "TypeDiscriminator": "BeginSearchIndex", + "Index": 1 + }, + "FindKeys": { + "TypeDiscriminator": "FindKeysRange", + "LastKey": 0, + "KeyStep": 1, + "Limit": 0 + }, + "Flags": "RO, Access" + } + ] + }, { "Command": "HVALS", "Name": "HVALS", diff --git a/libs/server/API/GarnetApiObjectCommands.cs b/libs/server/API/GarnetApiObjectCommands.cs index 2cbdeb0b0c..4fdab60d67 100644 --- a/libs/server/API/GarnetApiObjectCommands.cs +++ 
b/libs/server/API/GarnetApiObjectCommands.cs @@ -473,10 +473,18 @@ public GarnetStatus HashIncrement(byte[] key, ref ObjectInput input, ref GarnetO public GarnetStatus HashExpire(ArgSlice key, long expireAt, bool isMilliseconds, ExpireOption expireOption, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter) => storageSession.HashExpire(key, expireAt, isMilliseconds, expireOption, ref input, ref outputFooter, ref objectContext); + /// + public GarnetStatus HashPersist(ArgSlice key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter) + => storageSession.HashPersist(key, ref input, ref outputFooter, ref objectContext); + /// public GarnetStatus HashScan(ArgSlice key, long cursor, string match, int count, out ArgSlice[] items) => storageSession.ObjectScan(GarnetObjectType.Hash, key, cursor, match, count, out items, ref objectContext); + /// + public GarnetStatus HashTimeToLive(ArgSlice key, bool isMilliseconds, bool isTimestamp, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter) + => storageSession.HashTimeToLive(key, isMilliseconds, isTimestamp, ref input, ref outputFooter, ref objectContext); + #endregion } diff --git a/libs/server/API/GarnetWatchApi.cs b/libs/server/API/GarnetWatchApi.cs index 87f506721f..2addcca70b 100644 --- a/libs/server/API/GarnetWatchApi.cs +++ b/libs/server/API/GarnetWatchApi.cs @@ -442,6 +442,13 @@ public GarnetStatus HashScan(ArgSlice key, long cursor, string match, int count, return garnetApi.HashScan(key, cursor, match, count, out items); } + /// + public GarnetStatus HashTimeToLive(ArgSlice key, bool isMilliseconds, bool isTimestamp, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter) + { + garnetApi.WATCH(key, StoreType.Object); + return garnetApi.HashTimeToLive(key, isMilliseconds, isTimestamp, ref input, ref outputFooter); + } + #endregion #region Bitmap Methods diff --git a/libs/server/API/IGarnetApi.cs b/libs/server/API/IGarnetApi.cs index c5ab27bd69..9e2967f9b1 100644 --- a/libs/server/API/IGarnetApi.cs +++ b/libs/server/API/IGarnetApi.cs @@ -963,6 +963,15 @@ public interface IGarnetApi : IGarnetReadApi, IGarnetAdvancedApi /// The status of the operation. GarnetStatus HashExpire(ArgSlice key, long expireAt, bool isMilliseconds, ExpireOption expireOption, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter); + /// + /// Persists the specified hash key, removing any expiration time set on it. + /// + /// The key of the hash to persist. + /// The input object containing additional parameters. + /// The output object to store the result. + /// The status of the operation. + GarnetStatus HashPersist(ArgSlice key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter); + #endregion #region BitMaps Methods @@ -1600,6 +1609,17 @@ public interface IGarnetReadApi /// GarnetStatus HashScan(ArgSlice key, long cursor, string match, int count, out ArgSlice[] items); + /// + /// Returns the time to live for a hash key. + /// + /// The key of the hash. + /// Indicates if the time to live is in milliseconds. + /// Indicates if the time to live is a timestamp. + /// The input object containing additional parameters. + /// The output object to store the result. + /// The status of the operation. 
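    /// <remarks>
    /// Editor's note (not part of the patch text): based on the command handler added later in this change,
    /// the four read commands appear to map onto these flag pairs —
    /// HTTL => (isMilliseconds: false, isTimestamp: false), HPTTL => (true, false),
    /// HEXPIRETIME => (false, true), HPEXPIRETIME => (true, true).
    /// </remarks>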
+ GarnetStatus HashTimeToLive(ArgSlice key, bool isMilliseconds, bool isTimestamp, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter); + #endregion #region Bitmaps Methods diff --git a/libs/server/Objects/Hash/HashObject.cs b/libs/server/Objects/Hash/HashObject.cs index 4ddc8a160e..5c5dce6eb9 100644 --- a/libs/server/Objects/Hash/HashObject.cs +++ b/libs/server/Objects/Hash/HashObject.cs @@ -21,6 +21,8 @@ public enum HashOperation : byte { HCOLLECT, HEXPIRE, + HTTL, + HPERSIST, HGET, HMGET, HSET, @@ -198,6 +200,12 @@ public override unsafe bool Operate(ref ObjectInput input, ref SpanByteAndMemory case HashOperation.HEXPIRE: HashExpire(ref input, ref output); break; + case HashOperation.HTTL: + HashTimeToLive(ref input, ref output); + break; + case HashOperation.HPERSIST: + HashPersist(ref input, ref output); + break; case HashOperation.HKEYS: HashGetKeysOrValues(ref input, ref output); break; @@ -345,6 +353,8 @@ private void DeleteExpiredItems() break; } } + + // TODO: Delete the hash set if all the fields are expired } private bool TryGetValue(byte[] key, out byte[] value) @@ -422,7 +432,7 @@ private void Set(byte[] key, byte[] value) hash[key] = value; } - private int SetExpire(byte[] key, long expiration, ExpireOption expireOption) + private int SetExpiration(byte[] key, long expiration, ExpireOption expireOption) { if (!ContainsKey(key)) { @@ -431,7 +441,7 @@ private int SetExpire(byte[] key, long expiration, ExpireOption expireOption) if (expiration <= DateTimeOffset.UtcNow.Ticks) { - Remove(key, out _); + Persist(key); return 2; } @@ -464,6 +474,11 @@ private int SetExpire(byte[] key, long expiration, ExpireOption expireOption) { return 0; } + + if (expireOption.HasFlag(ExpireOption.GT)) + { + return 0; + } } expirationTimes[key] = expiration; @@ -471,6 +486,45 @@ private int SetExpire(byte[] key, long expiration, ExpireOption expireOption) return 1; } + private int Persist(byte[] key) + { + if (!ContainsKey(key)) + { + return -2; + } + + if (expirationTimes is not null && expirationTimes.TryGetValue(key, out var currentExpiration)) + { + expirationTimes.Remove(key); + expirationQueue.TryDequeue(out key, out _); + + if (expirationTimes.Count == 0) + { + expirationTimes = null; + expirationQueue = null; + } + + return 1; + } + + return -1; + } + + private long GetExpiration(byte[] key) + { + if (!ContainsKey(key)) + { + return -2; + } + + if (expirationTimes.TryGetValue(key, out var expiration)) + { + return expiration; + } + + return -1; + } + private KeyValuePair ElementAt(int index) { if (HasExpirableItems()) diff --git a/libs/server/Objects/Hash/HashObjectImpl.cs b/libs/server/Objects/Hash/HashObjectImpl.cs index f75be3f570..0d522b6d7d 100644 --- a/libs/server/Objects/Hash/HashObjectImpl.cs +++ b/libs/server/Objects/Hash/HashObjectImpl.cs @@ -458,21 +458,116 @@ private void HashExpire(ref ObjectInput input, ref SpanByteAndMemory output) ObjectOutputHeader _output = default; try { + DeleteExpiredItems(); + var expireOption = (ExpireOption)input.arg1; var expiration = input.parseState.GetLong(0); var numFields = input.parseState.Count - 1; while (!RespWriteUtils.WriteArrayLength(numFields, ref curr, end)) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + foreach (var item in input.parseState.Parameters.Slice(1)) + { + var result = SetExpiration(item.ToArray(), expiration, expireOption); + while (!RespWriteUtils.WriteInteger(result, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref 
ptrHandle, ref curr, ref end); + _output.result1++; + } + } + finally + { + while (!RespWriteUtils.WriteDirect(ref _output, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + + if (isMemory) ptrHandle.Dispose(); + output.Length = (int)(curr - ptr); + } + } + + private void HashTimeToLive(ref ObjectInput input, ref SpanByteAndMemory output) + { + var isMemory = false; + MemoryHandle ptrHandle = default; + var ptr = output.SpanByte.ToPointer(); + + var curr = ptr; + var end = curr + output.Length; + + ObjectOutputHeader _output = default; + try + { + DeleteExpiredItems(); + + var isMilliseconds = input.arg1 == 1; + var isTimestamp = input.arg2 == 1; + var numFields = input.parseState.Count; + while (!RespWriteUtils.WriteArrayLength(numFields, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + foreach (var item in input.parseState.Parameters) { - var result = SetExpire(item.ToArray(), expiration, expireOption); + var result = GetExpiration(item.ToArray()); + + if (result >= 0) + { + if (isTimestamp && isMilliseconds) + { + result = ConvertUtils.UnixTimeInMillisecondsFromTicks(result); + } + else if (isTimestamp && !isMilliseconds) + { + result = ConvertUtils.UnixTimeInSecondsFromTicks(result); + } + else if (!isTimestamp && isMilliseconds) + { + result = ConvertUtils.MillisecondsFromDiffUtcNowTicks(result); + } + else if (!isTimestamp && !isMilliseconds) + { + result = ConvertUtils.SecondsFromDiffUtcNowTicks(result); + } + } + while (!RespWriteUtils.WriteInteger(result, ref curr, end)) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); _output.result1++; } + } + finally + { + while (!RespWriteUtils.WriteDirect(ref _output, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + + if (isMemory) ptrHandle.Dispose(); + output.Length = (int)(curr - ptr); + } + } + + private void HashPersist(ref ObjectInput input, ref SpanByteAndMemory output) + { + var isMemory = false; + MemoryHandle ptrHandle = default; + var ptr = output.SpanByte.ToPointer(); + + var curr = ptr; + var end = curr + output.Length; + ObjectOutputHeader _output = default; + try + { DeleteExpiredItems(); + + var numFields = input.parseState.Count; + while (!RespWriteUtils.WriteArrayLength(numFields, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + + foreach (var item in input.parseState.Parameters) + { + var result = Persist(item.ToArray()); + while (!RespWriteUtils.WriteInteger(result, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + _output.result1++; + } } finally { diff --git a/libs/server/Resp/Objects/HashCommands.cs b/libs/server/Resp/Objects/HashCommands.cs index d8e149c66e..9582341d5d 100644 --- a/libs/server/Resp/Objects/HashCommands.cs +++ b/libs/server/Resp/Objects/HashCommands.cs @@ -587,45 +587,40 @@ private unsafe bool HashExpire(RespCommand command, ref TGarnetApi s if (parseState.Count <= 4) { - return AbortWithWrongNumberOfArguments(nameof(RespCommand.HEXPIRE)); + return AbortWithWrongNumberOfArguments(command.ToString()); } var key = parseState.GetArgSliceByRef(0); long expireAt = 0; var isMilliseconds = false; - if (command == RespCommand.HEXPIRE || command == RespCommand.HPEXPIRE) + if (!parseState.TryGetLong(1, out var expireTime)) 
{ - if (!parseState.TryGetInt(1, out var expireTime)) - { - return AbortWithErrorMessage(CmdStrings.RESP_ERR_GENERIC_VALUE_IS_NOT_INTEGER); - } - - if (expireTime < 0) - { - return AbortWithErrorMessage(CmdStrings.RESP_ERR_INVALID_EXPIRE_TIME); - } - expireAt = command == RespCommand.HEXPIRE ? DateTimeOffset.UtcNow.ToUnixTimeSeconds() + expireTime : DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() + expireTime; - isMilliseconds = command == RespCommand.HPEXPIRE; + return AbortWithErrorMessage(CmdStrings.RESP_ERR_GENERIC_VALUE_IS_NOT_INTEGER); } - else if (command == RespCommand.HEXPIREAT || command == RespCommand.HPEXPIREAT) + + if (expireTime < 0) { - if (!parseState.TryGetLong(1, out expireAt)) - { - return AbortWithErrorMessage(CmdStrings.RESP_ERR_GENERIC_VALUE_IS_NOT_INTEGER); - } - if (expireAt < 0) - { - return AbortWithErrorMessage(CmdStrings.RESP_ERR_INVALID_EXPIRE_TIME); - } - isMilliseconds = command == RespCommand.HPEXPIREAT; + return AbortWithErrorMessage(CmdStrings.RESP_ERR_INVALID_EXPIRE_TIME); } - else + + switch (command) { - throw new UnreachableException("Can't reach this piece of code"); + case RespCommand.HEXPIRE: + expireAt = DateTimeOffset.UtcNow.ToUnixTimeSeconds() + expireTime; + isMilliseconds = false; + break; + case RespCommand.HPEXPIRE: + expireAt = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() + expireTime; + isMilliseconds = true; + break; + case RespCommand.HPEXPIREAT: + isMilliseconds = true; + break; + default: // RespCommand.HEXPIREAT + break; } - var currIdx = 2; if (parseState.TryGetExpireOption(currIdx, out var expireOption)) { @@ -673,28 +668,71 @@ private unsafe bool HashExpire(RespCommand command, ref TGarnetApi s } /// - /// Prunes expired entries from the hash stored at key. + /// Returns the time to live (TTL) for the specified fields in the hash stored at the given key. /// - /// - /// - /// - /// - /*private unsafe bool HashCollect(RespCommand command, ref TGarnetApi storageApi) + /// The type of the storage API. + /// The RESP command indicating the type of TTL operation. + /// The storage API instance to interact with the underlying storage. + /// True if the operation was successful; otherwise, false. + /// Thrown when the object store is disabled. 
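        /// <remarks>
        /// Editor's sketch (not asserted by the patch): the per-field reply codes mirror GetExpiration in
        /// HashObject.cs — a non-negative TTL or expiration time, -1 when the field exists but has no expiration,
        /// and -2 when the field does not exist. For example (key and field names illustrative):
        ///   HTTL myhash FIELDS 2 field1 missing  =>  1) remaining seconds or -1   2) -2
        /// </remarks>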
+ private unsafe bool HashTimeToLive(RespCommand command, ref TGarnetApi storageApi) where TGarnetApi : IGarnetApi { - if (parseState.Count != 1) + if (storeWrapper.itemBroker == null) + throw new GarnetException("Object store is disabled"); + + if (parseState.Count <= 3) { - return AbortWithWrongNumberOfArguments("HCOLLECT"); + return AbortWithWrongNumberOfArguments(command.ToString()); } - var sbKey = parseState.GetArgSliceByRef(0).SpanByte; - var keyBytes = sbKey.ToByteArray(); + var key = parseState.GetArgSliceByRef(0); + + var fieldOption = parseState.GetArgSliceByRef(1); + if (!fieldOption.ReadOnlySpan.EqualsUpperCaseSpanIgnoringCase(CmdStrings.FIELDS)) + { + return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrMandatoryMissing, "FIELDS"))); + } + + if (!parseState.TryGetInt(2, out var numFields)) + { + return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericParamShouldBeGreaterThanZero, "numFields"))); + } + + if (parseState.Count != 3 + numFields) + { + return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrMustMatchNoOfArgs, "numFields"))); + } + + var isMilliseconds = false; + var isTimestamp = false; + switch (command) + { + case RespCommand.HPTTL: + isMilliseconds = true; + isTimestamp = false; + break; + case RespCommand.HEXPIRETIME: + isMilliseconds = false; + isTimestamp = true; + break; + case RespCommand.HPEXPIRETIME: + isMilliseconds = true; + isTimestamp = true; + break; + default: // RespCommand.HTTL + break; + } + + var fieldsParseState = parseState.Slice(3, numFields); // Prepare input - var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HCOLLECT }; - var input = new ObjectInput(header, ref parseState, startIdx: 1); + var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HTTL }; + var input = new ObjectInput(header, ref fieldsParseState); - var status = storageApi.HashCollect(keyBytes, ref input, out int output); + var outputFooter = new GarnetObjectStoreOutput { spanByteAndMemory = new SpanByteAndMemory(dcurr, (int)(dend - dcurr)) }; + + var status = storageApi.HashTimeToLive(key, isMilliseconds, isTimestamp, ref input, ref outputFooter); switch (status) { @@ -703,13 +741,64 @@ private unsafe bool HashExpire(RespCommand command, ref TGarnetApi s SendAndReset(); break; default: - // Returns number of fields got pruned - while (!RespWriteUtils.WriteInteger(output, ref dcurr, dend)) + ProcessOutputWithHeader(outputFooter.spanByteAndMemory); + break; + } + + return true; + } + + private unsafe bool HashPersist(ref TGarnetApi storageApi) + where TGarnetApi : IGarnetApi + { + if (storeWrapper.itemBroker == null) + throw new GarnetException("Object store is disabled"); + + if (parseState.Count <= 3) + { + return AbortWithWrongNumberOfArguments(nameof(RespCommand.HPERSIST)); + } + + var key = parseState.GetArgSliceByRef(0); + + var fieldOption = parseState.GetArgSliceByRef(1); + if (!fieldOption.ReadOnlySpan.EqualsUpperCaseSpanIgnoringCase(CmdStrings.FIELDS)) + { + return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrMandatoryMissing, "FIELDS"))); + } + + if (!parseState.TryGetInt(2, out var numFields)) + { + return AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericParamShouldBeGreaterThanZero, "numFields"))); + } + + if (parseState.Count != 3 + numFields) + { + return 
AbortWithErrorMessage(Encoding.ASCII.GetBytes(string.Format(CmdStrings.GenericErrMustMatchNoOfArgs, "numFields"))); + } + + var fieldsParseState = parseState.Slice(3, numFields); + + // Prepare input + var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HPERSIST }; + var input = new ObjectInput(header, ref fieldsParseState); + + var outputFooter = new GarnetObjectStoreOutput { spanByteAndMemory = new SpanByteAndMemory(dcurr, (int)(dend - dcurr)) }; + + var status = storageApi.HashPersist(key, ref input, ref outputFooter); + + switch (status) + { + case GarnetStatus.WRONGTYPE: + while (!RespWriteUtils.WriteError(CmdStrings.RESP_ERR_WRONG_TYPE, ref dcurr, dend)) SendAndReset(); break; + default: + ProcessOutputWithHeader(outputFooter.spanByteAndMemory); + break; } return true; - }*/ + } } } \ No newline at end of file diff --git a/libs/server/Resp/Parser/RespCommand.cs b/libs/server/Resp/Parser/RespCommand.cs index 896a52b722..a7c0a0dd2b 100644 --- a/libs/server/Resp/Parser/RespCommand.cs +++ b/libs/server/Resp/Parser/RespCommand.cs @@ -45,6 +45,10 @@ public enum RespCommand : ushort HSTRLEN, HVALS, KEYS, + HTTL, + HPTTL, + HEXPIRETIME, + HPEXPIRETIME, LINDEX, LLEN, LPOS, @@ -108,6 +112,7 @@ public enum RespCommand : ushort HPEXPIRE, HEXPIREAT, HPEXPIREAT, + HPERSIST, HINCRBY, HINCRBYFLOAT, HMSET, @@ -795,6 +800,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan { return RespCommand.HLEN; } + else if (*(ulong*)(ptr + 2) == MemoryMarshal.Read("\r\nHTTL\r\n"u8)) + { + return RespCommand.HTTL; + } break; case 'K': @@ -969,6 +978,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan { return RespCommand.HSCAN; } + else if (*(ulong*)(ptr + 3) == MemoryMarshal.Read("\nHPTTL\r\n"u8)) + { + return RespCommand.HPTTL; + } break; case 'L': @@ -1300,6 +1313,14 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan { return RespCommand.EXPIREAT; } + else if (*(ulong*)(ptr + 4) == MemoryMarshal.Read("HPEXPIRE"u8) && *(ushort*)(ptr + 12) == MemoryMarshal.Read("\r\n"u8)) + { + return RespCommand.HPEXPIRE; + } + else if (*(ulong*)(ptr + 4) == MemoryMarshal.Read("HPERSIST"u8) && *(ushort*)(ptr + 12) == MemoryMarshal.Read("\r\n"u8)) + { + return RespCommand.HPERSIST; + } break; case 9: if (*(ulong*)(ptr + 4) == MemoryMarshal.Read("SUBSCRIB"u8) && *(uint*)(ptr + 11) == MemoryMarshal.Read("BE\r\n"u8)) @@ -1330,6 +1351,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan { return RespCommand.PEXPIREAT; } + else if (*(ulong*)(ptr + 4) == MemoryMarshal.Read("HEXPIREA"u8) && *(uint*)(ptr + 11) == MemoryMarshal.Read("AT\r\n"u8)) + { + return RespCommand.HEXPIREAT; + } break; } @@ -1391,6 +1416,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan { return RespCommand.BRPOPLPUSH; } + else if (*(ulong*)(ptr + 1) == MemoryMarshal.Read("10\r\nHPEX"u8) && *(uint*)(ptr + 9) == MemoryMarshal.Read("PIREAT\r\n"u8)) + { + return RespCommand.HPEXPIREAT; + } break; case 11: @@ -1426,6 +1455,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan { return RespCommand.INCRBYFLOAT; } + else if (*(ulong*)(ptr + 2) == MemoryMarshal.Read("1\r\nHEXPI"u8) && *(ulong*)(ptr + 10) == MemoryMarshal.Read("RETIME\r\n"u8)) + { + return RespCommand.HEXPIRETIME; + } break; case 12: @@ -1437,6 +1470,10 @@ private RespCommand FastParseArrayCommand(ref int count, ref ReadOnlySpan { return RespCommand.HINCRBYFLOAT; } + else if (*(ulong*)(ptr + 3) == 
MemoryMarshal.Read("\r\nHPEXPI"u8) && *(ulong*)(ptr + 11) == MemoryMarshal.Read("RETIME\r\n"u8)) + { + return RespCommand.HPEXPIRETIME; + } break; case 13: diff --git a/libs/server/Resp/RespServerSession.cs b/libs/server/Resp/RespServerSession.cs index 8ede1b8264..5e529c437c 100644 --- a/libs/server/Resp/RespServerSession.cs +++ b/libs/server/Resp/RespServerSession.cs @@ -679,6 +679,11 @@ private bool ProcessArrayCommands(RespCommand cmd, ref TGarnetApi st RespCommand.HPEXPIRE => HashExpire(cmd, ref storageApi), RespCommand.HEXPIREAT => HashExpire(cmd, ref storageApi), RespCommand.HPEXPIREAT => HashExpire(cmd, ref storageApi), + RespCommand.HTTL => HashTimeToLive(cmd, ref storageApi), + RespCommand.HPTTL => HashTimeToLive(cmd, ref storageApi), + RespCommand.HEXPIRETIME => HashTimeToLive(cmd, ref storageApi), + RespCommand.HPEXPIRETIME => HashTimeToLive(cmd, ref storageApi), + RespCommand.HPERSIST => HashPersist(ref storageApi), RespCommand.HSETNX => HashSet(cmd, ref storageApi), RespCommand.HRANDFIELD => HashRandomField(cmd, ref storageApi), RespCommand.HSCAN => ObjectScan(GarnetObjectType.Hash, ref storageApi), diff --git a/libs/server/Storage/Session/ObjectStore/HashOps.cs b/libs/server/Storage/Session/ObjectStore/HashOps.cs index fb7bb20678..b2e9a9a972 100644 --- a/libs/server/Storage/Session/ObjectStore/HashOps.cs +++ b/libs/server/Storage/Session/ObjectStore/HashOps.cs @@ -555,10 +555,10 @@ public GarnetStatus HashExpire(ArgSlice key, long expireAt, bool where TObjectContext : ITsavoriteContext { var expireAtUtc = isMilliseconds ? ConvertUtils.UnixTimestampInMillisecondsToTicks(expireAt) : ConvertUtils.UnixTimestampInSecondsToTicks(expireAt); - var expiryLength = NumUtils.NumDigitsInLong(expireAt); + var expiryLength = NumUtils.NumDigitsInLong(expireAtUtc); var expirySlice = scratchBufferManager.CreateArgSlice(expiryLength); var expirySpan = expirySlice.Span; - NumUtils.LongToSpanByte(expireAt, expirySpan); + NumUtils.LongToSpanByte(expireAtUtc, expirySpan); parseState.Initialize(1 + input.parseState.Count); parseState.SetArgument(0, expirySlice); @@ -566,8 +566,39 @@ public GarnetStatus HashExpire(ArgSlice key, long expireAt, bool var innerInput = new ObjectInput(input.header, ref parseState, startIdx: 0, arg1: (int)expireOption); - var status = RMWObjectStoreOperationWithOutput(key.ToArray(), ref innerInput, ref objectContext, ref outputFooter); - return GarnetStatus.OK; + return RMWObjectStoreOperationWithOutput(key.ToArray(), ref innerInput, ref objectContext, ref outputFooter); } + + /// + /// Returns the time-to-live (TTL) of a hash key. + /// + /// The type of the object context. + /// The key of the hash. + /// Indicates whether the TTL is in milliseconds. + /// Indicates whether the TTL is a timestamp. + /// The input object containing the operation details. + /// The output footer object to store the result. + /// The object context for the operation. + /// The status of the operation. + public GarnetStatus HashTimeToLive(ArgSlice key, bool isMilliseconds, bool isTimestamp, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) + where TObjectContext : ITsavoriteContext + { + var innerInput = new ObjectInput(input.header, ref input.parseState, arg1: isMilliseconds ? 1 : 0, arg2: isTimestamp ? 1 : 0); + + return RMWObjectStoreOperationWithOutput(key.ToArray(), ref innerInput, ref objectContext, ref outputFooter); + } + + /// + /// Removes the expiration time from a hash key, making it persistent. 
+ /// + /// The type of the object context. + /// The key of the hash. + /// The input object containing the operation details. + /// The output footer object to store the result. + /// The object context for the operation. + /// The status of the operation. + public GarnetStatus HashPersist(ArgSlice key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) + where TObjectContext : ITsavoriteContext + => RMWObjectStoreOperationWithOutput(key.ToArray(), ref input, ref objectContext, ref outputFooter); } } \ No newline at end of file diff --git a/playground/CommandInfoUpdater/SupportedCommand.cs b/playground/CommandInfoUpdater/SupportedCommand.cs index ea76fe58c6..bb16763fbf 100644 --- a/playground/CommandInfoUpdater/SupportedCommand.cs +++ b/playground/CommandInfoUpdater/SupportedCommand.cs @@ -142,6 +142,11 @@ public class SupportedCommand new("HPEXPIRE", RespCommand.HPEXPIRE), new("HEXPIREAT", RespCommand.HEXPIREAT), new("HPEXPIREAT", RespCommand.HPEXPIREAT), + new("HTTL", RespCommand.HTTL), + new("HPTTL", RespCommand.HPTTL), + new("HEXPIRETIME", RespCommand.HEXPIRETIME), + new("HPEXPIRETIME", RespCommand.HPEXPIRETIME), + new("HPERSIST", RespCommand.HPERSIST), new("HGET", RespCommand.HGET), new("HGETALL", RespCommand.HGETALL), new("HINCRBY", RespCommand.HINCRBY), diff --git a/test/Garnet.test/RespHashTests.cs b/test/Garnet.test/RespHashTests.cs index 463d88759d..efe32a23cd 100644 --- a/test/Garnet.test/RespHashTests.cs +++ b/test/Garnet.test/RespHashTests.cs @@ -694,13 +694,13 @@ public void CheckHashOperationsOnWrongTypeObjectSE() } [Test] - public void CanDoHashExpire() + public async Task CanDoHashExpire() { using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); var db = redis.GetDatabase(0); - db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world")]); + db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world"), new HashEntry("field3", "new")]); - var result = db.Execute("HEXPIRE", "myhash", "10", "FIELDS", "2", "field1", "field2"); + var result = db.Execute("HEXPIRE", "myhash", "2", "FIELDS", "2", "field1", "field2"); var results = (RedisResult[])result; ClassicAssert.AreEqual(2, results.Length); ClassicAssert.AreEqual(1, (long)results[0]); // field1 success @@ -708,13 +708,22 @@ public void CanDoHashExpire() var ttl = (RedisResult[])db.Execute("HTTL", "myhash", "FIELDS", "2", "field1", "field2"); ClassicAssert.AreEqual(2, ttl.Length); - ClassicAssert.IsTrue((long)ttl[0] <= 10); // field1 TTL - ClassicAssert.IsTrue((long)ttl[1] <= 10); // field2 TTL + ClassicAssert.LessOrEqual((long)ttl[0], 10); + ClassicAssert.Greater((long)ttl[0], 0); + ClassicAssert.LessOrEqual((long)ttl[1], 10); + ClassicAssert.Greater((long)ttl[1], 0); + + await Task.Delay(2000); + + var items = db.HashGetAll("myhash"); + ClassicAssert.AreEqual(1, items.Length); + ClassicAssert.AreEqual("field3", items[0].Name.ToString()); + ClassicAssert.AreEqual("new", items[0].Value.ToString()); } [Test] [TestCase("NX", Description = "Set expiry only when no expiration exists")] - [TestCase("XX", Description = "Set expiry only when expiration exists")] + [TestCase("XX", Description = "Set expiry only when expiration exists")] [TestCase("GT", Description = "Set expiry only when new TTL is greater")] [TestCase("LT", Description = "Set expiry only when new TTL is less")] public void CanDoHashExpireWithOptions(string option) @@ -733,20 +742,21 @@ public void CanDoHashExpireWithOptions(string option) switch 
(option) { case "NX": - ClassicAssert.AreEqual(0L, (long)result[0]); // field1 has TTL - ClassicAssert.AreEqual(1L, (long)result[1]); // field2 no TTL + ClassicAssert.AreEqual(0, (long)result[0]); // field1 has TTL + ClassicAssert.AreEqual(1, (long)result[1]); // field2 no TTL break; case "XX": - ClassicAssert.AreEqual(1L, (long)result[0]); // field1 has TTL - ClassicAssert.AreEqual(0L, (long)result[1]); // field2 no TTL + ClassicAssert.AreEqual(1, (long)result[0]); // field1 has TTL + ClassicAssert.AreEqual(0, (long)result[1]); // field2 no TTL break; case "GT": - ClassicAssert.AreEqual(0L, (long)result[0]); // 10 < 20 - ClassicAssert.AreEqual(1L, (long)result[1]); // no TTL = infinite + // TODO: add 3rd field to check valid greater than + ClassicAssert.AreEqual(0, (long)result[0]); // 10 < 20 + ClassicAssert.AreEqual(0, (long)result[1]); // no TTL = infinite break; case "LT": - ClassicAssert.AreEqual(1L, (long)result[0]); // 10 < 20 - ClassicAssert.AreEqual(0L, (long)result[1]); // no TTL = infinite + ClassicAssert.AreEqual(1, (long)result[0]); // 10 < 20 + ClassicAssert.AreEqual(1, (long)result[1]); // no TTL = infinite break; } } @@ -839,7 +849,7 @@ public void TestHashExpireEdgeCases() ClassicAssert.IsFalse(db.HashExists("myhash", "field1")); } - #endregion + #endregion #region LightClientTests From 35e385fa2254d430b11ae419401069a8bd72f9b9 Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Wed, 11 Dec 2024 14:10:57 +0530 Subject: [PATCH 04/24] Added more test cases --- libs/server/Objects/Hash/HashObject.cs | 28 +- libs/server/Objects/Hash/HashObjectImpl.cs | 28 +- libs/server/Resp/Objects/HashCommands.cs | 8 +- test/Garnet.test/RespHashTests.cs | 537 ++++++++++++++++----- 4 files changed, 470 insertions(+), 131 deletions(-) diff --git a/libs/server/Objects/Hash/HashObject.cs b/libs/server/Objects/Hash/HashObject.cs index 5c5dce6eb9..3b51171bfa 100644 --- a/libs/server/Objects/Hash/HashObject.cs +++ b/libs/server/Objects/Hash/HashObject.cs @@ -341,10 +341,18 @@ private void DeleteExpiredItems() while (expiration < DateTimeOffset.UtcNow.Ticks) { - expirationQueue.TryDequeue(out key, out _); - expirationTimes.Remove(key); - hash.Remove(key); - // TODO: Update size + if (expirationTimes.TryGetValue(key, out var actualExpiration) && actualExpiration == expiration) + { + hash.Remove(key); + expirationTimes.Remove(key); + expirationQueue.Dequeue(); + } + else + { + expirationQueue.Dequeue(); + } + + // TODO: Update size based on if or else condition hasValue = expirationQueue.TryPeek(out key, out expiration); if (!hasValue) { @@ -353,8 +361,6 @@ private void DeleteExpiredItems() break; } } - - // TODO: Delete the hash set if all the fields are expired } private bool TryGetValue(byte[] key, out byte[] value) @@ -427,6 +433,13 @@ private void Add(byte[] key, byte[] value) } private void Set(byte[] key, byte[] value) + { + DeleteExpiredItems(); + hash[key] = value; + Persist(key); + } + + private void SetWithoutPersist(byte[] key, byte[] value) { DeleteExpiredItems(); hash[key] = value; @@ -441,7 +454,7 @@ private int SetExpiration(byte[] key, long expiration, ExpireOption expireOption if (expiration <= DateTimeOffset.UtcNow.Ticks) { - Persist(key); + Remove(key, out _); return 2; } @@ -496,7 +509,6 @@ private int Persist(byte[] key) if (expirationTimes is not null && expirationTimes.TryGetValue(key, out var currentExpiration)) { expirationTimes.Remove(key); - expirationQueue.TryDequeue(out key, out _); if (expirationTimes.Count == 0) { diff --git a/libs/server/Objects/Hash/HashObjectImpl.cs 
b/libs/server/Objects/Hash/HashObjectImpl.cs index 0d522b6d7d..956fb76124 100644 --- a/libs/server/Objects/Hash/HashObjectImpl.cs +++ b/libs/server/Objects/Hash/HashObjectImpl.cs @@ -205,6 +205,15 @@ private void HashRandomField(ref ObjectInput input, ref SpanByteAndMemory output if (includedCount) { var count = Count(); + + if (count == 0) // This can happen because of expiration but RMW operation haven't applied yet + { + while (!RespWriteUtils.WriteEmptyArray(ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + _output.result1 = 0; + return; + } + if (countParameter > 0 && countParameter > count) countParameter = count; @@ -233,7 +242,16 @@ private void HashRandomField(ref ObjectInput input, ref SpanByteAndMemory output else // No count parameter is present, we just return a random field { // Write a bulk string value of a random field from the hash value stored at key. - var index = RandomUtils.PickRandomIndex(Count(), seed); + var count = Count(); + if (count == 0) // This can happen because of expiration but RMW operation haven't applied yet + { + while (!RespWriteUtils.WriteNull(ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + _output.result1 = 0; + return; + } + + var index = RandomUtils.PickRandomIndex(count, seed); var pair = ElementAt(index); while (!RespWriteUtils.WriteBulkString(pair.Key, ref curr, end)) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); @@ -269,9 +287,9 @@ private void HashSet(ref ObjectInput input, byte* output) this.UpdateSize(key, value); _output->result1++; } - else if ((hop == HashOperation.HSET || hop == HashOperation.HMSET) && hashValue != default && - !hashValue.AsSpan().SequenceEqual(value)) + else if ((hop == HashOperation.HSET || hop == HashOperation.HMSET) && hashValue != default) { + // TODO: Update size to remove expiration Set(key, value); // Skip overhead as existing item is getting replaced. 
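                // Editor's note: with this change Set() also clears any field expiration (plain HSET semantics),
                // while the HINCRBY/HINCRBYFLOAT paths below switch to SetWithoutPersist() so an existing TTL survives.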
this.Size += Utility.RoundUp(value.Length, IntPtr.Size) - @@ -377,7 +395,7 @@ private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output) resultSpan = resultSpan.Slice(0, bytesWritten); resultBytes = resultSpan.ToArray(); - Set(key, resultBytes); + SetWithoutPersist(key, resultBytes); Size += Utility.RoundUp(resultBytes.Length, IntPtr.Size) - Utility.RoundUp(value.Length, IntPtr.Size); } @@ -418,7 +436,7 @@ private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output) result += incr; resultBytes = Encoding.ASCII.GetBytes(result.ToString(CultureInfo.InvariantCulture)); - Set(key, resultBytes); + SetWithoutPersist(key, resultBytes); Size += Utility.RoundUp(resultBytes.Length, IntPtr.Size) - Utility.RoundUp(value.Length, IntPtr.Size); } diff --git a/libs/server/Resp/Objects/HashCommands.cs b/libs/server/Resp/Objects/HashCommands.cs index 9582341d5d..8cb1774570 100644 --- a/libs/server/Resp/Objects/HashCommands.cs +++ b/libs/server/Resp/Objects/HashCommands.cs @@ -594,12 +594,12 @@ private unsafe bool HashExpire(RespCommand command, ref TGarnetApi s long expireAt = 0; var isMilliseconds = false; - if (!parseState.TryGetLong(1, out var expireTime)) + if (!parseState.TryGetLong(1, out expireAt)) { return AbortWithErrorMessage(CmdStrings.RESP_ERR_GENERIC_VALUE_IS_NOT_INTEGER); } - if (expireTime < 0) + if (expireAt < 0) { return AbortWithErrorMessage(CmdStrings.RESP_ERR_INVALID_EXPIRE_TIME); } @@ -607,11 +607,11 @@ private unsafe bool HashExpire(RespCommand command, ref TGarnetApi s switch (command) { case RespCommand.HEXPIRE: - expireAt = DateTimeOffset.UtcNow.ToUnixTimeSeconds() + expireTime; + expireAt = DateTimeOffset.UtcNow.ToUnixTimeSeconds() + expireAt; isMilliseconds = false; break; case RespCommand.HPEXPIRE: - expireAt = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() + expireTime; + expireAt = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() + expireAt; isMilliseconds = true; break; case RespCommand.HPEXPIREAT: diff --git a/test/Garnet.test/RespHashTests.cs b/test/Garnet.test/RespHashTests.cs index efe32a23cd..6a01179c4c 100644 --- a/test/Garnet.test/RespHashTests.cs +++ b/test/Garnet.test/RespHashTests.cs @@ -57,6 +57,33 @@ public void CanSetAndGetOnePair() ClassicAssert.AreEqual("Tsavorite", r); } + [Test] + public async Task CanSetAndGetOnePairWithExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite")]); + db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); + string r = db.HashGet("user:user1", "Title"); + ClassicAssert.AreEqual("Tsavorite", r); + await Task.Delay(110); + r = db.HashGet("user:user1", "Title"); + ClassicAssert.IsNull(r); + } + + [Test] + public async Task CanSetWithExpireAndRemoveExpireByCallingSetAgain() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite")]); + db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); + db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite")]); + await Task.Delay(110); + string r = db.HashGet("user:user1", "Title"); + ClassicAssert.AreEqual("Tsavorite", r); + } + [Test] public void CanSetAndGetOnePairLarge() { @@ -116,6 +143,19 @@ public void CanDelSingleField() ClassicAssert.AreEqual("2021", resultGet); } + [Test] + public void CanDelWithExpire() + { + using var redis = 
ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite"), new HashEntry("Year", "2021")]); + db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); + var result = db.HashDelete(new RedisKey("user:user1"), new RedisValue("Title")); + ClassicAssert.AreEqual(true, result); + string resultGet = db.HashGet("user:user1", "Year"); + ClassicAssert.AreEqual("2021", resultGet); + } + [Test] public void CanDeleteMultipleFields() @@ -153,6 +193,23 @@ public void CanDoHLen() ClassicAssert.AreEqual(3, result); } + [Test] + public async Task CanDoHLenWithExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite"), new HashEntry("Year", "2021"), new HashEntry("Company", "Acme")]); + db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); + var result = db.HashLength("user:user1"); + ClassicAssert.AreEqual(3, result); + await Task.Delay(110); + result = db.HashLength("user:user1"); + ClassicAssert.AreEqual(2, result); + db.HashSet("user:user1", [new HashEntry("Year", "new2021")]); // Trigger deletion of expired field + result = db.HashLength("user:user1"); + ClassicAssert.AreEqual(2, result); + } + [Test] public void CanDoGetAll() { @@ -167,6 +224,33 @@ public void CanDoGetAll() ClassicAssert.IsTrue(hashEntries.OrderBy(e => e.Name).SequenceEqual(result.OrderBy(r => r.Name))); } + [Test] + public async Task CanDoGetAllWithExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + HashEntry[] hashEntries = + [new HashEntry("Title", "Tsavorite"), new HashEntry("Year", "2021"), new HashEntry("Company", "Acme")]; + db.HashSet("user:user1", hashEntries); + db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); + + var result = db.HashGetAll("user:user1"); + ClassicAssert.AreEqual(hashEntries.Length, result.Length); + ClassicAssert.AreEqual(hashEntries.Length, result.Select(r => r.Name).Distinct().Count()); + ClassicAssert.IsTrue(hashEntries.OrderBy(e => e.Name).SequenceEqual(result.OrderBy(r => r.Name))); + + await Task.Delay(110); + + result = db.HashGetAll("user:user1"); + ClassicAssert.AreEqual(hashEntries.Length - 1, result.Length); + ClassicAssert.IsTrue(hashEntries.Skip(1).OrderBy(e => e.Name).SequenceEqual(result.OrderBy(r => r.Name))); + + db.HashSet("user:user1", [new HashEntry("Year", "new2021")]); // Trigger deletion of expired field + result = db.HashGetAll("user:user1"); + ClassicAssert.AreEqual(hashEntries.Length - 1, result.Length); + ClassicAssert.IsTrue(hashEntries.Skip(1).Select(x => x.Value == "2021" ? 
new HashEntry(x.Name, "new2021") : x).OrderBy(e => e.Name).SequenceEqual(result.OrderBy(r => r.Name))); + } + [Test] public void CanDoHExists() { @@ -180,6 +264,27 @@ public void CanDoHExists() ClassicAssert.AreEqual(false, result); } + [Test] + public async Task CanDoHExistsWithExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite"), new HashEntry("Year", "2021"), new HashEntry("Company", "Acme")]); + db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); + + var result = db.HashExists(new RedisKey("user:user1"), "Title"); + ClassicAssert.IsTrue(result); + + await Task.Delay(110); + + result = db.HashExists(new RedisKey("user:user1"), "Title"); + ClassicAssert.IsFalse(result); + + db.HashSet("user:user1", [new HashEntry("Year", "new2021")]); // Trigger deletion of expired field + result = db.HashExists(new RedisKey("user:user1"), "Title"); + ClassicAssert.IsFalse(result); + } + [Test] public void CanDoHStrLen() { @@ -194,6 +299,27 @@ public void CanDoHStrLen() ClassicAssert.AreEqual(0, r, 0); } + [Test] + public async Task CanDoHStrLenWithExire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite")]); + db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); + + long r = db.HashStringLength("user:user1", "Title"); + ClassicAssert.AreEqual(9, r); + + await Task.Delay(110); + + r = db.HashStringLength("user:user1", "Title"); + ClassicAssert.AreEqual(0, r); + + db.HashSet("user:user1", [new HashEntry("Year", "new2021")]); // Trigger deletion of expired field + r = db.HashStringLength("user:user1", "Title"); + ClassicAssert.AreEqual(0, r); + } + [Test] public void CanDoHKeys() { @@ -208,6 +334,34 @@ public void CanDoHKeys() ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Company"))); } + [Test] + public async Task CanDoHKeysWithExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite"), new HashEntry("Year", "2021"), new HashEntry("Company", "Acme")]); + db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); + var result = db.HashKeys("user:user1"); + ClassicAssert.AreEqual(3, result.Length); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Title"))); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Year"))); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Company"))); + + await Task.Delay(110); + + result = db.HashKeys("user:user1"); + ClassicAssert.AreEqual(2, result.Length); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Year"))); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Company"))); + + db.HashSet("user:user1", [new HashEntry("Year", "new2021")]); // Trigger deletion of expired field + + result = db.HashKeys("user:user1"); + ClassicAssert.AreEqual(2, result.Length); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Year"))); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Company"))); + } + [Test] public void CanDoHVals() @@ -223,6 +377,34 @@ public void CanDoHVals() ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Acme"))); } + [Test] + public async Task CanDoHValsWithExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var 
db = redis.GetDatabase(0); + db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite"), new HashEntry("Year", "2021"), new HashEntry("Company", "Acme")]); + db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); + var result = db.HashValues("user:user1"); + ClassicAssert.AreEqual(3, result.Length); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Tsavorite"))); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("2021"))); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Acme"))); + + await Task.Delay(110); + + result = db.HashValues("user:user1"); + ClassicAssert.AreEqual(2, result.Length); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("2021"))); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Acme"))); + + db.HashSet("user:user1", [new HashEntry("Year", "new2021")]); // Trigger deletion of expired field + + result = db.HashValues("user:user1"); + ClassicAssert.AreEqual(2, result.Length); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("new2021"))); + ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Acme"))); + } + [Test] public void CanDoHIncrBy() @@ -241,6 +423,22 @@ public void CanDoHIncrBy() ClassicAssert.AreEqual(4, ((int?)getResult)); } + [Test] + public async Task CanDoHIncrByWithExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + db.HashSet("user:user1", [new HashEntry("Field1", "StringValue"), new HashEntry("Field2", "1")]); + db.HashFieldExpire("user:user1", ["Field2"], TimeSpan.FromMilliseconds(100)); + var result = db.HashIncrement(new RedisKey("user:user1"), new RedisValue("Field2"), -4); + ClassicAssert.AreEqual(-3, result); + + await Task.Delay(110); + + result = db.HashIncrement(new RedisKey("user:user1"), new RedisValue("Field2"), -4); + ClassicAssert.AreEqual(-4, result); + } + [Test] public void CanDoHIncrByLTM() { @@ -283,6 +481,22 @@ public void CheckHashIncrementDoublePrecision() ClassicAssert.AreEqual(3.3333333333, result, 1e-15); } + [Test] + public async Task CheckHashIncrementDoublePrecisionWithExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + db.HashSet("user:user1", [new HashEntry("Field1", "1.1111111111")]); + db.HashFieldExpire("user:user1", ["Field1"], TimeSpan.FromMilliseconds(100)); + var result = db.HashIncrement(new RedisKey("user:user1"), new RedisValue("Field1"), 2.2222222222); + ClassicAssert.AreEqual(3.3333333333, result, 1e-15); + + await Task.Delay(110); + + result = db.HashIncrement(new RedisKey("user:user1"), new RedisValue("Field1"), 2.2222222222); + ClassicAssert.AreEqual(2.2222222222, result, 1e-15); + } + [Test] public void CanDoHSETNXCommand() { @@ -296,6 +510,21 @@ public void CanDoHSETNXCommand() ClassicAssert.AreEqual("Hello", result); } + [Test] + public async Task CanDoHSETNXCommandWithExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + db.HashSet(new RedisKey("user:user1"), new RedisValue("Field"), new RedisValue("Hello")); + db.HashFieldExpire("user:user1", ["Field"], TimeSpan.FromMilliseconds(100)); + db.HashSet(new RedisKey("user:user1"), new RedisValue("Field"), new RedisValue("Hello"), When.NotExists); + + await Task.Delay(110); + + string result = db.HashGet("user:user1", "Field"); + ClassicAssert.IsNull(result); // SetNX should not reset the expiration + } + [Test] public void CanDoRandomField() { @@ -362,6 +591,45 @@ public void 
CanDoRandomField() ClassicAssert.IsTrue(fieldsWithValues.All(e => hashDict.ContainsKey(e.Name) && hashDict[e.Name] == e.Value)); } + [Test] + public async Task CanDoRandomFieldWithExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + + var hashKey = new RedisKey("user:user1"); + HashEntry[] hashEntries = [new HashEntry("Title", "Tsavorite")]; + db.HashSet(hashKey, hashEntries); + db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); + string field = db.HashRandomField(hashKey); + ClassicAssert.AreEqual(field, "Title"); + + await Task.Delay(110); + + field = db.HashRandomField(hashKey); + ClassicAssert.IsNull(field); + } + + [Test] + public async Task CanDoRandomFieldsWithExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + + var hashKey = new RedisKey("user:user1"); + HashEntry[] hashEntries = [new HashEntry("Title", "Tsavorite")]; + db.HashSet(hashKey, hashEntries); + db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); + var field = db.HashRandomFields(hashKey, 10).Select(x => (string)x).ToArray(); + ClassicAssert.AreEqual(field.Length, 1); + ClassicAssert.AreEqual("Title", field[0]); + + await Task.Delay(110); + + field = db.HashRandomFields(hashKey, 10).Select(x => (string)x).ToArray(); + ClassicAssert.AreEqual(field.Length, 0); + } + [Test] public void HashRandomFieldEmptyHash() { @@ -435,6 +703,27 @@ public void CanDoHashScan() CollectionAssert.AreEquivalent(new[] { "email", "email1" }, fieldsStr); } + [Test] + public async Task CanDoHashScanWithExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + + db.HashSet("user:user789", [new HashEntry("email", "email@example.com"), new HashEntry("email1", "email1@example.com"), new HashEntry("email2", "email2@example.com"), new HashEntry("email3", "email3@example.com"), new HashEntry("age", "25")]); + db.HashFieldExpire("user:user789", ["email"], TimeSpan.FromMilliseconds(100)); + + var members = db.HashScan("user:user789", "email*"); + ClassicAssert.IsTrue(((IScanningCursor)members).Cursor == 0); + ClassicAssert.IsTrue(members.Count() == 4, "HSCAN with MATCH failed."); + + await Task.Delay(110); + + // HSCAN with match + members = db.HashScan("user:user789", "email*"); + ClassicAssert.IsTrue(((IScanningCursor)members).Cursor == 0); + ClassicAssert.IsTrue(members.Count() == 3, "HSCAN with MATCH failed."); + } + [Test] public void CanDoHashScanWithCursor() @@ -517,6 +806,32 @@ public async Task CanDoHMGET() #nullable disable } + [Test] + public async Task CanDoHMGETWithExpire() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + + db.HashSet("user:user789", [new HashEntry("email", "email@example.com"), new HashEntry("email1", "email1@example.com"), new HashEntry("email2", "email2@example.com"), new HashEntry("email3", "email3@example.com"), new HashEntry("age", "25")]); + db.HashFieldExpire("user:user789", ["email"], TimeSpan.FromMilliseconds(100)); + + var members = (string[])db.Execute("HMGET", "user:user789", "email", "email1"); + ClassicAssert.AreEqual("email@example.com", members[0]); + ClassicAssert.AreEqual("email1@example.com", members[1]); + + await Task.Delay(110); + + members = (string[])db.Execute("HMGET", "user:user789", "email", "email1"); + ClassicAssert.IsNull(members[0]); + ClassicAssert.AreEqual("email1@example.com", 
members[1]); + + db.HashSet("user:user789", [new HashEntry("email2", "newemail2@example.com")]); // Trigger deletion of expired field + + members = (string[])db.Execute("HMGET", "user:user789", "email", "email1"); + ClassicAssert.IsNull(members[0]); + ClassicAssert.AreEqual("email1@example.com", members[1]); + } + [Test] public async Task CanDoHGETALL() @@ -698,157 +1013,151 @@ public async Task CanDoHashExpire() { using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); var db = redis.GetDatabase(0); - db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world"), new HashEntry("field3", "new")]); + db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world"), new HashEntry("field3", "value3"), new HashEntry("field4", "value4"), new HashEntry("field5", "value5"), new HashEntry("field6", "value6")]); - var result = db.Execute("HEXPIRE", "myhash", "2", "FIELDS", "2", "field1", "field2"); + var result = db.Execute("HEXPIRE", "myhash", "3", "FIELDS", "3", "field1", "field5", "nonexistfield"); var results = (RedisResult[])result; + ClassicAssert.AreEqual(3, results.Length); + ClassicAssert.AreEqual(1, (long)results[0]); + ClassicAssert.AreEqual(1, (long)results[1]); + ClassicAssert.AreEqual(-2, (long)results[2]); + + result = db.Execute("HPEXPIRE", "myhash", "3000", "FIELDS", "2", "field2", "nonexistfield"); + results = (RedisResult[])result; ClassicAssert.AreEqual(2, results.Length); - ClassicAssert.AreEqual(1, (long)results[0]); // field1 success - ClassicAssert.AreEqual(1, (long)results[1]); // field2 success + ClassicAssert.AreEqual(1, (long)results[0]); + ClassicAssert.AreEqual(-2, (long)results[1]); - var ttl = (RedisResult[])db.Execute("HTTL", "myhash", "FIELDS", "2", "field1", "field2"); + result = db.Execute("HEXPIREAT", "myhash", DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeSeconds().ToString(), "FIELDS", "2", "field3", "nonexistfield"); + results = (RedisResult[])result; + ClassicAssert.AreEqual(2, results.Length); + ClassicAssert.AreEqual(1, (long)results[0]); + ClassicAssert.AreEqual(-2, (long)results[1]); + + result = db.Execute("HPEXPIREAT", "myhash", DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeMilliseconds().ToString(), "FIELDS", "2", "field4", "nonexistfield"); + results = (RedisResult[])result; + ClassicAssert.AreEqual(2, results.Length); + ClassicAssert.AreEqual(1, (long)results[0]); + ClassicAssert.AreEqual(-2, (long)results[1]); + + var ttl = (RedisResult[])db.Execute("HTTL", "myhash", "FIELDS", "2", "field1", "nonexistfield"); + ClassicAssert.AreEqual(2, ttl.Length); + ClassicAssert.LessOrEqual((long)ttl[0], 3); + ClassicAssert.Greater((long)ttl[0], 1); + ClassicAssert.AreEqual(-2, (long)results[1]); + + ttl = (RedisResult[])db.Execute("HPTTL", "myhash", "FIELDS", "2", "field1", "nonexistfield"); + ClassicAssert.AreEqual(2, ttl.Length); + ClassicAssert.LessOrEqual((long)ttl[0], 3000); + ClassicAssert.Greater((long)ttl[0], 1000); + ClassicAssert.AreEqual(-2, (long)results[1]); + + ttl = (RedisResult[])db.Execute("HEXPIRETIME", "myhash", "FIELDS", "2", "field1", "nonexistfield"); ClassicAssert.AreEqual(2, ttl.Length); - ClassicAssert.LessOrEqual((long)ttl[0], 10); - ClassicAssert.Greater((long)ttl[0], 0); - ClassicAssert.LessOrEqual((long)ttl[1], 10); - ClassicAssert.Greater((long)ttl[1], 0); + ClassicAssert.LessOrEqual((long)ttl[0], DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeSeconds()); + ClassicAssert.Greater((long)ttl[0], DateTimeOffset.UtcNow.AddSeconds(1).ToUnixTimeSeconds()); + 
ClassicAssert.AreEqual(-2, (long)results[1]); - await Task.Delay(2000); + ttl = (RedisResult[])db.Execute("HPEXPIRETIME", "myhash", "FIELDS", "2", "field1", "nonexistfield"); + ClassicAssert.AreEqual(2, ttl.Length); + ClassicAssert.LessOrEqual((long)ttl[0], DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeMilliseconds()); + ClassicAssert.Greater((long)ttl[0], DateTimeOffset.UtcNow.AddSeconds(1).ToUnixTimeMilliseconds()); + ClassicAssert.AreEqual(-2, (long)results[1]); + + results = (RedisResult[])db.Execute("HPERSIST", "myhash", "FIELDS", "3", "field5", "field6", "nonexistfield"); + ClassicAssert.AreEqual(3, results.Length); + ClassicAssert.AreEqual(1, (long)results[0]); // 1 the expiration was removed. + ClassicAssert.AreEqual(-1, (long)results[1]); // -1 if the field exists but has no associated expiration set. + ClassicAssert.AreEqual(-2, (long)results[2]); + + await Task.Delay(3000); var items = db.HashGetAll("myhash"); - ClassicAssert.AreEqual(1, items.Length); - ClassicAssert.AreEqual("field3", items[0].Name.ToString()); - ClassicAssert.AreEqual("new", items[0].Value.ToString()); + ClassicAssert.AreEqual(2, items.Length); + ClassicAssert.AreEqual("field5", items[0].Name.ToString()); + ClassicAssert.AreEqual("value5", items[0].Value.ToString()); + ClassicAssert.AreEqual("field6", items[1].Name.ToString()); + ClassicAssert.AreEqual("value6", items[1].Value.ToString()); + + result = db.Execute("HEXPIRE", "myhash", "0", "FIELDS", "1", "field5"); + results = (RedisResult[])result; + ClassicAssert.AreEqual(1, results.Length); + ClassicAssert.AreEqual(2, (long)results[0]); + + result = db.Execute("HEXPIREAT", "myhash", DateTimeOffset.UtcNow.AddSeconds(-1).ToUnixTimeSeconds().ToString(), "FIELDS", "1", "field6"); + results = (RedisResult[])result; + ClassicAssert.AreEqual(1, results.Length); + ClassicAssert.AreEqual(2, (long)results[0]); + + items = db.HashGetAll("myhash"); + ClassicAssert.AreEqual(0, items.Length); } [Test] - [TestCase("NX", Description = "Set expiry only when no expiration exists")] - [TestCase("XX", Description = "Set expiry only when expiration exists")] - [TestCase("GT", Description = "Set expiry only when new TTL is greater")] - [TestCase("LT", Description = "Set expiry only when new TTL is less")] - public void CanDoHashExpireWithOptions(string option) + [TestCase("HEXPIRE", "NX", Description = "Set expiry only when no expiration exists")] + [TestCase("HEXPIRE", "XX", Description = "Set expiry only when expiration exists")] + [TestCase("HEXPIRE", "GT", Description = "Set expiry only when new TTL is greater")] + [TestCase("HEXPIRE", "LT", Description = "Set expiry only when new TTL is less")] + [TestCase("HPEXPIRE", "NX", Description = "Set expiry only when no expiration exists")] + [TestCase("HPEXPIRE", "XX", Description = "Set expiry only when expiration exists")] + [TestCase("HPEXPIRE", "GT", Description = "Set expiry only when new TTL is greater")] + [TestCase("HPEXPIRE", "LT", Description = "Set expiry only when new TTL is less")] + [TestCase("HEXPIREAT", "NX", Description = "Set expiry only when no expiration exists")] + [TestCase("HEXPIREAT", "XX", Description = "Set expiry only when expiration exists")] + [TestCase("HEXPIREAT", "GT", Description = "Set expiry only when new TTL is greater")] + [TestCase("HEXPIREAT", "LT", Description = "Set expiry only when new TTL is less")] + [TestCase("HPEXPIREAT", "NX", Description = "Set expiry only when no expiration exists")] + [TestCase("HPEXPIREAT", "XX", Description = "Set expiry only when expiration exists")] + 
[TestCase("HPEXPIREAT", "GT", Description = "Set expiry only when new TTL is greater")] + [TestCase("HPEXPIREAT", "LT", Description = "Set expiry only when new TTL is less")] + public void CanDoHashExpireWithOptions(string command, string option) { using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); var db = redis.GetDatabase(0); - - db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world")]); + + db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world"), new HashEntry("field3", "welcome"), new HashEntry("field4", "back")]); + + (var expireTimeField1, var expireTimeField3, var newExpireTimeField) = command switch + { + "HEXPIRE" => ("1", "3", "2"), + "HPEXPIRE" => ("1000", "3000", "2000"), + "HEXPIREAT" => (DateTimeOffset.UtcNow.AddSeconds(1).ToUnixTimeSeconds().ToString(), DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeSeconds().ToString(), DateTimeOffset.UtcNow.AddSeconds(2).ToUnixTimeSeconds().ToString()), + "HPEXPIREAT" => (DateTimeOffset.UtcNow.AddSeconds(1).ToUnixTimeMilliseconds().ToString(), DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeMilliseconds().ToString(), DateTimeOffset.UtcNow.AddSeconds(2).ToUnixTimeMilliseconds().ToString()), + _ => throw new ArgumentException("Invalid command") + }; // First set TTL for field1 only - db.Execute("HEXPIRE", "myhash", "20", "FIELDS", "1", "field1"); + db.Execute(command, "myhash", expireTimeField1, "FIELDS", "1", "field1"); + db.Execute(command, "myhash", expireTimeField3, "FIELDS", "1", "field3"); // Try setting TTL with option - var result = (RedisResult[])db.Execute("HEXPIRE", "myhash", "10", option, "FIELDS", "2", "field1", "field2"); - + var result = (RedisResult[])db.Execute(command, "myhash", newExpireTimeField, option, "FIELDS", "3", "field1", "field2", "field3"); + switch (option) { case "NX": ClassicAssert.AreEqual(0, (long)result[0]); // field1 has TTL ClassicAssert.AreEqual(1, (long)result[1]); // field2 no TTL + ClassicAssert.AreEqual(0, (long)result[2]); // field1 has TTL break; case "XX": ClassicAssert.AreEqual(1, (long)result[0]); // field1 has TTL ClassicAssert.AreEqual(0, (long)result[1]); // field2 no TTL + ClassicAssert.AreEqual(1, (long)result[2]); // field1 has TTL break; case "GT": - // TODO: add 3rd field to check valid greater than - ClassicAssert.AreEqual(0, (long)result[0]); // 10 < 20 + ClassicAssert.AreEqual(1, (long)result[0]); // 20 > 10 ClassicAssert.AreEqual(0, (long)result[1]); // no TTL = infinite + ClassicAssert.AreEqual(0, (long)result[2]); // 20 !> 30 break; case "LT": - ClassicAssert.AreEqual(1, (long)result[0]); // 10 < 20 + ClassicAssert.AreEqual(0, (long)result[0]); // 20 !< 10 ClassicAssert.AreEqual(1, (long)result[1]); // no TTL = infinite + ClassicAssert.AreEqual(1, (long)result[2]); // 20 < 30 break; } } - [Test] - public void CanDoHashExpireAt() - { - using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); - var db = redis.GetDatabase(0); - - db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world")]); - - var futureTime = DateTimeOffset.UtcNow.AddSeconds(30).ToUnixTimeSeconds(); - var result = (RedisResult[])db.Execute("HEXPIREAT", "myhash", futureTime.ToString(), "FIELDS", "2", "field1", "field2"); - ClassicAssert.AreEqual(2, result.Length); - ClassicAssert.AreEqual(1L, (long)result[0]); - ClassicAssert.AreEqual(1L, (long)result[1]); - - var ttl = (RedisResult[])db.Execute("HTTL", "myhash", "FIELDS", "2", "field1", "field2"); - ClassicAssert.IsTrue((long)ttl[0] <= 
30); - ClassicAssert.IsTrue((long)ttl[1] <= 30); - } - - [Test] - public void CanDoHashPreciseExpire() - { - using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); - var db = redis.GetDatabase(0); - - db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world")]); - - var result = (RedisResult[])db.Execute("HPEXPIRE", "myhash", "1000", "FIELDS", "2", "field1", "field2"); - ClassicAssert.AreEqual(2, result.Length); - ClassicAssert.AreEqual(1L, (long)result[0]); - ClassicAssert.AreEqual(1L, (long)result[1]); - - var pttl = (RedisResult[])db.Execute("HPTTL", "myhash", "FIELDS", "2", "field1", "field2"); - ClassicAssert.IsTrue((long)pttl[0] <= 1000); - ClassicAssert.IsTrue((long)pttl[1] <= 1000); - } - - [Test] - public void CanDoHashPreciseExpireAt() - { - using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); - var db = redis.GetDatabase(0); - - db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world")]); - - var futureTimeMs = DateTimeOffset.UtcNow.AddSeconds(30).ToUnixTimeMilliseconds(); - var result = (RedisResult[])db.Execute("HPEXPIREAT", "myhash", futureTimeMs.ToString(), "FIELDS", "2", "field1", "field2"); - ClassicAssert.AreEqual(2, result.Length); - ClassicAssert.AreEqual(1L, (long)result[0]); - ClassicAssert.AreEqual(1L, (long)result[1]); - - var pttl = (RedisResult[])db.Execute("HPTTL", "myhash", "FIELDS", "2", "field1", "field2"); - ClassicAssert.IsTrue((long)pttl[0] <= 30000); - ClassicAssert.IsTrue((long)pttl[1] <= 30000); - } - - [Test] - public void TestHashExpireEdgeCases() - { - using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); - var db = redis.GetDatabase(0); - - // Test with non-existent key - var result = (RedisResult[])db.Execute("HEXPIRE", "nonexistent", "10", "FIELDS", "1", "field1"); - ClassicAssert.AreEqual(1, result.Length); - ClassicAssert.AreEqual(-2L, (long)result[0]); // Key doesn't exist - - // Test with non-existent fields - db.HashSet("myhash", "field1", "hello"); - result = (RedisResult[])db.Execute("HEXPIRE", "myhash", "10", "FIELDS", "2", "field1", "nonexistent"); - ClassicAssert.AreEqual(2, result.Length); - ClassicAssert.AreEqual(1L, (long)result[0]); // Existing field - ClassicAssert.AreEqual(-2L, (long)result[1]); // Non-existent field - - // Test with zero TTL (should delete fields) - result = (RedisResult[])db.Execute("HEXPIRE", "myhash", "0", "FIELDS", "1", "field1"); - ClassicAssert.AreEqual(1, result.Length); - ClassicAssert.AreEqual(1L, (long)result[0]); - ClassicAssert.IsFalse(db.HashExists("myhash", "field1")); - - // Test with negative TTL (should delete fields) - db.HashSet("myhash", "field1", "hello"); - result = (RedisResult[])db.Execute("HEXPIRE", "myhash", "-1", "FIELDS", "1", "field1"); - ClassicAssert.AreEqual(1, result.Length); - ClassicAssert.AreEqual(1L, (long)result[0]); - ClassicAssert.IsFalse(db.HashExists("myhash", "field1")); - } - #endregion #region LightClientTests From c27de0dc361ca8e0bc75f3a3d9c4f7b1bfa0bdf1 Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Wed, 11 Dec 2024 17:57:29 +0530 Subject: [PATCH 05/24] Handling serialization and Deserialization --- libs/server/Objects/Hash/HashObject.cs | 95 +++++++++++++++----------- 1 file changed, 56 insertions(+), 39 deletions(-) diff --git a/libs/server/Objects/Hash/HashObject.cs b/libs/server/Objects/Hash/HashObject.cs index 3b51171bfa..6f733c8909 100644 --- a/libs/server/Objects/Hash/HashObject.cs +++ b/libs/server/Objects/Hash/HashObject.cs @@ -51,6 +51,9 @@ 
public unsafe partial class HashObject : GarnetObjectBase Dictionary expirationTimes; PriorityQueue expirationQueue; + // Byte #31 is used to denote if key has expiration (1) or not (0) + private const int ExpirationBitMask = 1 << 31; + /// /// Constructor /// @@ -66,34 +69,37 @@ public HashObject(long expiration = 0) public HashObject(BinaryReader reader) : base(reader, MemoryUtils.DictionaryOverhead) { - // TODO: Handle deserialization of expiration times hash = new Dictionary(ByteArrayComparer.Instance); int count = reader.ReadInt32(); for (int i = 0; i < count; i++) { - var item = reader.ReadBytes(reader.ReadInt32()); + var keyLength = reader.ReadInt32(); + var hasExpiration = (keyLength & ExpirationBitMask) != 0; + keyLength &= ~ExpirationBitMask; + var item = reader.ReadBytes(keyLength); var value = reader.ReadBytes(reader.ReadInt32()); - hash.Add(item, value); - - this.UpdateSize(item, value); - } - int expireCount = reader.ReadInt32(); - // TODO: Can we delete expired items during serialization and deserialization? - if (expireCount > 0) - { - expirationTimes = new Dictionary(ByteArrayComparer.Instance); - expirationQueue = new PriorityQueue(); - for (int i = 0; i < count; i++) + if (hasExpiration) { - var item = reader.ReadBytes(reader.ReadInt32()); - var value = reader.ReadInt64(); - expirationTimes.Add(item, value); - expirationQueue.Enqueue(item, value); - - // TODO: Update size + var expiration = reader.ReadInt64(); + var isExpired = expiration < DateTimeOffset.UtcNow.Ticks; + if (!isExpired) + { + hash.Add(item, value); + expirationTimes ??= new Dictionary(ByteArrayComparer.Instance); + expirationQueue ??= new PriorityQueue(); + expirationTimes.Add(item, expiration); + expirationQueue.Enqueue(item, expiration); + // TODO: Update size + } + } + else + { + hash.Add(item, value); } + + this.UpdateSize(item, value); } } @@ -116,10 +122,23 @@ public override void DoSerialize(BinaryWriter writer) { base.DoSerialize(writer); - int count = hash.Count; + DeleteExpiredItems(); + + int count = hash.Count; // Since expired items are already deleted, no need to worry about expiring items writer.Write(count); foreach (var kvp in hash) { + if (expirationTimes is not null && expirationTimes.TryGetValue(kvp.Key, out var expiration)) + { + writer.Write(kvp.Key.Length | ExpirationBitMask); + writer.Write(kvp.Key); + writer.Write(kvp.Value.Length); + writer.Write(kvp.Value); + writer.Write(expiration); + count--; + continue; + } + writer.Write(kvp.Key.Length); writer.Write(kvp.Key); writer.Write(kvp.Value.Length); @@ -127,22 +146,6 @@ public override void DoSerialize(BinaryWriter writer) count--; } - if (expirationTimes is not null) - { - // TODO: Can we delete expired items during serialization and deserialization? - writer.Write(expirationTimes.Count); - foreach (var kvp in expirationTimes) - { - writer.Write(kvp.Key.Length); - writer.Write(kvp.Key); - writer.Write(kvp.Value); - } - } - else - { - // TODO: This will break backward compatibility, Do we need to handle this? 
- writer.Write(0); - } Debug.Assert(count == 0); } @@ -419,11 +422,25 @@ private IEnumerable> AsEnumerable() { if (HasExpirableItems()) { - // TODO: Check the performance of this implementation - return hash.Where(x => !IsExpired(x.Key)); + return GetNonExpiredItems(); } - return hash; + return hash.AsEnumerable(); + } + + /// + /// Use `AsEnumerable` instead of this method to avoid checking for expired items if there is no expiring item + /// + /// + private IEnumerable> GetNonExpiredItems() + { + foreach (var item in hash) + { + if (!IsExpired(item.Key)) + { + yield return item; + } + } } private void Add(byte[] key, byte[] value) From a650ba9ad3bc3bb7e38624cf38109e40d40d2948 Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Thu, 12 Dec 2024 01:57:11 +0530 Subject: [PATCH 06/24] Added LTM test cases --- test/Garnet.test/RespHashTests.cs | 64 ++++++++++++++++++++++++++++++- 1 file changed, 63 insertions(+), 1 deletion(-) diff --git a/test/Garnet.test/RespHashTests.cs b/test/Garnet.test/RespHashTests.cs index 6a01179c4c..8d28cbc6cf 100644 --- a/test/Garnet.test/RespHashTests.cs +++ b/test/Garnet.test/RespHashTests.cs @@ -7,6 +7,7 @@ using System.Threading.Tasks; using Garnet.server; using NUnit.Framework; +using NUnit.Framework.Interfaces; using NUnit.Framework.Legacy; using StackExchange.Redis; @@ -23,7 +24,7 @@ public class RespHashTests public void Setup() { TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true); - server = TestUtils.CreateGarnetServer(TestUtils.MethodTestDir, lowMemory: true); + server = TestUtils.CreateGarnetServer(TestUtils.MethodTestDir, enableReadCache: true, enableObjectStoreReadCache: true, lowMemory: true); server.Start(); } @@ -1093,6 +1094,67 @@ public async Task CanDoHashExpire() ClassicAssert.AreEqual(0, items.Length); } + [Test] + public async Task CanDoHashExpireLTM() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig(allowAdmin: true)); + var db = redis.GetDatabase(0); + var server = redis.GetServer(TestUtils.Address, TestUtils.Port); + + string[] smallExpireKeys = ["user:user0", "user:user1"]; + string[] largeExpireKeys = ["user:user2", "user:user3"]; + + foreach (var key in smallExpireKeys) + { + db.HashSet(key, [new HashEntry("Field1", "StringValue"), new HashEntry("Field2", "1")]); + db.Execute("HEXPIRE", key, "2", "FIELDS", "1", "Field1"); + } + + foreach (var key in largeExpireKeys) + { + db.HashSet(key, [new HashEntry("Field1", "StringValue"), new HashEntry("Field2", "1")]); + db.Execute("HEXPIRE", key, "4", "FIELDS", "1", "Field1"); + } + + // Create LTM (larger than memory) DB by inserting 100 keys + for (int i = 4; i < 100; i++) + { + var key = "user:user" + i; + db.HashSet(key, [new HashEntry("Field1", "StringValue"), new HashEntry("Field2", "1")]); + } + + var info = TestUtils.GetStoreAddressInfo(server, includeReadCache: true, isObjectStore: true); + // Ensure data has spilled to disk + ClassicAssert.Greater(info.HeadAddress, info.BeginAddress); + + await Task.Delay(2000); + + var result = db.HashExists(smallExpireKeys[0], "Field1"); + ClassicAssert.IsFalse(result); + result = db.HashExists(smallExpireKeys[1], "Field1"); + ClassicAssert.IsFalse(result); + result = db.HashExists(largeExpireKeys[0], "Field1"); + ClassicAssert.IsTrue(result); + result = db.HashExists(largeExpireKeys[1], "Field1"); + ClassicAssert.IsTrue(result); + var ttl = db.HashFieldGetTimeToLive(largeExpireKeys[0], ["Field1"]); + ClassicAssert.AreEqual(ttl.Length, 1); + ClassicAssert.Greater(ttl[0], 0); + ClassicAssert.LessOrEqual(ttl[0], 
2000); + ttl = db.HashFieldGetTimeToLive(largeExpireKeys[1], ["Field1"]); + ClassicAssert.AreEqual(ttl.Length, 1); + ClassicAssert.Greater(ttl[0], 0); + ClassicAssert.LessOrEqual(ttl[0], 2000); + + await Task.Delay(2000); + + result = db.HashExists(largeExpireKeys[0], "Field1"); + ClassicAssert.IsFalse(result); + result = db.HashExists(largeExpireKeys[1], "Field1"); + ClassicAssert.IsFalse(result); + + } + [Test] [TestCase("HEXPIRE", "NX", Description = "Set expiry only when no expiration exists")] [TestCase("HEXPIRE", "XX", Description = "Set expiry only when expiration exists")] From 968e5a3e3341b2ee2a9d78dfbcf113ee1636f4a7 Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Thu, 12 Dec 2024 16:25:45 +0530 Subject: [PATCH 07/24] Added docs --- test/Garnet.test/RespHashTests.cs | 6 + website/docs/commands/api-compatibility.md | 18 +- website/docs/commands/data-structures.md | 216 +++++++++++++++++++++ 3 files changed, 231 insertions(+), 9 deletions(-) diff --git a/test/Garnet.test/RespHashTests.cs b/test/Garnet.test/RespHashTests.cs index 8d28cbc6cf..580774477c 100644 --- a/test/Garnet.test/RespHashTests.cs +++ b/test/Garnet.test/RespHashTests.cs @@ -1153,6 +1153,12 @@ public async Task CanDoHashExpireLTM() result = db.HashExists(largeExpireKeys[1], "Field1"); ClassicAssert.IsFalse(result); + var data = db.HashGetAll("user:user4"); + ClassicAssert.AreEqual(2, data.Length); + ClassicAssert.AreEqual("Field1", data[0].Name.ToString()); + ClassicAssert.AreEqual("StringValue", data[0].Value.ToString()); + ClassicAssert.AreEqual("Field2", data[1].Name.ToString()); + ClassicAssert.AreEqual("1", data[1].Value.ToString()); } [Test] diff --git a/website/docs/commands/api-compatibility.md b/website/docs/commands/api-compatibility.md index 3e9ba3f4bc..0451642b97 100644 --- a/website/docs/commands/api-compatibility.md +++ b/website/docs/commands/api-compatibility.md @@ -163,9 +163,9 @@ Note that this list is subject to change as we continue to expand our API comman | | [GEOSEARCHSTORE](data-structures.md#geosearchstore) | âž• | Partially Implemented | | **HASH** | [HDEL](data-structures.md#hdel) | âž• | | | | [HEXISTS](data-structures.md#hexists) | âž• | | -| | HEXPIRE | âž– | | -| | HEXPIREAT | âž– | | -| | HEXPIRETIME | âž– | | +| | [HEXPIRE](data-structures.md#hexpire) | âž• | | +| | [HEXPIREAT](data-structures.md#hexpireat) | âž• | | +| | [HEXPIRETIME](data-structures.md#hexpiretime) | âž• | | | | [HGET](data-structures.md#hget) | âž• | | | | [HGETALL](data-structures.md#hgetall) | âž• | | | | [HINCRBY](data-structures.md#hincrby) | âž• | | @@ -174,17 +174,17 @@ Note that this list is subject to change as we continue to expand our API comman | | [HLEN](data-structures.md#hlen) | âž• | | | | [HMGET](data-structures.md#hmget) | âž• | | | | [HMSET](data-structures.md#hmset) | âž• | (Deprecated) | -| | HPERSIST | âž– | | -| | HPEXPIRE | âž– | | -| | HPEXPIREAT | âž– | | -| | HPEXPIRETIME | âž– | | -| | HPTTL | âž– | | +| | [HPERSIST](data-structures.md#hpersist) | âž• | | +| | [HPEXPIRE](data-structures.md#hpexpire) | âž• | | +| | [HPEXPIREAT](data-structures.md#hpexpireat) | âž• | | +| | [HPEXPIRETIME](data-structures.md#hepxpiretime) | âž• | | +| | [HPTTL](data-structures.md#hpttl) | âž• | | | | [HRANDFIELD](data-structures.md#hrandfield) | âž• | | | | [HSCAN](data-structures.md#hscan) | âž• | | | | [HSET](data-structures.md#hset) | âž• | | | | [HSETNX](data-structures.md#hsetnx) | âž• | | | | [HSTRLEN](data-structures.md#hstrlen) | âž• | | -| | HTTL | âž– | | +| | [HTTL](data-structures.md#httl) | 
➕ | |
| | [HVALS](data-structures.md#hvals) | ➕ | |
| **HYPERLOGLOG** | [PFADD](analytics.md#pfadd) | ➕ | |
| | [PFCOUNT](analytics.md#pfcount) | ➕ | |
diff --git a/website/docs/commands/data-structures.md b/website/docs/commands/data-structures.md
index fccfe83e2e..39c22e4f8b 100644
--- a/website/docs/commands/data-structures.md
+++ b/website/docs/commands/data-structures.md
@@ -218,6 +218,222 @@ Returns all values in the hash stored at **key**.

---
+
+### HEXPIRE
+
+#### Syntax
+
+```bash
+ HEXPIRE key seconds [NX | XX | GT | LT] FIELDS numfields field [field ...]
+```
+
+Sets a timeout on one or more fields of a hash key. After the timeout has expired, the fields will automatically be deleted. The timeout is specified in seconds.
+
+The command supports several options to control when the expiration should be set:
+
+* **NX:** Only set expiry on fields that have no existing expiry
+* **XX:** Only set expiry on fields that already have an expiry set
+* **GT:** Only set expiry when it's greater than the current expiry
+* **LT:** Only set expiry when it's less than the current expiry
+
+The **NX**, **XX**, **GT**, and **LT** options are mutually exclusive.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* 2 if the field was deleted because the specified timeout is zero or negative
+* 1 if the expiration time was set or updated
+* 0 if the NX, XX, GT or LT condition was not met
+* -2 if the field or the key does not exist
+
+---
+
+### HEXPIREAT
+
+#### Syntax
+
+```bash
+ HEXPIREAT key unix-time-seconds [NX | XX | GT | LT] FIELDS numfields field [field ...]
+```
+
+Sets an absolute expiration time (Unix timestamp in seconds) for one or more hash fields. After the timestamp has passed, the fields will automatically be deleted.
+
+The command supports several options to control when the expiration should be set:
+
+* **NX:** Only set expiry on fields that have no existing expiry
+* **XX:** Only set expiry on fields that already have an expiry set
+* **GT:** Only set expiry when it's greater than the current expiry
+* **LT:** Only set expiry when it's less than the current expiry
+
+The **NX**, **XX**, **GT**, and **LT** options are mutually exclusive.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* 2 if the field was deleted because the specified timestamp is already in the past
+* 1 if the expiration time was set or updated
+* 0 if the NX, XX, GT or LT condition was not met
+* -2 if the field or the key does not exist
+
+---
+
+### HPEXPIRE
+
+#### Syntax
+
+```bash
+ HPEXPIRE key milliseconds [NX | XX | GT | LT] FIELDS numfields field [field ...]
+```
+
+Similar to HEXPIRE but the timeout is specified in milliseconds instead of seconds.
+
+The command supports several options to control when the expiration should be set:
+
+* **NX:** Only set expiry on fields that have no existing expiry
+* **XX:** Only set expiry on fields that already have an expiry set
+* **GT:** Only set expiry when it's greater than the current expiry
+* **LT:** Only set expiry when it's less than the current expiry
+
+The **NX**, **XX**, **GT**, and **LT** options are mutually exclusive.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* 2 if the field was deleted because the specified timeout is zero or negative
+* 1 if the expiration time was set or updated
+* 0 if the NX, XX, GT or LT condition was not met
+* -2 if the field or the key does not exist
+
+---
+
+### HPEXPIREAT
+
+#### Syntax
+
+```bash
+ HPEXPIREAT key unix-time-milliseconds [NX | XX | GT | LT] FIELDS numfields field [field ...]
+```
+
+Similar to HEXPIREAT but uses a Unix timestamp in milliseconds instead of seconds.
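+
+For illustration, a possible client session is shown below (prompt and reply formatting follow redis-cli conventions; the key, field, and timestamp values are arbitrary examples):
+
+```bash
+> HSET myhash field1 "hello"
+(integer) 1
+> HPEXPIREAT myhash 2000000000000 FIELDS 1 field1
+1) (integer) 1
+> HPEXPIREAT myhash 1000 FIELDS 1 field1
+1) (integer) 2
+```
+
+The first call sets an expiration in the future, so the per-field reply is 1; the second call uses a timestamp that is already in the past, so the field is deleted immediately and the reply is 2.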
+
+The command supports several options to control when the expiration should be set:
+
+* **NX:** Only set expiry on fields that have no existing expiry
+* **XX:** Only set expiry on fields that already have an expiry set
+* **GT:** Only set expiry when it's greater than the current expiry
+* **LT:** Only set expiry when it's less than the current expiry
+
+The **NX**, **XX**, **GT**, and **LT** options are mutually exclusive.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* 2 if the field was deleted because the specified timestamp is already in the past
+* 1 if the expiration time was set or updated
+* 0 if the NX, XX, GT or LT condition was not met
+* -2 if the field or the key does not exist
+
+---
+
+### HTTL
+
+#### Syntax
+
+```bash
+ HTTL key FIELDS numfields field [field ...]
+```
+
+Returns the remaining time to live in seconds for one or more hash fields that have a timeout set.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* TTL in seconds if the field exists and has an expiry set
+* -1 if the field exists but has no expiry set
+* -2 if the field does not exist
+
+---
+
+### HPTTL
+
+#### Syntax
+
+```bash
+ HPTTL key FIELDS numfields field [field ...]
+```
+
+Similar to HTTL but returns the remaining time to live in milliseconds instead of seconds.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* TTL in milliseconds if the field exists and has an expiry set
+* -1 if the field exists but has no expiry set
+* -2 if the field does not exist
+
+---
+
+### HEXPIRETIME
+
+#### Syntax
+
+```bash
+ HEXPIRETIME key FIELDS numfields field [field ...]
+```
+
+Returns the absolute Unix timestamp (in seconds) at which the specified hash fields will expire.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* Unix timestamp in seconds when the field will expire
+* -1 if the field exists but has no expiry set
+* -2 if the field does not exist
+
+---
+
+### HPEXPIRETIME
+
+#### Syntax
+
+```bash
+ HPEXPIRETIME key FIELDS numfields field [field ...]
+```
+
+Similar to HEXPIRETIME but returns the expiry timestamp in milliseconds instead of seconds.
+
+#### Resp Reply
+
+Array reply: For each field, returns:
+
+* Unix timestamp in milliseconds when the field will expire
+* -1 if the field exists but has no expiry set
+* -2 if the field does not exist
+
+---
+
+### HPERSIST
+
+#### Syntax
+
+```bash
+ HPERSIST key FIELDS numfields field [field ...]
+```
+
+Removes the expiration from the specified hash fields, making them persistent.
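+
+For illustration, a possible client session is shown below (prompt and reply formatting follow redis-cli conventions; the key and field names are arbitrary examples):
+
+```bash
+> HSET myhash field1 "hello"
+(integer) 1
+> HEXPIRE myhash 300 FIELDS 1 field1
+1) (integer) 1
+> HPERSIST myhash FIELDS 1 field1
+1) (integer) 1
+> HTTL myhash FIELDS 1 field1
+1) (integer) -1
+```
+
+After HPERSIST removes the expiration, HTTL reports -1 for the field because it no longer has an associated timeout.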
+ +#### Resp Reply + +Array reply: For each field, returns: + +* 1 if the timeout was removed +* 0 if the field exists but has no timeout +* -1 if the field does not exist + +--- + ## List ### BLMOVE From 4a7a71179adce3bc5bf60709d70fc0230868f29f Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Thu, 12 Dec 2024 20:21:23 +0530 Subject: [PATCH 08/24] Added HCOLLECT and ACL, Slot verification tests --- libs/resources/RespCommandsDocs.json | 6 + libs/resources/RespCommandsInfo.json | 10 + libs/server/API/GarnetApiObjectCommands.cs | 3 + libs/server/API/IGarnetApi.cs | 7 + libs/server/Objects/Hash/HashObject.cs | 3 + libs/server/Objects/Hash/HashObjectImpl.cs | 10 + libs/server/Resp/AdminCommands.cs | 33 +++- libs/server/Resp/CmdStrings.cs | 1 + libs/server/Resp/Objects/HashCommands.cs | 27 +++ libs/server/Resp/Parser/RespCommand.cs | 5 + libs/server/Resp/RespServerSession.cs | 6 +- .../Storage/Session/ObjectStore/HashOps.cs | 33 +++- .../CommandInfoUpdater/SupportedCommand.cs | 1 + .../RedirectTests/BaseCommand.cs | 173 ++++++++++++++++++ .../ClusterSlotVerificationTests.cs | 70 +++++++ test/Garnet.test/Resp/ACL/RespCommandTests.cs | 159 ++++++++++++++++ test/Garnet.test/RespHashTests.cs | 44 ++++- website/docs/commands/data-structures.md | 18 ++ 18 files changed, 602 insertions(+), 7 deletions(-) diff --git a/libs/resources/RespCommandsDocs.json b/libs/resources/RespCommandsDocs.json index 9d9c8ca65e..9daf6e1541 100644 --- a/libs/resources/RespCommandsDocs.json +++ b/libs/resources/RespCommandsDocs.json @@ -2751,6 +2751,12 @@ } ] }, + { + "Command": "HCOLLECT", + "Name": "HCOLLECT", + "Summary": "Manually trigger deletion of expired fields from memory", + "Group": "Hash" + }, { "Command": "HDEL", "Name": "HDEL", diff --git a/libs/resources/RespCommandsInfo.json b/libs/resources/RespCommandsInfo.json index f69d67c52b..d91d4d3c39 100644 --- a/libs/resources/RespCommandsInfo.json +++ b/libs/resources/RespCommandsInfo.json @@ -1561,6 +1561,16 @@ ], "SubCommands": null }, + { + "Command": "HCOLLECT", + "Name": "HCOLLECT", + "Arity": 1, + "Flags": "Admin, Write", + "FirstKey": 1, + "LastKey": 1, + "Step": 1, + "AclCategories": "Hash, Write, Admin, Garnet" + }, { "Command": "HDEL", "Name": "HDEL", diff --git a/libs/server/API/GarnetApiObjectCommands.cs b/libs/server/API/GarnetApiObjectCommands.cs index 6097e51bd5..9e6f956f8c 100644 --- a/libs/server/API/GarnetApiObjectCommands.cs +++ b/libs/server/API/GarnetApiObjectCommands.cs @@ -493,6 +493,9 @@ public GarnetStatus HashScan(ArgSlice key, long cursor, string match, int count, public GarnetStatus HashTimeToLive(ArgSlice key, bool isMilliseconds, bool isTimestamp, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter) => storageSession.HashTimeToLive(key, isMilliseconds, isTimestamp, ref input, ref outputFooter, ref objectContext); + public GarnetStatus HashCollect(ArgSlice key, ref ObjectInput input) + => storageSession.HashCollect(key, ref input, ref objectContext); + #endregion } diff --git a/libs/server/API/IGarnetApi.cs b/libs/server/API/IGarnetApi.cs index 2a9d214bc0..e003016db3 100644 --- a/libs/server/API/IGarnetApi.cs +++ b/libs/server/API/IGarnetApi.cs @@ -993,6 +993,13 @@ public interface IGarnetApi : IGarnetReadApi, IGarnetAdvancedApi /// The status of the operation. GarnetStatus HashPersist(ArgSlice key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter); + /// + /// Delete already expired fields from the hash. + /// + /// The key of the hash. + /// The status of the operation. 
+ GarnetStatus HashCollect(ArgSlice key, ref ObjectInput input); + #endregion #region BitMaps Methods diff --git a/libs/server/Objects/Hash/HashObject.cs b/libs/server/Objects/Hash/HashObject.cs index 6f733c8909..560897c641 100644 --- a/libs/server/Objects/Hash/HashObject.cs +++ b/libs/server/Objects/Hash/HashObject.cs @@ -227,6 +227,9 @@ public override unsafe bool Operate(ref ObjectInput input, ref SpanByteAndMemory case HashOperation.HRANDFIELD: HashRandomField(ref input, ref output); break; + case HashOperation.HCOLLECT: + HashCollect(ref input, _output); + break; case HashOperation.HSCAN: if (ObjectUtils.ReadScanInput(ref input, ref output, out var cursorInput, out var pattern, out var patternLength, out var limitCount, out bool isNoValue, out var error)) diff --git a/libs/server/Objects/Hash/HashObjectImpl.cs b/libs/server/Objects/Hash/HashObjectImpl.cs index 956fb76124..fa3d84323f 100644 --- a/libs/server/Objects/Hash/HashObjectImpl.cs +++ b/libs/server/Objects/Hash/HashObjectImpl.cs @@ -298,6 +298,16 @@ private void HashSet(ref ObjectInput input, byte* output) } } + private void HashCollect(ref ObjectInput input, byte* output) + { + var _output = (ObjectOutputHeader*)output; + *_output = default; + + DeleteExpiredItems(); + + _output->result1 = 1; + } + private void HashGetKeysOrValues(ref ObjectInput input, ref SpanByteAndMemory output) { var count = Count(); diff --git a/libs/server/Resp/AdminCommands.cs b/libs/server/Resp/AdminCommands.cs index 923e3dbb5d..1630f6af26 100644 --- a/libs/server/Resp/AdminCommands.cs +++ b/libs/server/Resp/AdminCommands.cs @@ -18,7 +18,7 @@ namespace Garnet.server /// internal sealed unsafe partial class RespServerSession : ServerSessionBase { - private void ProcessAdminCommands(RespCommand command) + private void ProcessAdminCommands(RespCommand command, ref TGarnetApi storageApi) where TGarnetApi : IGarnetApi { /* * WARNING: Here is safe to add @slow commands (check how containsSlowCommand is used). 
@@ -49,6 +49,7 @@ RespCommand.MIGRATE or RespCommand.BGSAVE => NetworkBGSAVE(), RespCommand.COMMITAOF => NetworkCOMMITAOF(), RespCommand.FORCEGC => NetworkFORCEGC(), + RespCommand.HCOLLECT => NetworkHCOLLECT(ref storageApi), RespCommand.MONITOR => NetworkMonitor(), RespCommand.ACL_DELUSER => NetworkAclDelUser(), RespCommand.ACL_LIST => NetworkAclList(), @@ -567,6 +568,36 @@ private bool NetworkFORCEGC() return true; } + private bool NetworkHCOLLECT(ref TGarnetApi storageApi) + where TGarnetApi : IGarnetApi + { + if (parseState.Count != 1) + { + return AbortWithWrongNumberOfArguments(nameof(RespCommand.HCOLLECT)); + } + + var key = parseState.GetArgSliceByRef(0); + + var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HCOLLECT }; + var input = new ObjectInput(header); + + var status = storageApi.HashCollect(key, ref input); + + switch (status) + { + case GarnetStatus.OK: + while (!RespWriteUtils.WriteDirect(CmdStrings.RESP_OK, ref dcurr, dend)) + SendAndReset(); + break; + default: + while (!RespWriteUtils.WriteError(CmdStrings.RESP_ERRNOTFOUND, ref dcurr, dend)) + SendAndReset(); + break; + } + + return true; + } + private bool NetworkProcessClusterCommand(RespCommand command) { if (clusterSession == null) diff --git a/libs/server/Resp/CmdStrings.cs b/libs/server/Resp/CmdStrings.cs index fcb39ce3ee..03d7ba7cfb 100644 --- a/libs/server/Resp/CmdStrings.cs +++ b/libs/server/Resp/CmdStrings.cs @@ -105,6 +105,7 @@ static partial class CmdStrings public static ReadOnlySpan MAXLEN => "MAXLEN"u8; public static ReadOnlySpan maxlen => "maxlen"u8; public static ReadOnlySpan PUBSUB => "PUBSUB"u8; + public static ReadOnlySpan HCOLLECT => "HCOLLECT"u8; public static ReadOnlySpan CHANNELS => "CHANNELS"u8; public static ReadOnlySpan NUMPAT => "NUMPAT"u8; public static ReadOnlySpan NUMSUB => "NUMSUB"u8; diff --git a/libs/server/Resp/Objects/HashCommands.cs b/libs/server/Resp/Objects/HashCommands.cs index 8cb1774570..687429f3b8 100644 --- a/libs/server/Resp/Objects/HashCommands.cs +++ b/libs/server/Resp/Objects/HashCommands.cs @@ -659,6 +659,15 @@ private unsafe bool HashExpire(RespCommand command, ref TGarnetApi s while (!RespWriteUtils.WriteError(CmdStrings.RESP_ERR_WRONG_TYPE, ref dcurr, dend)) SendAndReset(); break; + case GarnetStatus.NOTFOUND: + while (!RespWriteUtils.WriteArrayLength(numFields, ref dcurr, dend)) + SendAndReset(); + for (var i = 0; i < numFields; i++) + { + while (!RespWriteUtils.WriteInteger(-2, ref dcurr, dend)) + SendAndReset(); + } + break; default: ProcessOutputWithHeader(outputFooter.spanByteAndMemory); break; @@ -740,6 +749,15 @@ private unsafe bool HashTimeToLive(RespCommand command, ref TGarnetA while (!RespWriteUtils.WriteError(CmdStrings.RESP_ERR_WRONG_TYPE, ref dcurr, dend)) SendAndReset(); break; + case GarnetStatus.NOTFOUND: + while (!RespWriteUtils.WriteArrayLength(numFields, ref dcurr, dend)) + SendAndReset(); + for (var i = 0; i < numFields; i++) + { + while (!RespWriteUtils.WriteInteger(-2, ref dcurr, dend)) + SendAndReset(); + } + break; default: ProcessOutputWithHeader(outputFooter.spanByteAndMemory); break; @@ -793,6 +811,15 @@ private unsafe bool HashPersist(ref TGarnetApi storageApi) while (!RespWriteUtils.WriteError(CmdStrings.RESP_ERR_WRONG_TYPE, ref dcurr, dend)) SendAndReset(); break; + case GarnetStatus.NOTFOUND: + while (!RespWriteUtils.WriteArrayLength(numFields, ref dcurr, dend)) + SendAndReset(); + for (var i = 0; i < numFields; i++) + { + while (!RespWriteUtils.WriteInteger(-2, ref dcurr, dend)) + SendAndReset(); + } + 
break; default: ProcessOutputWithHeader(outputFooter.spanByteAndMemory); break; diff --git a/libs/server/Resp/Parser/RespCommand.cs b/libs/server/Resp/Parser/RespCommand.cs index f0b41c6779..05d671a01e 100644 --- a/libs/server/Resp/Parser/RespCommand.cs +++ b/libs/server/Resp/Parser/RespCommand.cs @@ -107,6 +107,7 @@ public enum RespCommand : ushort GETDEL, GETEX, GETSET, + HCOLLECT, HDEL, HEXPIRE, HPEXPIRE, @@ -2099,6 +2100,10 @@ private RespCommand SlowParseCommand(ref int count, ref ReadOnlySpan speci return RespCommand.NONE; } } + else if (command.SequenceEqual(CmdStrings.HCOLLECT)) + { + return RespCommand.HCOLLECT; + } else { // Custom commands should have never been set when we reach this point diff --git a/libs/server/Resp/RespServerSession.cs b/libs/server/Resp/RespServerSession.cs index dde9fba92d..c9d1c6eeef 100644 --- a/libs/server/Resp/RespServerSession.cs +++ b/libs/server/Resp/RespServerSession.cs @@ -758,7 +758,7 @@ private bool ProcessOtherCommands(RespCommand command, ref TGarnetAp RespCommand.SCRIPT => TrySCRIPT(), RespCommand.EVAL => TryEVAL(), RespCommand.EVALSHA => TryEVALSHA(), - _ => Process(command) + _ => Process(command, ref storageApi) }; bool NetworkCLIENTID() @@ -806,9 +806,9 @@ bool NetworkCustomProcedure() } [MethodImpl(MethodImplOptions.AggressiveInlining)] - bool Process(RespCommand command) + bool Process(RespCommand command, ref TGarnetApi storageApi) { - ProcessAdminCommands(command); + ProcessAdminCommands(command, ref storageApi); return true; } diff --git a/libs/server/Storage/Session/ObjectStore/HashOps.cs b/libs/server/Storage/Session/ObjectStore/HashOps.cs index b2e9a9a972..14bec04637 100644 --- a/libs/server/Storage/Session/ObjectStore/HashOps.cs +++ b/libs/server/Storage/Session/ObjectStore/HashOps.cs @@ -585,7 +585,7 @@ public GarnetStatus HashTimeToLive(ArgSlice key, bool isMillisec { var innerInput = new ObjectInput(input.header, ref input.parseState, arg1: isMilliseconds ? 1 : 0, arg2: isTimestamp ? 
1 : 0); - return RMWObjectStoreOperationWithOutput(key.ToArray(), ref innerInput, ref objectContext, ref outputFooter); + return ReadObjectStoreOperationWithOutput(key.ToArray(), ref innerInput, ref objectContext, ref outputFooter); } /// @@ -600,5 +600,36 @@ public GarnetStatus HashTimeToLive(ArgSlice key, bool isMillisec public GarnetStatus HashPersist(ArgSlice key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) where TObjectContext : ITsavoriteContext => RMWObjectStoreOperationWithOutput(key.ToArray(), ref input, ref objectContext, ref outputFooter); + + public GarnetStatus HashCollect(ArgSlice key, ref ObjectInput input, ref TObjectContext objectContext) + where TObjectContext : ITsavoriteContext + { + if (key.ReadOnlySpan.SequenceEqual("*"u8)) + { + long cursor = 0; + long storeCursor = 0; + + // Scan all hash keys in batches + do + { + if (!DbScan(key, true, cursor, out storeCursor, out var hashKeys, 100, CmdStrings.HASH)) + { + return GarnetStatus.NOTFOUND; + } + + // Process each hash key + foreach (var hashKey in hashKeys) + { + RMWObjectStoreOperation(hashKey, ref input, out _, ref objectContext); + } + + cursor = storeCursor; + } while (storeCursor != 0); + + return GarnetStatus.OK; + } + + return RMWObjectStoreOperation(key.ToArray(), ref input, out _, ref objectContext); + } } } \ No newline at end of file diff --git a/playground/CommandInfoUpdater/SupportedCommand.cs b/playground/CommandInfoUpdater/SupportedCommand.cs index 66d891341d..abd1650fb1 100644 --- a/playground/CommandInfoUpdater/SupportedCommand.cs +++ b/playground/CommandInfoUpdater/SupportedCommand.cs @@ -136,6 +136,7 @@ public class SupportedCommand new("GETDEL", RespCommand.GETDEL), new("GETRANGE", RespCommand.GETRANGE), new("GETSET", RespCommand.GETSET), + new("HCOLLECT", RespCommand.HCOLLECT), new("HDEL", RespCommand.HDEL), new("HELLO", RespCommand.HELLO), new("HEXISTS", RespCommand.HEXISTS), diff --git a/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs b/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs index e256ede56c..4d18e92f16 100644 --- a/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs +++ b/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs @@ -2255,5 +2255,178 @@ public override string[] GetSingleSlotRequest() public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException(); } + + internal class HEXPIRE : BaseCommand + { + public override bool IsArrayCommand => false; + public override bool ArrayResponse => true; + public override string Command => nameof(HEXPIRE); + + public override string[] GetSingleSlotRequest() + { + var ssk = GetSingleSlotKeys; + return [ssk[0], "3", "FIELDS", "1", "field1"]; + } + + public override string[] GetCrossSlotRequest() => throw new NotImplementedException(); + + public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException(); + } + + internal class HPEXPIRE : BaseCommand + { + public override bool IsArrayCommand => false; + public override bool ArrayResponse => true; + public override string Command => nameof(HPEXPIRE); + + public override string[] GetSingleSlotRequest() + { + var ssk = GetSingleSlotKeys; + return [ssk[0], "3000", "FIELDS", "1", "field1"]; + } + + public override string[] GetCrossSlotRequest() => throw new NotImplementedException(); + + public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException(); + } + + internal class HEXPIREAT : BaseCommand + { + public override bool IsArrayCommand => 
false; + public override bool ArrayResponse => true; + public override string Command => nameof(HEXPIREAT); + + public override string[] GetSingleSlotRequest() + { + var timestamp = DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeSeconds().ToString(); + var ssk = GetSingleSlotKeys; + return [ssk[0], timestamp, "FIELDS", "1", "field1"]; + } + + public override string[] GetCrossSlotRequest() => throw new NotImplementedException(); + + public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException(); + } + + internal class HPEXPIREAT : BaseCommand + { + public override bool IsArrayCommand => false; + public override bool ArrayResponse => true; + public override string Command => nameof(HPEXPIREAT); + + public override string[] GetSingleSlotRequest() + { + var timestamp = DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeMilliseconds().ToString(); + var ssk = GetSingleSlotKeys; + return [ssk[0], timestamp, "FIELDS", "1", "field1"]; + } + + public override string[] GetCrossSlotRequest() => throw new NotImplementedException(); + + public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException(); + } + + internal class HTTL : BaseCommand + { + public override bool IsArrayCommand => false; + public override bool ArrayResponse => true; + public override string Command => nameof(HTTL); + + public override string[] GetSingleSlotRequest() + { + var ssk = GetSingleSlotKeys; + return [ssk[0], "FIELDS", "1", "field1"]; + } + + public override string[] GetCrossSlotRequest() => throw new NotImplementedException(); + + public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException(); + } + + internal class HPTTL : BaseCommand + { + public override bool IsArrayCommand => false; + public override bool ArrayResponse => true; + public override string Command => nameof(HPTTL); + + public override string[] GetSingleSlotRequest() + { + var ssk = GetSingleSlotKeys; + return [ssk[0], "FIELDS", "1", "field1"]; + } + + public override string[] GetCrossSlotRequest() => throw new NotImplementedException(); + + public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException(); + } + + internal class HEXPIRETIME : BaseCommand + { + public override bool IsArrayCommand => false; + public override bool ArrayResponse => true; + public override string Command => nameof(HEXPIRETIME); + + public override string[] GetSingleSlotRequest() + { + var ssk = GetSingleSlotKeys; + return [ssk[0], "FIELDS", "1", "field1"]; + } + + public override string[] GetCrossSlotRequest() => throw new NotImplementedException(); + + public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException(); + } + + internal class HPEXPIRETIME : BaseCommand + { + public override bool IsArrayCommand => false; + public override bool ArrayResponse => true; + public override string Command => nameof(HPEXPIRETIME); + + public override string[] GetSingleSlotRequest() + { + var ssk = GetSingleSlotKeys; + return [ssk[0], "FIELDS", "1", "field1"]; + } + + public override string[] GetCrossSlotRequest() => throw new NotImplementedException(); + + public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException(); + } + + internal class HPERSIST : BaseCommand + { + public override bool IsArrayCommand => false; + public override bool ArrayResponse => true; + public override string Command => nameof(HPERSIST); + + public override string[] GetSingleSlotRequest() + { + var ssk = GetSingleSlotKeys; + return 
[ssk[0], "FIELDS", "1", "field1"]; + } + + public override string[] GetCrossSlotRequest() => throw new NotImplementedException(); + + public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException(); + } + + internal class HCOLLECT : BaseCommand + { + public override bool IsArrayCommand => false; + public override bool ArrayResponse => false; + public override string Command => nameof(HCOLLECT); + + public override string[] GetSingleSlotRequest() + { + var ssk = GetSingleSlotKeys; + return [ssk[0]]; + } + + public override string[] GetCrossSlotRequest() => throw new NotImplementedException(); + + public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException(); + } + #endregion } \ No newline at end of file diff --git a/test/Garnet.test.cluster/RedirectTests/ClusterSlotVerificationTests.cs b/test/Garnet.test.cluster/RedirectTests/ClusterSlotVerificationTests.cs index 59c6bb488a..17036f535d 100644 --- a/test/Garnet.test.cluster/RedirectTests/ClusterSlotVerificationTests.cs +++ b/test/Garnet.test.cluster/RedirectTests/ClusterSlotVerificationTests.cs @@ -121,6 +121,16 @@ public class ClusterSlotVerificationTests new HEXISTS(), new HKEYS(), new HINCRBY(), + new HEXPIRE(), + new HPEXPIRE(), + new HEXPIREAT(), + new HPEXPIREAT(), + new HTTL(), + new HPTTL(), + new HEXPIRETIME(), + new HPEXPIRETIME(), + new HPERSIST(), + new HCOLLECT(), new CLUSTERGETPROC(), new CLUSTERSETPROC(), new WATCH(), @@ -305,6 +315,16 @@ public virtual void OneTimeTearDown() [TestCase("HEXISTS")] [TestCase("HKEYS")] [TestCase("HINCRBY")] + [TestCase("HEXPIRE")] + [TestCase("HPEXPIRE")] + [TestCase("HEXPIREAT")] + [TestCase("HPEXPIREAT")] + [TestCase("HTTL")] + [TestCase("HPTTL")] + [TestCase("HEXPIRETIME")] + [TestCase("HPEXPIRETIME")] + [TestCase("HPERSIST")] + [TestCase("HCOLLECT")] [TestCase("CLUSTERGETPROC")] [TestCase("CLUSTERSETPROC")] [TestCase("WATCH")] @@ -449,6 +469,16 @@ void GarnetClientSessionClusterDown(BaseCommand command) [TestCase("HEXISTS")] [TestCase("HKEYS")] [TestCase("HINCRBY")] + [TestCase("HEXPIRE")] + [TestCase("HPEXPIRE")] + [TestCase("HEXPIREAT")] + [TestCase("HPEXPIREAT")] + [TestCase("HTTL")] + [TestCase("HPTTL")] + [TestCase("HEXPIRETIME")] + [TestCase("HPEXPIRETIME")] + [TestCase("HPERSIST")] + [TestCase("HCOLLECT")] [TestCase("CLUSTERGETPROC")] [TestCase("CLUSTERSETPROC")] [TestCase("WATCHMS")] @@ -603,6 +633,16 @@ void GarnetClientSessionOK(BaseCommand command) [TestCase("HEXISTS")] [TestCase("HKEYS")] [TestCase("HINCRBY")] + [TestCase("HEXPIRE")] + [TestCase("HPEXPIRE")] + [TestCase("HEXPIREAT")] + [TestCase("HPEXPIREAT")] + [TestCase("HTTL")] + [TestCase("HPTTL")] + [TestCase("HEXPIRETIME")] + [TestCase("HPEXPIRETIME")] + [TestCase("HPERSIST")] + [TestCase("HCOLLECT")] [TestCase("CLUSTERGETPROC")] [TestCase("CLUSTERSETPROC")] [TestCase("WATCHMS")] @@ -749,6 +789,16 @@ void GarnetClientSessionCrossslotTest(BaseCommand command) [TestCase("HEXISTS")] [TestCase("HKEYS")] [TestCase("HINCRBY")] + [TestCase("HEXPIRE")] + [TestCase("HPEXPIRE")] + [TestCase("HEXPIREAT")] + [TestCase("HPEXPIREAT")] + [TestCase("HTTL")] + [TestCase("HPTTL")] + [TestCase("HEXPIRETIME")] + [TestCase("HPEXPIRETIME")] + [TestCase("HPERSIST")] + [TestCase("HCOLLECT")] [TestCase("CLUSTERGETPROC")] [TestCase("CLUSTERSETPROC")] [TestCase("WATCHMS")] @@ -902,6 +952,16 @@ void GarnetClientSessionMOVEDTest(BaseCommand command) [TestCase("HEXISTS")] [TestCase("HKEYS")] [TestCase("HINCRBY")] + [TestCase("HEXPIRE")] + [TestCase("HPEXPIRE")] + [TestCase("HEXPIREAT")] + 
[TestCase("HPEXPIREAT")] + [TestCase("HTTL")] + [TestCase("HPTTL")] + [TestCase("HEXPIRETIME")] + [TestCase("HPEXPIRETIME")] + [TestCase("HPERSIST")] + [TestCase("HCOLLECT")] [TestCase("CLUSTERGETPROC")] [TestCase("CLUSTERSETPROC")] [TestCase("WATCHMS")] @@ -1072,6 +1132,16 @@ void GarnetClientSessionASKTest(BaseCommand command) [TestCase("HEXISTS")] [TestCase("HKEYS")] [TestCase("HINCRBY")] + [TestCase("HEXPIRE")] + [TestCase("HPEXPIRE")] + [TestCase("HEXPIREAT")] + [TestCase("HPEXPIREAT")] + [TestCase("HTTL")] + [TestCase("HPTTL")] + [TestCase("HEXPIRETIME")] + [TestCase("HPEXPIRETIME")] + [TestCase("HPERSIST")] + [TestCase("HCOLLECT")] [TestCase("CLUSTERGETPROC")] [TestCase("CLUSTERSETPROC")] [TestCase("WATCHMS")] diff --git a/test/Garnet.test/Resp/ACL/RespCommandTests.cs b/test/Garnet.test/Resp/ACL/RespCommandTests.cs index 63f38dac7c..a77d5b3b64 100644 --- a/test/Garnet.test/Resp/ACL/RespCommandTests.cs +++ b/test/Garnet.test/Resp/ACL/RespCommandTests.cs @@ -3135,6 +3135,165 @@ static async Task DoSubStringAsync(GarnetClient client) } } + [Test] + public async Task HExpireACLsAsync() + { + await CheckCommandsAsync( + "HEXPIRE", + [DoHExpireAsync] + ); + + static async Task DoHExpireAsync(GarnetClient client) + { + var val = await client.ExecuteForStringArrayResultAsync("HEXPIRE", ["foo", "1", "FIELDS", "1", "bar"]); + ClassicAssert.AreEqual(1, val.Length); + ClassicAssert.AreEqual("-2", val[0]); + } + } + + [Test] + public async Task HPExpireACLsAsync() + { + await CheckCommandsAsync( + "HPEXPIRE", + [DoHPExpireAsync] + ); + + static async Task DoHPExpireAsync(GarnetClient client) + { + var val = await client.ExecuteForStringArrayResultAsync("HPEXPIRE", ["foo", "1", "FIELDS", "1", "bar"]); + ClassicAssert.AreEqual(1, val.Length); + ClassicAssert.AreEqual("-2", val[0]); + } + } + + [Test] + public async Task HExpireAtACLsAsync() + { + await CheckCommandsAsync( + "HEXPIREAT", + [DoHExpireAtAsync] + ); + + static async Task DoHExpireAtAsync(GarnetClient client) + { + var val = await client.ExecuteForStringArrayResultAsync("HEXPIREAT", ["foo", DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeSeconds().ToString(), "FIELDS", "1", "bar"]); + ClassicAssert.AreEqual(1, val.Length); + ClassicAssert.AreEqual("-2", val[0]); + } + } + + [Test] + public async Task HPExpireAtACLsAsync() + { + await CheckCommandsAsync( + "HPEXPIREAT", + [DoHPExpireAtAsync] + ); + + static async Task DoHPExpireAtAsync(GarnetClient client) + { + var val = await client.ExecuteForStringArrayResultAsync("HPEXPIREAT", ["foo", DateTimeOffset.UtcNow.AddSeconds(3).ToUnixTimeMilliseconds().ToString(), "FIELDS", "1", "bar"]); + ClassicAssert.AreEqual(1, val.Length); + ClassicAssert.AreEqual("-2", val[0]); + } + } + + [Test] + public async Task HExpireTimeACLsAsync() + { + await CheckCommandsAsync( + "HEXPIRETIME", + [DoHExpireTimeAsync] + ); + + static async Task DoHExpireTimeAsync(GarnetClient client) + { + var val = await client.ExecuteForStringArrayResultAsync("HEXPIRETIME", ["foo", "FIELDS", "1", "bar"]); + ClassicAssert.AreEqual(1, val.Length); + ClassicAssert.AreEqual("-2", val[0]); + } + } + + [Test] + public async Task HPExpireTimeACLsAsync() + { + await CheckCommandsAsync( + "HPEXPIRETIME", + [DoHPExpireTimeAsync] + ); + + static async Task DoHPExpireTimeAsync(GarnetClient client) + { + var val = await client.ExecuteForStringArrayResultAsync("HPEXPIRETIME", ["foo", "FIELDS", "1", "bar"]); + ClassicAssert.AreEqual(1, val.Length); + ClassicAssert.AreEqual("-2", val[0]); + } + } + + [Test] + public async Task HTTLACLsAsync() 
+ { + await CheckCommandsAsync( + "HTTL", + [DoHETTLAsync] + ); + + static async Task DoHETTLAsync(GarnetClient client) + { + var val = await client.ExecuteForStringArrayResultAsync("HTTL", ["foo", "FIELDS", "1", "bar"]); + ClassicAssert.AreEqual(1, val.Length); + ClassicAssert.AreEqual("-2", val[0]); + } + } + + [Test] + public async Task HPTTLACLsAsync() + { + await CheckCommandsAsync( + "HPTTL", + [DoHPETTLAsync] + ); + + static async Task DoHPETTLAsync(GarnetClient client) + { + var val = await client.ExecuteForStringArrayResultAsync("HPTTL", ["foo", "FIELDS", "1", "bar"]); + ClassicAssert.AreEqual(1, val.Length); + ClassicAssert.AreEqual("-2", val[0]); + } + } + + [Test] + public async Task HPersistACLsAsync() + { + await CheckCommandsAsync( + "HPERSIST", + [DoHPersistAsync] + ); + + static async Task DoHPersistAsync(GarnetClient client) + { + var val = await client.ExecuteForStringArrayResultAsync("HPERSIST", ["foo", "FIELDS", "1", "bar"]); + ClassicAssert.AreEqual(1, val.Length); + ClassicAssert.AreEqual("-2", val[0]); + } + } + + [Test] + public async Task HCollectACLsAsync() + { + await CheckCommandsAsync( + "HCOLLECT", + [DoHCollectAsync] + ); + + static async Task DoHCollectAsync(GarnetClient client) + { + var val = await client.ExecuteForStringResultAsync("HCOLLECT", ["foo"]); + ClassicAssert.AreEqual("OK", val); + } + } + [Test] public async Task HDelACLsAsync() { diff --git a/test/Garnet.test/RespHashTests.cs b/test/Garnet.test/RespHashTests.cs index 580774477c..f9b345084a 100644 --- a/test/Garnet.test/RespHashTests.cs +++ b/test/Garnet.test/RespHashTests.cs @@ -130,8 +130,6 @@ public void CanSetAndGetMultiplePairs() ClassicAssert.AreEqual("2021", result[1].ToString()); } - - [Test] public void CanDelSingleField() { @@ -1161,6 +1159,48 @@ public async Task CanDoHashExpireLTM() ClassicAssert.AreEqual("1", data[1].Value.ToString()); } + [Test] + public void CanDoHashExpireWithNonExistKey() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + + var result = db.Execute("HEXPIRE", "myhash", "3", "FIELDS", "1", "field1"); + var results = (RedisResult[])result; + ClassicAssert.AreEqual(1, results.Length); + ClassicAssert.AreEqual(-2, (long)results[0]); + } + + [Test] + public async Task CanDoHashCollect() + { + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + var db = redis.GetDatabase(0); + db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world"), new HashEntry("field3", "value3"), new HashEntry("field4", "value4"), new HashEntry("field5", "value5"), new HashEntry("field6", "value6")]); + + var result = db.Execute("HEXPIRE", "myhash", "1", "FIELDS", "2", "field1", "field2"); + var results = (RedisResult[])result; + ClassicAssert.AreEqual(2, results.Length); + ClassicAssert.AreEqual(1, (long)results[0]); + ClassicAssert.AreEqual(1, (long)results[1]); + + result = db.Execute("HEXPIRE", "myhash", "2", "FIELDS", "2", "field3", "field4"); + results = (RedisResult[])result; + ClassicAssert.AreEqual(2, results.Length); + ClassicAssert.AreEqual(1, (long)results[0]); + ClassicAssert.AreEqual(1, (long)results[1]); + + await Task.Delay(1000); + + var collectResult = (string)db.Execute("HCOLLECT", "myhash"); + ClassicAssert.AreEqual("OK", collectResult); + + await Task.Delay(1000); + + collectResult = (string)db.Execute("HCOLLECT", "*"); + ClassicAssert.AreEqual("OK", collectResult); + } + [Test] [TestCase("HEXPIRE", "NX", Description = "Set expiry only when no expiration 
exists")] [TestCase("HEXPIRE", "XX", Description = "Set expiry only when expiration exists")] diff --git a/website/docs/commands/data-structures.md b/website/docs/commands/data-structures.md index 39c22e4f8b..d503754dc8 100644 --- a/website/docs/commands/data-structures.md +++ b/website/docs/commands/data-structures.md @@ -434,6 +434,24 @@ Array reply: For each field, returns: --- +### HCOLLECT + +#### Syntax + +```bash + HCOLLECT key +``` + +Manualy trigger cleanup of expired field from memory for a given Hash set key. + +Use `*` as the key to collect it from all hash keys. + +#### Resp Reply + +Simple reply: OK response + +--- + ## List ### BLMOVE From f31633cad3622a4bc084418f5c72e79bf679b2d5 Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Thu, 12 Dec 2024 21:41:01 +0530 Subject: [PATCH 09/24] Add a background task to call HCOLLECT to delete expired items --- libs/host/Configuration/Options.cs | 4 ++ libs/server/Servers/GarnetServerOptions.cs | 5 +++ libs/server/StoreWrapper.cs | 44 +++++++++++++++++++ website/docs/getting-started/configuration.md | 1 + 4 files changed, 54 insertions(+) diff --git a/libs/host/Configuration/Options.cs b/libs/host/Configuration/Options.cs index 96fdd62433..768bc0d6f7 100644 --- a/libs/host/Configuration/Options.cs +++ b/libs/host/Configuration/Options.cs @@ -233,6 +233,9 @@ internal sealed class Options [IntRangeValidation(0, int.MaxValue)] [Option("compaction-freq", Required = false, HelpText = "Background hybrid log compaction frequency in seconds. 0 = disabled (compaction performed before checkpointing instead)")] public int CompactionFrequencySecs { get; set; } + [IntRangeValidation(0, int.MaxValue)] + [Option("hcollect-freq", Required = false, HelpText = "Frequency in seconds for the background task to perform Hash collection. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation.")] + public int HashCollectFrequencySecs { get; set; } [Option("compaction-type", Required = false, HelpText = "Hybrid log compaction type. Value options: None - no compaction, Shift - shift begin address without compaction (data loss), Scan - scan old pages and move live records to tail (no data loss), Lookup - lookup each record in compaction range, for record liveness checking using hash chain (no data loss)")] public LogCompactionType CompactionType { get; set; } @@ -652,6 +655,7 @@ public GarnetServerOptions GetServerOptions(ILogger logger = null) WaitForCommit = WaitForCommit.GetValueOrDefault(), AofSizeLimit = AofSizeLimit, CompactionFrequencySecs = CompactionFrequencySecs, + HashCollectFrequencySecs = HashCollectFrequencySecs, CompactionType = CompactionType, CompactionForceDelete = CompactionForceDelete.GetValueOrDefault(), CompactionMaxSegments = CompactionMaxSegments, diff --git a/libs/server/Servers/GarnetServerOptions.cs b/libs/server/Servers/GarnetServerOptions.cs index 0093cfd8e0..92effae6d8 100644 --- a/libs/server/Servers/GarnetServerOptions.cs +++ b/libs/server/Servers/GarnetServerOptions.cs @@ -136,6 +136,11 @@ public class GarnetServerOptions : ServerOptions /// public int CompactionFrequencySecs = 0; + /// + /// Hash collection frequency in seconds. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation. + /// + public int HashCollectFrequencySecs = 0; + /// /// Hybrid log compaction type. /// None - no compaction. 
diff --git a/libs/server/StoreWrapper.cs b/libs/server/StoreWrapper.cs index b94833916f..c1e1f36cad 100644 --- a/libs/server/StoreWrapper.cs +++ b/libs/server/StoreWrapper.cs @@ -17,6 +17,7 @@ namespace Garnet.server { + using static System.Reflection.Metadata.BlobBuilder; using MainStoreAllocator = SpanByteAllocator>; using MainStoreFunctions = StoreFunctions; @@ -413,6 +414,44 @@ async Task CompactionTask(int compactionFrequencySecs, CancellationToken token = } } + async Task HashCollectTask(int hashCollectFrequencySecs, CancellationToken token = default) + { + Debug.Assert(hashCollectFrequencySecs > 0); + try + { + var scratchBufferManager = new ScratchBufferManager(); + using var storageSession = new StorageSession(this, scratchBufferManager, null, null, logger); + var key = ArgSlice.FromPinnedSpan("*"u8); + + while (true) + { + if (token.IsCancellationRequested) return; + + if (objectStore is null) + { + logger?.LogWarning("HashCollectFrequencySecs option is configured but Object store is disabled. Stopping the background hash collect task."); + return; + } + + var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HCOLLECT }; + var input = new ObjectInput(header); + + storageSession.HashCollect(key, ref input, ref storageSession.objectStoreBasicContext); + scratchBufferManager.Reset(); + + await Task.Delay(hashCollectFrequencySecs * 1000, token); + } + } + catch (TaskCanceledException ex) when (token.IsCancellationRequested) + { + logger?.LogError(ex, "CompactionTask exception received for background hash collect task."); + } + catch (Exception ex) + { + logger?.LogCritical(ex, "Unknown exception received for background hash collect task. Hash collect task won't be resumed."); + } + } + void DoCompaction() { // Periodic compaction -> no need to compact before checkpointing @@ -567,6 +606,11 @@ internal void Start() Task.Run(async () => await CompactionTask(serverOptions.CompactionFrequencySecs, ctsCommit.Token)); } + if (serverOptions.HashCollectFrequencySecs > 0) + { + Task.Run(async () => await HashCollectTask(serverOptions.HashCollectFrequencySecs, ctsCommit.Token)); + } + if (serverOptions.AdjustedIndexMaxCacheLines > 0 || serverOptions.AdjustedObjectStoreIndexMaxCacheLines > 0) { Task.Run(() => IndexAutoGrowTask(ctsCommit.Token)); diff --git a/website/docs/getting-started/configuration.md b/website/docs/getting-started/configuration.md index b66cf15f89..eadab48d3d 100644 --- a/website/docs/getting-started/configuration.md +++ b/website/docs/getting-started/configuration.md @@ -119,6 +119,7 @@ For all available command line settings, run `GarnetServer.exe -h` or `GarnetSer | **WaitForCommit** | ```--aof-commit-wait``` | ```bool``` | | Wait for AOF to flush the commit before returning results to client. Warning: will greatly increase operation latency. | | **AofSizeLimit** | ```--aof-size-limit``` | ```string``` | Memory size | Maximum size of AOF (rounds down to power of 2) after which unsafe truncation will be applied. Left empty AOF will grow without bound unless a checkpoint is taken | | **CompactionFrequencySecs** | ```--compaction-freq``` | ```int``` | Integer in range:
[0, MaxValue] | Background hybrid log compaction frequency in seconds. 0 = disabled (compaction performed before checkpointing instead) |
+| **HashCollectFrequencySecs** | ```--hcollect-freq``` | ```int``` | Integer in range:<br/>[0, MaxValue] | Frequency in seconds for the background task to perform Hash collection. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation. |
| **CompactionType** | ```--compaction-type``` | ```LogCompactionType``` | None, Shift, Scan, Lookup | Hybrid log compaction type. Value options: None - No compaction, Shift - shift begin address without compaction (data loss), Scan - scan old pages and move live records to tail (no data loss), Lookup - lookup each record in compaction range, for record liveness checking using hash chain (no data loss) |
| **CompactionForceDelete** | ```--compaction-force-delete``` | ```bool``` | | Forcefully delete the inactive segments immediately after the compaction strategy (type) is applied. If false, take a checkpoint to actually delete the older data files from disk. |
| **CompactionMaxSegments** | ```--compaction-max-segments``` | ```int``` | Integer in range:
[0, MaxValue] | Number of log segments created on disk before compaction triggers. | From 006a2025fd0295df208693e38dd2494e6fb4ec37 Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Thu, 12 Dec 2024 21:51:30 +0530 Subject: [PATCH 10/24] Code format fix --- test/Garnet.test.cluster/RedirectTests/BaseCommand.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs b/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs index 4d18e92f16..fde5af941f 100644 --- a/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs +++ b/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs @@ -2256,7 +2256,7 @@ public override string[] GetSingleSlotRequest() public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException(); } - internal class HEXPIRE : BaseCommand + internal class HEXPIRE : BaseCommand { public override bool IsArrayCommand => false; public override bool ArrayResponse => true; @@ -2276,7 +2276,7 @@ public override string[] GetSingleSlotRequest() internal class HPEXPIRE : BaseCommand { public override bool IsArrayCommand => false; - public override bool ArrayResponse => true; + public override bool ArrayResponse => true; public override string Command => nameof(HPEXPIRE); public override string[] GetSingleSlotRequest() From f8628f1a16bd2b79244ca78b182e0f562b59a30b Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Thu, 12 Dec 2024 22:31:20 +0530 Subject: [PATCH 11/24] Fixed creation of empty object --- libs/server/Objects/Types/GarnetObject.cs | 6 ++++++ libs/server/Resp/AdminCommands.cs | 2 +- test/Garnet.test/Resp/ACL/RespCommandTests.cs | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/libs/server/Objects/Types/GarnetObject.cs b/libs/server/Objects/Types/GarnetObject.cs index f5366dffbb..7474d547e7 100644 --- a/libs/server/Objects/Types/GarnetObject.cs +++ b/libs/server/Objects/Types/GarnetObject.cs @@ -66,6 +66,12 @@ internal static bool NeedToCreate(RespInputHeader header) SetOperation.SPOP => false, _ => true, }, + GarnetObjectType.Hash => header.HashOp switch + { + HashOperation.HEXPIRE => false, + HashOperation.HCOLLECT => false, + _ => true, + }, GarnetObjectType.Expire => false, GarnetObjectType.PExpire => false, GarnetObjectType.Persist => false, diff --git a/libs/server/Resp/AdminCommands.cs b/libs/server/Resp/AdminCommands.cs index 1630f6af26..80d309dfbf 100644 --- a/libs/server/Resp/AdminCommands.cs +++ b/libs/server/Resp/AdminCommands.cs @@ -590,7 +590,7 @@ private bool NetworkHCOLLECT(ref TGarnetApi storageApi) SendAndReset(); break; default: - while (!RespWriteUtils.WriteError(CmdStrings.RESP_ERRNOTFOUND, ref dcurr, dend)) + while (!RespWriteUtils.WriteDirect(CmdStrings.RESP_ERRNOTFOUND, ref dcurr, dend)) SendAndReset(); break; } diff --git a/test/Garnet.test/Resp/ACL/RespCommandTests.cs b/test/Garnet.test/Resp/ACL/RespCommandTests.cs index a77d5b3b64..5e19adb56e 100644 --- a/test/Garnet.test/Resp/ACL/RespCommandTests.cs +++ b/test/Garnet.test/Resp/ACL/RespCommandTests.cs @@ -3290,7 +3290,7 @@ await CheckCommandsAsync( static async Task DoHCollectAsync(GarnetClient client) { var val = await client.ExecuteForStringResultAsync("HCOLLECT", ["foo"]); - ClassicAssert.AreEqual("OK", val); + ClassicAssert.IsNull(val); } } From 85527f2e8e037a29ff26f14735dcd7528d5c4eaf Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Thu, 12 Dec 2024 23:56:05 +0530 Subject: [PATCH 12/24] Finial commit before review comment fixes, hopefully --- libs/server/Objects/Hash/HashObject.cs | 89 
++++++++++++++----- libs/server/Objects/Hash/HashObjectImpl.cs | 12 --- .../Storage/Session/Common/MemoryUtils.cs | 6 ++ test/Garnet.test/RespHashTests.cs | 18 +++- 4 files changed, 90 insertions(+), 35 deletions(-) diff --git a/libs/server/Objects/Hash/HashObject.cs b/libs/server/Objects/Hash/HashObject.cs index 560897c641..8e7ac49189 100644 --- a/libs/server/Objects/Hash/HashObject.cs +++ b/libs/server/Objects/Hash/HashObject.cs @@ -256,13 +256,44 @@ public override unsafe bool Operate(ref ObjectInput input, ref SpanByteAndMemory private void UpdateSize(ReadOnlySpan key, ReadOnlySpan value, bool add = true) { - // TODO: Should we consider the size of the key and value of the expire dictionary and queue? var size = Utility.RoundUp(key.Length, IntPtr.Size) + Utility.RoundUp(value.Length, IntPtr.Size) + (2 * MemoryUtils.ByteArrayOverhead) + MemoryUtils.DictionaryEntryOverhead; this.Size += add ? size : -size; Debug.Assert(this.Size >= MemoryUtils.DictionaryOverhead); } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private void InitializeExpirationStructures() + { + if (expirationTimes is null) + { + expirationTimes = new Dictionary(ByteArrayComparer.Instance); + expirationQueue = new PriorityQueue(); + this.Size += MemoryUtils.DictionaryOverhead + MemoryUtils.PriorityQueueOverhead; + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private void UpdateExpirationSize(ReadOnlySpan key, bool add = true) + { + // Account for dictionary entry and priority queue entry + var size = IntPtr.Size + sizeof(long) + MemoryUtils.DictionaryEntryOverhead + + IntPtr.Size + sizeof(long) + MemoryUtils.PriorityQueueEntryOverhead; + this.Size += add ? size : -size; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private void CleanupExpirationStructures() + { + if (expirationTimes.Count == 0) + { + this.Size -= (IntPtr.Size + sizeof(long) + MemoryUtils.PriorityQueueOverhead) * expirationQueue.Count; + this.Size -= MemoryUtils.DictionaryOverhead + MemoryUtils.PriorityQueueOverhead; + expirationTimes = null; + expirationQueue = null; + } + } + /// public override unsafe void Scan(long start, out List items, out long cursor, int count = 10, byte* pattern = default, int patternLength = 0, bool isNoValue = false) { @@ -340,8 +371,7 @@ private void DeleteExpiredItems() if (!hasValue) { - expirationTimes = null; - expirationQueue = null; + CleanupExpirationStructures(); return; } @@ -349,21 +379,25 @@ private void DeleteExpiredItems() { if (expirationTimes.TryGetValue(key, out var actualExpiration) && actualExpiration == expiration) { - hash.Remove(key); expirationTimes.Remove(key); expirationQueue.Dequeue(); + UpdateExpirationSize(key, false); + if (hash.TryGetValue(key, out var value)) + { + hash.Remove(key); + UpdateSize(key, value, false); + } } else { expirationQueue.Dequeue(); + this.Size -= MemoryUtils.PriorityQueueEntryOverhead + IntPtr.Size + sizeof(long); } - // TODO: Update size based on if or else condition hasValue = expirationQueue.TryPeek(out key, out expiration); if (!hasValue) { - expirationTimes = null; - expirationQueue = null; + CleanupExpirationStructures(); break; } } @@ -382,7 +416,12 @@ private bool TryGetValue(byte[] key, out byte[] value) private bool Remove(byte[] key, out byte[] value) { DeleteExpiredItems(); - return hash.Remove(key, out value); + var result = hash.Remove(key, out value); + if (result) + { + UpdateSize(key, value, false); + } + return result; } private int Count() @@ -450,12 +489,16 @@ private void Add(byte[] key, byte[] value) { 
DeleteExpiredItems(); hash.Add(key, value); + UpdateSize(key, value); } private void Set(byte[] key, byte[] value) { DeleteExpiredItems(); hash[key] = value; + // Skip overhead as existing item is getting replaced. + this.Size += Utility.RoundUp(value.Length, IntPtr.Size) - + Utility.RoundUp(value.Length, IntPtr.Size); Persist(key); } @@ -463,6 +506,9 @@ private void SetWithoutPersist(byte[] key, byte[] value) { DeleteExpiredItems(); hash[key] = value; + // Skip overhead as existing item is getting replaced. + this.Size += Utility.RoundUp(value.Length, IntPtr.Size) - + Utility.RoundUp(value.Length, IntPtr.Size); } private int SetExpiration(byte[] key, long expiration, ExpireOption expireOption) @@ -478,11 +524,7 @@ private int SetExpiration(byte[] key, long expiration, ExpireOption expireOption return 2; } - if (expirationTimes is null) - { - expirationTimes = new Dictionary(ByteArrayComparer.Instance); - expirationQueue = new PriorityQueue(); - } + InitializeExpirationStructures(); if (expirationTimes.TryGetValue(key, out var currentExpiration)) { @@ -500,6 +542,11 @@ private int SetExpiration(byte[] key, long expiration, ExpireOption expireOption { return 0; } + + expirationTimes[key] = expiration; + expirationQueue.Enqueue(key, expiration); + // Size of dictionary entry already accounted for as the key already exists + this.Size += IntPtr.Size + sizeof(long) + MemoryUtils.PriorityQueueEntryOverhead; } else { @@ -512,10 +559,13 @@ private int SetExpiration(byte[] key, long expiration, ExpireOption expireOption { return 0; } + + expirationTimes[key] = expiration; + expirationQueue.Enqueue(key, expiration); + UpdateExpirationSize(key); } - expirationTimes[key] = expiration; - expirationQueue.Enqueue(key, expiration); + return 1; } @@ -529,13 +579,8 @@ private int Persist(byte[] key) if (expirationTimes is not null && expirationTimes.TryGetValue(key, out var currentExpiration)) { expirationTimes.Remove(key); - - if (expirationTimes.Count == 0) - { - expirationTimes = null; - expirationQueue = null; - } - + this.Size -= IntPtr.Size + sizeof(long) + MemoryUtils.DictionaryEntryOverhead; + CleanupExpirationStructures(); return 1; } diff --git a/libs/server/Objects/Hash/HashObjectImpl.cs b/libs/server/Objects/Hash/HashObjectImpl.cs index fa3d84323f..8ee76eda16 100644 --- a/libs/server/Objects/Hash/HashObjectImpl.cs +++ b/libs/server/Objects/Hash/HashObjectImpl.cs @@ -154,7 +154,6 @@ private void HashDelete(ref ObjectInput input, byte* output) if (Remove(key, out var hashValue)) { _output->result1++; - this.UpdateSize(key, hashValue, false); } } } @@ -284,16 +283,11 @@ private void HashSet(ref ObjectInput input, byte* output) if (!TryGetValue(key, out var hashValue)) { Add(key, value); - this.UpdateSize(key, value); _output->result1++; } else if ((hop == HashOperation.HSET || hop == HashOperation.HMSET) && hashValue != default) { - // TODO: Update size to remove expiration Set(key, value); - // Skip overhead as existing item is getting replaced. 
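The hunks above wire per-field TTLs into HashObject: a dictionary of deadlines plus a priority queue ordered by deadline, purged lazily before every write. For orientation, here is a minimal, self-contained sketch of that pattern only — string keys instead of Garnet's byte[]/ByteArrayComparer, and none of the size accounting shown above — not the actual Garnet code:

```csharp
using System;
using System.Collections.Generic;

// Sketch of lazy per-field expiration: the dictionary is the source of truth,
// the priority queue only orders deadlines and may hold stale entries.
class ExpiringHashSketch
{
    readonly Dictionary<string, string> hash = new();
    Dictionary<string, long> expirationTimes;        // field -> deadline (ticks)
    PriorityQueue<string, long> expirationQueue;     // ordered by deadline

    public void Set(string field, string value)
    {
        DeleteExpiredItems();                        // purge before every write
        hash[field] = value;
    }

    public void SetExpiration(string field, long deadlineTicks)
    {
        if (!hash.ContainsKey(field)) return;
        expirationTimes ??= new Dictionary<string, long>();
        expirationQueue ??= new PriorityQueue<string, long>();
        expirationTimes[field] = deadlineTicks;
        expirationQueue.Enqueue(field, deadlineTicks); // any older entry stays and is skipped later
    }

    public bool TryGet(string field, out string value)
    {
        // Reads do not mutate; a field with a past deadline is simply treated as absent.
        if (hash.TryGetValue(field, out value)
            && (expirationTimes is null
                || !expirationTimes.TryGetValue(field, out var deadline)
                || deadline >= DateTimeOffset.UtcNow.Ticks))
            return true;
        value = null;
        return false;
    }

    void DeleteExpiredItems()
    {
        if (expirationTimes is null) return;
        while (expirationQueue.TryPeek(out var field, out var deadline)
               && deadline < DateTimeOffset.UtcNow.Ticks)
        {
            expirationQueue.Dequeue();
            // Only honor queue entries that still match the dictionary; anything else is stale.
            if (expirationTimes.TryGetValue(field, out var actual) && actual == deadline)
            {
                expirationTimes.Remove(field);
                hash.Remove(field);
            }
        }
        if (expirationTimes.Count == 0) { expirationTimes = null; expirationQueue = null; }
    }
}
```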
- this.Size += Utility.RoundUp(value.Length, IntPtr.Size) - - Utility.RoundUp(hashValue.Length, IntPtr.Size); } } } @@ -406,14 +400,11 @@ private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output) resultBytes = resultSpan.ToArray(); SetWithoutPersist(key, resultBytes); - Size += Utility.RoundUp(resultBytes.Length, IntPtr.Size) - - Utility.RoundUp(value.Length, IntPtr.Size); } else { resultBytes = incrSlice.SpanByte.ToByteArray(); Add(key, resultBytes); - UpdateSize(key, resultBytes); } while (!RespWriteUtils.WriteIntegerFromBytes(resultBytes, ref curr, end)) @@ -447,14 +438,11 @@ private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output) resultBytes = Encoding.ASCII.GetBytes(result.ToString(CultureInfo.InvariantCulture)); SetWithoutPersist(key, resultBytes); - Size += Utility.RoundUp(resultBytes.Length, IntPtr.Size) - - Utility.RoundUp(value.Length, IntPtr.Size); } else { resultBytes = incrSlice.SpanByte.ToByteArray(); Add(key, resultBytes); - UpdateSize(key, resultBytes); } while (!RespWriteUtils.WriteBulkString(resultBytes, ref curr, end)) diff --git a/libs/server/Storage/Session/Common/MemoryUtils.cs b/libs/server/Storage/Session/Common/MemoryUtils.cs index bcbf3e0579..7d76ad3a8e 100644 --- a/libs/server/Storage/Session/Common/MemoryUtils.cs +++ b/libs/server/Storage/Session/Common/MemoryUtils.cs @@ -38,6 +38,12 @@ public static class MemoryUtils /// .Net object avg. overhead for holding a hash set entry public const int HashSetEntryOverhead = 40; + /// .Net object overhead for priority queue + public const int PriorityQueueOverhead = 80; + + /// .Net object avg. overhead for holding a priority queue entry + public const int PriorityQueueEntryOverhead = 48; + internal static long CalculateKeyValueSize(byte[] key, IGarnetObject value) { // Round up key size to account for alignment during allocation diff --git a/test/Garnet.test/RespHashTests.cs b/test/Garnet.test/RespHashTests.cs index f9b345084a..000c4212cc 100644 --- a/test/Garnet.test/RespHashTests.cs +++ b/test/Garnet.test/RespHashTests.cs @@ -1174,8 +1174,9 @@ public void CanDoHashExpireWithNonExistKey() [Test] public async Task CanDoHashCollect() { - using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig()); + using var redis = ConnectionMultiplexer.Connect(TestUtils.GetConfig(allowAdmin: true)); var db = redis.GetDatabase(0); + var server = redis.GetServers().First(); db.HashSet("myhash", [new HashEntry("field1", "hello"), new HashEntry("field2", "world"), new HashEntry("field3", "value3"), new HashEntry("field4", "value4"), new HashEntry("field5", "value5"), new HashEntry("field6", "value6")]); var result = db.Execute("HEXPIRE", "myhash", "1", "FIELDS", "2", "field1", "field2"); @@ -1190,15 +1191,30 @@ public async Task CanDoHashCollect() ClassicAssert.AreEqual(1, (long)results[0]); ClassicAssert.AreEqual(1, (long)results[1]); + var orginalMemory = (long)db.Execute("MEMORY", "USAGE", "myhash"); + await Task.Delay(1000); + var newMemory = (long)db.Execute("MEMORY", "USAGE", "myhash"); + ClassicAssert.AreEqual(newMemory, orginalMemory); + var collectResult = (string)db.Execute("HCOLLECT", "myhash"); ClassicAssert.AreEqual("OK", collectResult); + newMemory = (long)db.Execute("MEMORY", "USAGE", "myhash"); + ClassicAssert.Less(newMemory, orginalMemory); + orginalMemory = newMemory; + await Task.Delay(1000); + newMemory = (long)db.Execute("MEMORY", "USAGE", "myhash"); + ClassicAssert.AreEqual(newMemory, orginalMemory); + collectResult = (string)db.Execute("HCOLLECT", "*"); 
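For orientation, the flow this test drives, written as raw commands. This is an illustrative transcript under default settings, not captured output; replies are summarized in the comments:

```bash
HSET myhash field1 hello field2 world      # (integer) 2
HEXPIRE myhash 1 FIELDS 2 field1 field2    # array reply: 1, 1
MEMORY USAGE myhash                        # unchanged even after the TTL passes:
                                           # expired fields linger until collected
HCOLLECT myhash                            # +OK, purges expired fields and their metadata
MEMORY USAGE myhash                        # smaller once collected
HCOLLECT *                                 # +OK, collect across every hash key
```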
ClassicAssert.AreEqual("OK", collectResult); + + newMemory = (long)db.Execute("MEMORY", "USAGE", "myhash"); + ClassicAssert.Less(newMemory, orginalMemory); } [Test] From 14fa5ba44904b3660cde8cbdbb962dc4ffc60941 Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Fri, 13 Dec 2024 00:17:31 +0530 Subject: [PATCH 13/24] This time for sure, this is the last commit before review comments --- libs/host/defaults.conf | 3 +++ libs/resources/RespCommandsInfo.json | 19 +++++++++++++++++-- .../RedirectTests/BaseCommand.cs | 8 +++++++- 3 files changed, 27 insertions(+), 3 deletions(-) diff --git a/libs/host/defaults.conf b/libs/host/defaults.conf index aa18841004..2f6d3ee2f5 100644 --- a/libs/host/defaults.conf +++ b/libs/host/defaults.conf @@ -162,6 +162,9 @@ /* Background hybrid log compaction frequency in seconds. 0 = disabled (compaction performed before checkpointing instead) */ "CompactionFrequencySecs" : 0, + /* Frequency in seconds for the background task to perform Hash collection. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation. */ + "HashCollectFrequencySecs" : 0, + /* Hybrid log compaction type. Value options: */ /* None - no compaction */ /* Shift - shift begin address without compaction (data loss) */ diff --git a/libs/resources/RespCommandsInfo.json b/libs/resources/RespCommandsInfo.json index d91d4d3c39..d21e3148f1 100644 --- a/libs/resources/RespCommandsInfo.json +++ b/libs/resources/RespCommandsInfo.json @@ -1564,12 +1564,27 @@ { "Command": "HCOLLECT", "Name": "HCOLLECT", - "Arity": 1, + "Arity": 2, "Flags": "Admin, Write", "FirstKey": 1, "LastKey": 1, "Step": 1, - "AclCategories": "Hash, Write, Admin, Garnet" + "AclCategories": "Hash, Write, Admin, Garnet", + "KeySpecifications": [ + { + "BeginSearch": { + "TypeDiscriminator": "BeginSearchIndex", + "Index": 1 + }, + "FindKeys": { + "TypeDiscriminator": "FindKeysRange", + "LastKey": 0, + "KeyStep": 1, + "Limit": 0 + }, + "Flags": "RW, Access, Update" + } + ] }, { "Command": "HDEL", diff --git a/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs b/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs index fde5af941f..85d3cbae50 100644 --- a/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs +++ b/test/Garnet.test.cluster/RedirectTests/BaseCommand.cs @@ -2425,7 +2425,13 @@ public override string[] GetSingleSlotRequest() public override string[] GetCrossSlotRequest() => throw new NotImplementedException(); - public override ArraySegment[] SetupSingleSlotRequest() => throw new NotImplementedException(); + public override ArraySegment[] SetupSingleSlotRequest() + { + var ssk = GetSingleSlotKeys; + var setup = new ArraySegment[1]; + setup[0] = new ArraySegment(["HSET", ssk[0], "a", "1", "b", "2", "c", "3"]); + return setup; + } } #endregion From f0ef454973b2852fcf81c2afb32657c15fe8649d Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Fri, 13 Dec 2024 00:25:45 +0530 Subject: [PATCH 14/24] Fixed code format issue --- libs/server/Objects/Hash/HashObject.cs | 1 - 1 file changed, 1 deletion(-) diff --git a/libs/server/Objects/Hash/HashObject.cs b/libs/server/Objects/Hash/HashObject.cs index 8e7ac49189..cdece17d7c 100644 --- a/libs/server/Objects/Hash/HashObject.cs +++ b/libs/server/Objects/Hash/HashObject.cs @@ -565,7 +565,6 @@ private int SetExpiration(byte[] key, long expiration, ExpireOption expireOption UpdateExpirationSize(key); } - return 1; } From 7d7965b23ba97f86c49d9c8600a9a8f05c9fddfb Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Fri, 13 Dec 2024 01:05:21 +0530 
Subject: [PATCH 15/24] Trying to fix test failure in pipeline (not happening in local) --- test/Garnet.test/RespHashTests.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/Garnet.test/RespHashTests.cs b/test/Garnet.test/RespHashTests.cs index 000c4212cc..def709d87a 100644 --- a/test/Garnet.test/RespHashTests.cs +++ b/test/Garnet.test/RespHashTests.cs @@ -1069,7 +1069,7 @@ public async Task CanDoHashExpire() ClassicAssert.AreEqual(-1, (long)results[1]); // -1 if the field exists but has no associated expiration set. ClassicAssert.AreEqual(-2, (long)results[2]); - await Task.Delay(3000); + await Task.Delay(3500); var items = db.HashGetAll("myhash"); ClassicAssert.AreEqual(2, items.Length); From 0b965463eeab0bffc4cd1b09ee845de7f3380148 Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Fri, 13 Dec 2024 20:28:47 +0530 Subject: [PATCH 16/24] Removed AsEnumerable to remove the allocation --- libs/server/Objects/Hash/HashObject.cs | 34 +++-------- libs/server/Objects/Hash/HashObjectImpl.cs | 69 ++++++++++++++++++---- 2 files changed, 63 insertions(+), 40 deletions(-) diff --git a/libs/server/Objects/Hash/HashObject.cs b/libs/server/Objects/Hash/HashObject.cs index cdece17d7c..c81353130c 100644 --- a/libs/server/Objects/Hash/HashObject.cs +++ b/libs/server/Objects/Hash/HashObject.cs @@ -460,31 +460,6 @@ private bool ContainsKey(byte[] key) return result; } - private IEnumerable> AsEnumerable() - { - if (HasExpirableItems()) - { - return GetNonExpiredItems(); - } - - return hash.AsEnumerable(); - } - - /// - /// Use `AsEnumerable` instead of this method to avoid checking for expired items if there is no expiring item - /// - /// - private IEnumerable> GetNonExpiredItems() - { - foreach (var item in hash) - { - if (!IsExpired(item.Key)) - { - yield return item; - } - } - } - private void Add(byte[] key, byte[] value) { DeleteExpiredItems(); @@ -606,9 +581,14 @@ private KeyValuePair ElementAt(int index) if (HasExpirableItems()) { var currIndex = 0; - foreach (var item in AsEnumerable()) + foreach (var item in hash) { - if (currIndex == index) + if (IsExpired(item.Key)) + { + continue; + } + + if (currIndex++ == index) { return item; } diff --git a/libs/server/Objects/Hash/HashObjectImpl.cs b/libs/server/Objects/Hash/HashObjectImpl.cs index 8ee76eda16..4668020c0c 100644 --- a/libs/server/Objects/Hash/HashObjectImpl.cs +++ b/libs/server/Objects/Hash/HashObjectImpl.cs @@ -124,12 +124,30 @@ private void HashGetAll(ref ObjectInput input, ref SpanByteAndMemory output) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); } - foreach (var item in AsEnumerable()) + if (HasExpirableItems()) { - while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + foreach (var item in hash) + { + if (IsExpired(item.Key)) + { + continue; + } + + while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + } + } + else + { + foreach (var item in hash) + { + while (!RespWriteUtils.WriteBulkString(item.Key, ref 
curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + } } } finally @@ -320,19 +338,44 @@ private void HashGetKeysOrValues(ref ObjectInput input, ref SpanByteAndMemory ou while (!RespWriteUtils.WriteArrayLength(count, ref curr, end)) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - foreach (var item in AsEnumerable()) + if (HasExpirableItems()) { - if (HashOperation.HKEYS == op) + foreach (var item in hash) { - while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + if (IsExpired(item.Key)) + { + continue; + } + + if (HashOperation.HKEYS == op) + { + while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + } + else + { + while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + } + _output.result1++; } - else + } + else + { + foreach (var item in hash) { - while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + if (HashOperation.HKEYS == op) + { + while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + } + else + { + while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + } + _output.result1++; } - _output.result1++; } } finally From 23f5ea20ea8e2001487e002a84ab3628b94b13a8 Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Fri, 13 Dec 2024 21:25:55 +0530 Subject: [PATCH 17/24] Fixed missed TODO --- libs/server/Objects/Hash/HashObject.cs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/libs/server/Objects/Hash/HashObject.cs b/libs/server/Objects/Hash/HashObject.cs index c81353130c..ae54561a2e 100644 --- a/libs/server/Objects/Hash/HashObject.cs +++ b/libs/server/Objects/Hash/HashObject.cs @@ -87,11 +87,10 @@ public HashObject(BinaryReader reader) if (!isExpired) { hash.Add(item, value); - expirationTimes ??= new Dictionary(ByteArrayComparer.Instance); - expirationQueue ??= new PriorityQueue(); + InitializeExpirationStructures(); expirationTimes.Add(item, expiration); expirationQueue.Enqueue(item, expiration); - // TODO: Update size + UpdateExpirationSize(item, true); } } else @@ -460,6 +459,7 @@ private bool ContainsKey(byte[] key) return result; } + [MethodImpl(MethodImplOptions.AggressiveInlining)] private void Add(byte[] key, byte[] value) { DeleteExpiredItems(); @@ -474,7 +474,14 @@ private void Set(byte[] key, byte[] value) // Skip overhead as existing item is getting replaced. 
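As a back-of-the-envelope check on the bookkeeping above: UpdateExpirationSize charges one expirationTimes entry plus one expirationQueue entry per expiring field. A sketch of the arithmetic follows; DictionaryEntryOverhead is an assumed value here, since this diff only shows the priority-queue constants, so substitute the real MemoryUtils constant:

```csharp
using System;

static class ExpirationCostEstimate
{
    const int DictionaryEntryOverhead = 64;    // assumption; use MemoryUtils.DictionaryEntryOverhead
    const int PriorityQueueEntryOverhead = 48; // from the MemoryUtils hunk above

    // Mirrors UpdateExpirationSize: one dictionary entry plus one queue entry per field.
    public static long PerExpiringField() =>
          (IntPtr.Size + sizeof(long) + DictionaryEntryOverhead)
        + (IntPtr.Size + sizeof(long) + PriorityQueueEntryOverhead);
    // On a 64-bit runtime this is (8 + 8 + 64) + (8 + 8 + 48) = 144 bytes under these
    // assumptions, plus a one-time DictionaryOverhead + PriorityQueueOverhead (80 above)
    // charge from InitializeExpirationStructures for the first expiring field.
}
```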
this.Size += Utility.RoundUp(value.Length, IntPtr.Size) - Utility.RoundUp(value.Length, IntPtr.Size); - Persist(key); + + // To persist the key, if it has an expiration + if (expirationTimes is not null && expirationTimes.TryGetValue(key, out var currentExpiration)) + { + expirationTimes.Remove(key); + this.Size -= IntPtr.Size + sizeof(long) + MemoryUtils.DictionaryEntryOverhead; + CleanupExpirationStructures(); + } } private void SetWithoutPersist(byte[] key, byte[] value) From 7e71c9bcf0c8fd82ca24793eb21d33d9bfffb1dd Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Sat, 14 Dec 2024 19:07:37 +0530 Subject: [PATCH 18/24] Review comment fixes --- .../Operations/ObjectOperations.cs | 12 ---- libs/host/Configuration/Options.cs | 2 +- libs/host/defaults.conf | 2 +- libs/server/API/GarnetApiObjectCommands.cs | 4 +- libs/server/API/IGarnetApi.cs | 4 +- libs/server/Objects/Hash/HashObject.cs | 21 ++---- libs/server/Resp/AdminCommands.cs | 8 +-- libs/server/Resp/CmdStrings.cs | 1 + .../Storage/Session/ObjectStore/HashOps.cs | 67 ++++++++++++++----- libs/server/StoreWrapper.cs | 19 ++++-- test/Garnet.test/Resp/ACL/RespCommandTests.cs | 2 +- website/docs/commands/data-structures.md | 3 +- website/docs/commands/garnet-specific.md | 19 ++++++ website/docs/getting-started/configuration.md | 2 +- 14 files changed, 100 insertions(+), 66 deletions(-) diff --git a/benchmark/BDN.benchmark/Operations/ObjectOperations.cs b/benchmark/BDN.benchmark/Operations/ObjectOperations.cs index 19b9fd52dc..68c44e79a5 100644 --- a/benchmark/BDN.benchmark/Operations/ObjectOperations.cs +++ b/benchmark/BDN.benchmark/Operations/ObjectOperations.cs @@ -23,23 +23,17 @@ public unsafe class ObjectOperations : OperationsBase byte[] sAddRemRequestBuffer; byte* sAddRemRequestBufferPointer; - static ReadOnlySpan HSETDEL => "*4\r\n$4\r\nHSET\r\n$1\r\nf\r\n$1\r\na\r\n$1\r\na\r\n*3\r\n$4\r\nHDEL\r\n$1\r\nf\r\n$1\r\na\r\n"u8; - byte[] hSetDelRequestBuffer; - byte* hSetDelRequestBufferPointer; - public override void GlobalSetup() { base.GlobalSetup(); SetupOperation(ref zAddRemRequestBuffer, ref zAddRemRequestBufferPointer, ZADDREM); SetupOperation(ref lPushPopRequestBuffer, ref lPushPopRequestBufferPointer, LPUSHPOP); SetupOperation(ref sAddRemRequestBuffer, ref sAddRemRequestBufferPointer, SADDREM); - SetupOperation(ref hSetDelRequestBuffer, ref hSetDelRequestBufferPointer, HSETDEL); // Pre-populate data SlowConsumeMessage("*4\r\n$4\r\nZADD\r\n$1\r\nc\r\n$1\r\n1\r\n$1\r\nd\r\n"u8); SlowConsumeMessage("*3\r\n$5\r\nLPUSH\r\n$1\r\nd\r\n$1\r\nf\r\n"u8); SlowConsumeMessage("*3\r\n$4\r\nSADD\r\n$1\r\ne\r\n$1\r\nb\r\n"u8); - SlowConsumeMessage("*3\r\n$4\r\nHSET\r\n$1\r\nf\r\n$1\r\nb\r\n$1\r\nb\r\n"u8); } [Benchmark] @@ -59,11 +53,5 @@ public void SAddRem() { _ = session.TryConsumeMessages(sAddRemRequestBufferPointer, sAddRemRequestBuffer.Length); } - - [Benchmark] - public void HSetDel() - { - _ = session.TryConsumeMessages(hSetDelRequestBufferPointer, hSetDelRequestBuffer.Length); - } } } \ No newline at end of file diff --git a/libs/host/Configuration/Options.cs b/libs/host/Configuration/Options.cs index 768bc0d6f7..2e96a1a6ba 100644 --- a/libs/host/Configuration/Options.cs +++ b/libs/host/Configuration/Options.cs @@ -234,7 +234,7 @@ internal sealed class Options [Option("compaction-freq", Required = false, HelpText = "Background hybrid log compaction frequency in seconds. 
0 = disabled (compaction performed before checkpointing instead)")] public int CompactionFrequencySecs { get; set; } [IntRangeValidation(0, int.MaxValue)] - [Option("hcollect-freq", Required = false, HelpText = "Frequency in seconds for the background task to perform Hash collection. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation.")] + [Option("hcollect-freq", Required = false, HelpText = "Frequency in seconds for the background task to perform Hash collection. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation. Use the HCOLLECT API to collect on-demand.")] public int HashCollectFrequencySecs { get; set; } [Option("compaction-type", Required = false, HelpText = "Hybrid log compaction type. Value options: None - no compaction, Shift - shift begin address without compaction (data loss), Scan - scan old pages and move live records to tail (no data loss), Lookup - lookup each record in compaction range, for record liveness checking using hash chain (no data loss)")] diff --git a/libs/host/defaults.conf b/libs/host/defaults.conf index 2f6d3ee2f5..977b0f5b0b 100644 --- a/libs/host/defaults.conf +++ b/libs/host/defaults.conf @@ -162,7 +162,7 @@ /* Background hybrid log compaction frequency in seconds. 0 = disabled (compaction performed before checkpointing instead) */ "CompactionFrequencySecs" : 0, - /* Frequency in seconds for the background task to perform Hash collection. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation. */ + /* Frequency in seconds for the background task to perform Hash collection. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation. Use the HCOLLECT API to collect on-demand. */ "HashCollectFrequencySecs" : 0, /* Hybrid log compaction type. Value options: */ diff --git a/libs/server/API/GarnetApiObjectCommands.cs b/libs/server/API/GarnetApiObjectCommands.cs index 9e6f956f8c..e2e32b2797 100644 --- a/libs/server/API/GarnetApiObjectCommands.cs +++ b/libs/server/API/GarnetApiObjectCommands.cs @@ -493,8 +493,8 @@ public GarnetStatus HashScan(ArgSlice key, long cursor, string match, int count, public GarnetStatus HashTimeToLive(ArgSlice key, bool isMilliseconds, bool isTimestamp, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter) => storageSession.HashTimeToLive(key, isMilliseconds, isTimestamp, ref input, ref outputFooter, ref objectContext); - public GarnetStatus HashCollect(ArgSlice key, ref ObjectInput input) - => storageSession.HashCollect(key, ref input, ref objectContext); + public GarnetStatus HashCollect(ReadOnlySpan keys, ref ObjectInput input) + => storageSession.HashCollect(keys, ref input, ref objectContext); #endregion } diff --git a/libs/server/API/IGarnetApi.cs b/libs/server/API/IGarnetApi.cs index e003016db3..f3004dee6a 100644 --- a/libs/server/API/IGarnetApi.cs +++ b/libs/server/API/IGarnetApi.cs @@ -996,9 +996,9 @@ public interface IGarnetApi : IGarnetReadApi, IGarnetAdvancedApi /// /// Delete already expired fields from the hash. /// - /// The key of the hash. + /// The keys of the hash. /// The status of the operation. 
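With the API now taking a span of keys rather than a single ArgSlice, HCOLLECT can be issued for several hashes at once, or for `*`. A hedged client-side illustration — the key names are made up, and it reuses the StackExchange.Redis calls the tests above already rely on:

```csharp
using StackExchange.Redis;

// Illustrative only: collect expired fields on specific hashes, then on all hashes.
using var redis = ConnectionMultiplexer.Connect("localhost:6379");
var db = redis.GetDatabase();

var perKey = (string)db.Execute("HCOLLECT", "user:1", "user:2");  // "OK"
var all    = (string)db.Execute("HCOLLECT", "*");                 // "OK"; a concurrent
// collection scan instead fails with "ERR HCOLLECT scan already in progress"
```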
- GarnetStatus HashCollect(ArgSlice key, ref ObjectInput input); + GarnetStatus HashCollect(ReadOnlySpan keys, ref ObjectInput input); #endregion diff --git a/libs/server/Objects/Hash/HashObject.cs b/libs/server/Objects/Hash/HashObject.cs index ae54561a2e..f9d1fbc95a 100644 --- a/libs/server/Objects/Hash/HashObject.cs +++ b/libs/server/Objects/Hash/HashObject.cs @@ -366,16 +366,10 @@ private void DeleteExpiredItems() if (expirationTimes is null) return; - var hasValue = expirationQueue.TryPeek(out var key, out var expiration); - - if (!hasValue) - { - CleanupExpirationStructures(); - return; - } - - while (expiration < DateTimeOffset.UtcNow.Ticks) + while (expirationQueue.TryPeek(out var key, out var expiration) && expiration < DateTimeOffset.UtcNow.Ticks) { + // expirationTimes and expirationQueue will be out of sync when user is updating the expire time of key which already has some TTL. + // PriorityQueue Doesn't have update option, so we will just enqueue the new expiration and already treat expirationTimes as the source of truth if (expirationTimes.TryGetValue(key, out var actualExpiration) && actualExpiration == expiration) { expirationTimes.Remove(key); @@ -392,14 +386,9 @@ private void DeleteExpiredItems() expirationQueue.Dequeue(); this.Size -= MemoryUtils.PriorityQueueEntryOverhead + IntPtr.Size + sizeof(long); } - - hasValue = expirationQueue.TryPeek(out key, out expiration); - if (!hasValue) - { - CleanupExpirationStructures(); - break; - } } + + CleanupExpirationStructures(); } private bool TryGetValue(byte[] key, out byte[] value) diff --git a/libs/server/Resp/AdminCommands.cs b/libs/server/Resp/AdminCommands.cs index 80d309dfbf..16aa01e797 100644 --- a/libs/server/Resp/AdminCommands.cs +++ b/libs/server/Resp/AdminCommands.cs @@ -571,17 +571,17 @@ private bool NetworkFORCEGC() private bool NetworkHCOLLECT(ref TGarnetApi storageApi) where TGarnetApi : IGarnetApi { - if (parseState.Count != 1) + if (parseState.Count < 1) { return AbortWithWrongNumberOfArguments(nameof(RespCommand.HCOLLECT)); } - var key = parseState.GetArgSliceByRef(0); + var keys = parseState.Parameters; var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HCOLLECT }; var input = new ObjectInput(header); - var status = storageApi.HashCollect(key, ref input); + var status = storageApi.HashCollect(keys, ref input); switch (status) { @@ -590,7 +590,7 @@ private bool NetworkHCOLLECT(ref TGarnetApi storageApi) SendAndReset(); break; default: - while (!RespWriteUtils.WriteDirect(CmdStrings.RESP_ERRNOTFOUND, ref dcurr, dend)) + while (!RespWriteUtils.WriteError(CmdStrings.RESP_ERR_HCOLLECT_ALREADY_IN_PROGRESS, ref dcurr, dend)) SendAndReset(); break; } diff --git a/libs/server/Resp/CmdStrings.cs b/libs/server/Resp/CmdStrings.cs index d9742ee212..021f5357d0 100644 --- a/libs/server/Resp/CmdStrings.cs +++ b/libs/server/Resp/CmdStrings.cs @@ -215,6 +215,7 @@ static partial class CmdStrings public static ReadOnlySpan RESP_ERR_INVALID_BITFIELD_TYPE => "ERR Invalid bitfield type. Use something like i16 u8. 
Note that u64 is not supported but i64 is"u8; public static ReadOnlySpan RESP_ERR_SCRIPT_FLUSH_OPTIONS => "ERR SCRIPT FLUSH only support SYNC|ASYNC option"u8; public static ReadOnlySpan RESP_ERR_INVALID_EXPIRE_TIME => "ERR invalid expire time, must be >= 0"u8; + public static ReadOnlySpan RESP_ERR_HCOLLECT_ALREADY_IN_PROGRESS => "ERR HCOLLECT scan already in progress"u8; /// /// Response string templates diff --git a/libs/server/Storage/Session/ObjectStore/HashOps.cs b/libs/server/Storage/Session/ObjectStore/HashOps.cs index 14bec04637..cc59b7acbd 100644 --- a/libs/server/Storage/Session/ObjectStore/HashOps.cs +++ b/libs/server/Storage/Session/ObjectStore/HashOps.cs @@ -15,6 +15,8 @@ namespace Garnet.server /// sealed partial class StorageSession : IDisposable { + private SingleWriterMultiReaderLock _hcollectTaskLock; + /// /// HashSet: Sets the specified fields to their respective values in the hash stored at key. /// Values of specified fields that exist in the hash are overwritten. @@ -601,35 +603,64 @@ public GarnetStatus HashPersist(ArgSlice key, ref ObjectInput in where TObjectContext : ITsavoriteContext => RMWObjectStoreOperationWithOutput(key.ToArray(), ref input, ref objectContext, ref outputFooter); - public GarnetStatus HashCollect(ArgSlice key, ref ObjectInput input, ref TObjectContext objectContext) + /// + /// Collects hash keys and performs a specified operation on them. + /// + /// The type of the object context. + /// The keys to collect. + /// The input object containing the operation details. + /// The object context for the operation. + /// The status of the operation. + /// + /// If the first key is "*", all hash keys are scanned in batches and the operation is performed on each key. + /// Otherwise, the operation is performed on the specified keys. 
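A compact sketch of the `*` path described in the remarks above, under the assumption that key discovery is cursor-based and batched; the helper names are stand-ins for DbScan and the per-key RMW call, not Garnet APIs:

```csharp
using System;
using System.Collections.Generic;

static class CollectAllSketch
{
    // Discover hash keys in fixed-size batches via a cursor and apply the collect
    // operation to each, so the whole keyspace is never materialized at once.
    public static void Run(
        Func<long, int, (long NextCursor, IReadOnlyList<string> Keys)> scanBatch,
        Action<string> collectOne,
        int batchSize = 100)
    {
        long cursor = 0;
        do
        {
            var (nextCursor, keys) = scanBatch(cursor, batchSize);
            foreach (var key in keys)
                collectOne(key);        // e.g. run the HCOLLECT object operation on this hash
            cursor = nextCursor;
        } while (cursor != 0);          // cursor 0 signals the scan is complete
    }
}
```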
+ /// + public GarnetStatus HashCollect(ReadOnlySpan keys, ref ObjectInput input, ref TObjectContext objectContext) where TObjectContext : ITsavoriteContext { - if (key.ReadOnlySpan.SequenceEqual("*"u8)) + if (!_hcollectTaskLock.TryWriteLock()) { - long cursor = 0; - long storeCursor = 0; + return GarnetStatus.NOTFOUND; + } - // Scan all hash keys in batches - do + try + { + if (keys[0].ReadOnlySpan.SequenceEqual("*"u8)) { - if (!DbScan(key, true, cursor, out storeCursor, out var hashKeys, 100, CmdStrings.HASH)) - { - return GarnetStatus.NOTFOUND; - } + long cursor = 0; + long storeCursor = 0; - // Process each hash key - foreach (var hashKey in hashKeys) + // Scan all hash keys in batches + do { - RMWObjectStoreOperation(hashKey, ref input, out _, ref objectContext); - } + if (!DbScan(keys[0], true, cursor, out storeCursor, out var hashKeys, 100, CmdStrings.HASH)) + { + return GarnetStatus.OK; + } + + // Process each hash key + foreach (var hashKey in hashKeys) + { + RMWObjectStoreOperation(hashKey, ref input, out _, ref objectContext); + } + + cursor = storeCursor; + } while (storeCursor != 0); + + return GarnetStatus.OK; + } - cursor = storeCursor; - } while (storeCursor != 0); + foreach (var key in keys) + { + RMWObjectStoreOperation(key.ToArray(), ref input, out _, ref objectContext); + } return GarnetStatus.OK; } - - return RMWObjectStoreOperation(key.ToArray(), ref input, out _, ref objectContext); + finally + { + _hcollectTaskLock.WriteUnlock(); + } } } } \ No newline at end of file diff --git a/libs/server/StoreWrapper.cs b/libs/server/StoreWrapper.cs index b024465436..a22af805b1 100644 --- a/libs/server/StoreWrapper.cs +++ b/libs/server/StoreWrapper.cs @@ -421,7 +421,6 @@ async Task HashCollectTask(int hashCollectFrequencySecs, CancellationToken token { var scratchBufferManager = new ScratchBufferManager(); using var storageSession = new StorageSession(this, scratchBufferManager, null, null, logger); - var key = ArgSlice.FromPinnedSpan("*"u8); while (true) { @@ -433,13 +432,9 @@ async Task HashCollectTask(int hashCollectFrequencySecs, CancellationToken token return; } - var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HCOLLECT }; - var input = new ObjectInput(header); + ExecuteHashCollect(scratchBufferManager, storageSession); - storageSession.HashCollect(key, ref input, ref storageSession.objectStoreBasicContext); - scratchBufferManager.Reset(); - - await Task.Delay(hashCollectFrequencySecs * 1000, token); + await Task.Delay(TimeSpan.FromSeconds(hashCollectFrequencySecs), token); } } catch (TaskCanceledException ex) when (token.IsCancellationRequested) @@ -450,6 +445,16 @@ async Task HashCollectTask(int hashCollectFrequencySecs, CancellationToken token { logger?.LogCritical(ex, "Unknown exception received for background hash collect task. 
Hash collect task won't be resumed."); } + + static void ExecuteHashCollect(ScratchBufferManager scratchBufferManager, StorageSession storageSession) + { + var header = new RespInputHeader(GarnetObjectType.Hash) { HashOp = HashOperation.HCOLLECT }; + var input = new ObjectInput(header); + + ReadOnlySpan key = [ArgSlice.FromPinnedSpan("*"u8)]; + storageSession.HashCollect(key, ref input, ref storageSession.objectStoreBasicContext); + scratchBufferManager.Reset(); + } } void DoCompaction() diff --git a/test/Garnet.test/Resp/ACL/RespCommandTests.cs b/test/Garnet.test/Resp/ACL/RespCommandTests.cs index 22fd160004..7b03e1c9ff 100644 --- a/test/Garnet.test/Resp/ACL/RespCommandTests.cs +++ b/test/Garnet.test/Resp/ACL/RespCommandTests.cs @@ -3340,7 +3340,7 @@ await CheckCommandsAsync( static async Task DoHCollectAsync(GarnetClient client) { var val = await client.ExecuteForStringResultAsync("HCOLLECT", ["foo"]); - ClassicAssert.IsNull(val); + ClassicAssert.AreEqual("OK", val); } } diff --git a/website/docs/commands/data-structures.md b/website/docs/commands/data-structures.md index d503754dc8..85977fbd21 100644 --- a/website/docs/commands/data-structures.md +++ b/website/docs/commands/data-structures.md @@ -439,7 +439,7 @@ Array reply: For each field, returns: #### Syntax ```bash - HCOLLECT key + HCOLLECT key [key ...] ``` Manually trigger cleanup of expired fields from memory for a given Hash set key. @@ -449,6 +449,7 @@ Use `*` as the key to collect it from all hash keys. #### Resp Reply Simple reply: OK response +Error reply: ERR HCOLLECT scan already in progress --- diff --git a/website/docs/commands/garnet-specific.md b/website/docs/commands/garnet-specific.md index d98b6bcfa0..88248feb0f 100644 --- a/website/docs/commands/garnet-specific.md +++ b/website/docs/commands/garnet-specific.md @@ -42,6 +42,25 @@ Simple string reply: OK. --- +### HCOLLECT + +#### Syntax + +```bash + HCOLLECT key [key ...] +``` + +Manually trigger cleanup of expired fields from memory for a given Hash set key. + +Use `*` as the key to collect it from all hash keys. + +#### Resp Reply + +Simple reply: OK response +Error reply: ERR HCOLLECT scan already in progress + +--- + ### COSCAN #### Syntax diff --git a/website/docs/getting-started/configuration.md b/website/docs/getting-started/configuration.md index eadab48d3d..ac0d130154 100644 --- a/website/docs/getting-started/configuration.md +++ b/website/docs/getting-started/configuration.md @@ -119,7 +119,7 @@ For all available command line settings, run `GarnetServer.exe -h` or `GarnetSer | **WaitForCommit** | ```--aof-commit-wait``` | ```bool``` | | Wait for AOF to flush the commit before returning results to client. Warning: will greatly increase operation latency. | | **AofSizeLimit** | ```--aof-size-limit``` | ```string``` | Memory size | Maximum size of AOF (rounds down to power of 2) after which unsafe truncation will be applied. Left empty AOF will grow without bound unless a checkpoint is taken | | **CompactionFrequencySecs** | ```--compaction-freq``` | ```int``` | Integer in range:&#10;
[0, MaxValue] | Background hybrid log compaction frequency in seconds. 0 = disabled (compaction performed before checkpointing instead) | -| **HashCollectFrequencySecs** | ```--hcollect-freq``` | ```int``` | Integer in range:
[0, MaxValue] | Frequency in seconds for the background task to perform Hash collection. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation. | +| **HashCollectFrequencySecs** | ```--hcollect-freq``` | ```int``` | Integer in range:
[0, MaxValue] | Frequency in seconds for the background task to perform Hash collection. 0 = disabled. Hash collect is used to delete expired fields from hash without waiting for a write operation. Use the HCOLLECT API to collect on-demand. | | **CompactionType** | ```--compaction-type``` | ```LogCompactionType``` | None, Shift, Scan, Lookup | Hybrid log compaction type. Value options: None - No compaction, Shift - shift begin address without compaction (data loss), Scan - scan old pages and move live records to tail (no data loss), Lookup - lookup each record in compaction range, for record liveness checking using hash chain (no data loss) | | **CompactionForceDelete** | ```--compaction-force-delete``` | ```bool``` | | Forcefully delete the inactive segments immediately after the compaction strategy (type) is applied. If false, take a checkpoint to actually delete the older data files from disk. | | **CompactionMaxSegments** | ```--compaction-max-segments``` | ```int``` | Integer in range:
[0, MaxValue] | Number of log segments created on disk before compaction triggers. | From 764a7b4c43e24c529920e1c2a5cfa382945d7a4b Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Sat, 14 Dec 2024 19:17:27 +0530 Subject: [PATCH 19/24] Review comment fixes --- libs/server/Objects/Hash/HashObjectImpl.cs | 57 ++++++++++------------ 1 file changed, 27 insertions(+), 30 deletions(-) diff --git a/libs/server/Objects/Hash/HashObjectImpl.cs b/libs/server/Objects/Hash/HashObjectImpl.cs index 4668020c0c..90bc75f3b4 100644 --- a/libs/server/Objects/Hash/HashObjectImpl.cs +++ b/libs/server/Objects/Hash/HashObjectImpl.cs @@ -133,20 +133,14 @@ private void HashGetAll(ref ObjectInput input, ref SpanByteAndMemory output) continue; } - while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + WriteHashItem(ref output, ref isMemory, ref ptrHandle, ref ptr, ref curr, ref end, item); } } else { foreach (var item in hash) { - while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + WriteHashItem(ref output, ref isMemory, ref ptrHandle, ref ptr, ref curr, ref end, item); } } } @@ -158,6 +152,14 @@ private void HashGetAll(ref ObjectInput input, ref SpanByteAndMemory output) if (isMemory) ptrHandle.Dispose(); output.Length = (int)(curr - ptr); } + + static void WriteHashItem(ref SpanByteAndMemory output, ref bool isMemory, ref MemoryHandle ptrHandle, ref byte* ptr, ref byte* curr, ref byte* end, System.Collections.Generic.KeyValuePair item) + { + while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + } } private void HashDelete(ref ObjectInput input, byte* output) @@ -347,34 +349,14 @@ private void HashGetKeysOrValues(ref ObjectInput input, ref SpanByteAndMemory ou continue; } - if (HashOperation.HKEYS == op) - { - while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - } - else - { - while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - } - _output.result1++; + WriteHashEntry(ref output, op, ref isMemory, ref ptrHandle, ref ptr, ref curr, ref end, ref _output, item); } } else { foreach (var item in hash) { - if (HashOperation.HKEYS == op) - { - while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - } - else - { - while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - } - _output.result1++; + WriteHashEntry(ref output, op, ref isMemory, 
ref ptrHandle, ref ptr, ref curr, ref end, ref _output, item); } } } @@ -386,6 +368,21 @@ private void HashGetKeysOrValues(ref ObjectInput input, ref SpanByteAndMemory ou if (isMemory) ptrHandle.Dispose(); output.Length = (int)(curr - ptr); } + + static void WriteHashEntry(ref SpanByteAndMemory output, HashOperation op, ref bool isMemory, ref MemoryHandle ptrHandle, ref byte* ptr, ref byte* curr, ref byte* end, ref ObjectOutputHeader _output, System.Collections.Generic.KeyValuePair item) + { + if (HashOperation.HKEYS == op) + { + while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + } + else + { + while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + } + _output.result1++; + } } private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output) From dc69d9841b890379ee969e529cadc08e1e74e01d Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Sat, 14 Dec 2024 19:59:10 +0530 Subject: [PATCH 20/24] Test fix, maybe --- test/Garnet.test/RespHashTests.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/Garnet.test/RespHashTests.cs b/test/Garnet.test/RespHashTests.cs index def709d87a..e89023e61c 100644 --- a/test/Garnet.test/RespHashTests.cs +++ b/test/Garnet.test/RespHashTests.cs @@ -201,7 +201,7 @@ public async Task CanDoHLenWithExpire() db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); var result = db.HashLength("user:user1"); ClassicAssert.AreEqual(3, result); - await Task.Delay(110); + await Task.Delay(150); result = db.HashLength("user:user1"); ClassicAssert.AreEqual(2, result); db.HashSet("user:user1", [new HashEntry("Year", "new2021")]); // Trigger deletion of expired field From 444fdf66842e843a8b901a9d401ef9dda3d886a5 Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Tue, 17 Dec 2024 13:46:16 +0530 Subject: [PATCH 21/24] Review command fix --- libs/server/Objects/Hash/HashObjectImpl.cs | 72 +++++++--------------- 1 file changed, 23 insertions(+), 49 deletions(-) diff --git a/libs/server/Objects/Hash/HashObjectImpl.cs b/libs/server/Objects/Hash/HashObjectImpl.cs index 90bc75f3b4..14f6a84a41 100644 --- a/libs/server/Objects/Hash/HashObjectImpl.cs +++ b/libs/server/Objects/Hash/HashObjectImpl.cs @@ -124,24 +124,19 @@ private void HashGetAll(ref ObjectInput input, ref SpanByteAndMemory output) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); } - if (HasExpirableItems()) - { - foreach (var item in hash) - { - if (IsExpired(item.Key)) - { - continue; - } + var isExpirable = HasExpirableItems(); - WriteHashItem(ref output, ref isMemory, ref ptrHandle, ref ptr, ref curr, ref end, item); - } - } - else + foreach (var item in hash) { - foreach (var item in hash) + if (isExpirable && IsExpired(item.Key)) { - WriteHashItem(ref output, ref isMemory, ref ptrHandle, ref ptr, ref curr, ref end, item); + continue; } + + while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); + while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); } } finally @@ -152,14 +147,6 @@ private void HashGetAll(ref ObjectInput input, ref SpanByteAndMemory output) if (isMemory) 
ptrHandle.Dispose(); output.Length = (int)(curr - ptr); } - - static void WriteHashItem(ref SpanByteAndMemory output, ref bool isMemory, ref MemoryHandle ptrHandle, ref byte* ptr, ref byte* curr, ref byte* end, System.Collections.Generic.KeyValuePair item) - { - while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - } } private void HashDelete(ref ObjectInput input, byte* output) @@ -340,24 +327,26 @@ private void HashGetKeysOrValues(ref ObjectInput input, ref SpanByteAndMemory ou while (!RespWriteUtils.WriteArrayLength(count, ref curr, end)) ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - if (HasExpirableItems()) + var isExpirable = HasExpirableItems(); + + foreach (var item in hash) { - foreach (var item in hash) + if (isExpirable && IsExpired(item.Key)) { - if (IsExpired(item.Key)) - { - continue; - } + continue; + } - WriteHashEntry(ref output, op, ref isMemory, ref ptrHandle, ref ptr, ref curr, ref end, ref _output, item); + if (HashOperation.HKEYS == op) + { + while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); } - } - else - { - foreach (var item in hash) + else { - WriteHashEntry(ref output, op, ref isMemory, ref ptrHandle, ref ptr, ref curr, ref end, ref _output, item); + while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) + ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); } + _output.result1++; } } finally @@ -368,21 +357,6 @@ private void HashGetKeysOrValues(ref ObjectInput input, ref SpanByteAndMemory ou if (isMemory) ptrHandle.Dispose(); output.Length = (int)(curr - ptr); } - - static void WriteHashEntry(ref SpanByteAndMemory output, HashOperation op, ref bool isMemory, ref MemoryHandle ptrHandle, ref byte* ptr, ref byte* curr, ref byte* end, ref ObjectOutputHeader _output, System.Collections.Generic.KeyValuePair item) - { - if (HashOperation.HKEYS == op) - { - while (!RespWriteUtils.WriteBulkString(item.Key, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - } - else - { - while (!RespWriteUtils.WriteBulkString(item.Value, ref curr, end)) - ObjectUtils.ReallocateOutput(ref output, ref isMemory, ref ptr, ref ptrHandle, ref curr, ref end); - } - _output.result1++; - } } private void HashIncrement(ref ObjectInput input, ref SpanByteAndMemory output) From e99e19d1ae59c071fc4167cf128d116b78f2455c Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Tue, 17 Dec 2024 14:11:20 +0530 Subject: [PATCH 22/24] Fixed test failure --- test/Garnet.test/RespHashTests.cs | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/test/Garnet.test/RespHashTests.cs b/test/Garnet.test/RespHashTests.cs index e89023e61c..0b91ecd521 100644 --- a/test/Garnet.test/RespHashTests.cs +++ b/test/Garnet.test/RespHashTests.cs @@ -67,7 +67,7 @@ public async Task CanSetAndGetOnePairWithExpire() db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); string r = db.HashGet("user:user1", "Title"); ClassicAssert.AreEqual("Tsavorite", r); - await Task.Delay(110); + await Task.Delay(200); 
r = db.HashGet("user:user1", "Title"); ClassicAssert.IsNull(r); } @@ -80,7 +80,7 @@ public async Task CanSetWithExpireAndRemoveExpireByCallingSetAgain() db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite")]); db.HashFieldExpire("user:user1", ["Title"], TimeSpan.FromMilliseconds(100)); db.HashSet("user:user1", [new HashEntry("Title", "Tsavorite")]); - await Task.Delay(110); + await Task.Delay(200); string r = db.HashGet("user:user1", "Title"); ClassicAssert.AreEqual("Tsavorite", r); } @@ -238,7 +238,7 @@ public async Task CanDoGetAllWithExpire() ClassicAssert.AreEqual(hashEntries.Length, result.Select(r => r.Name).Distinct().Count()); ClassicAssert.IsTrue(hashEntries.OrderBy(e => e.Name).SequenceEqual(result.OrderBy(r => r.Name))); - await Task.Delay(110); + await Task.Delay(200); result = db.HashGetAll("user:user1"); ClassicAssert.AreEqual(hashEntries.Length - 1, result.Length); @@ -274,7 +274,7 @@ public async Task CanDoHExistsWithExpire() var result = db.HashExists(new RedisKey("user:user1"), "Title"); ClassicAssert.IsTrue(result); - await Task.Delay(110); + await Task.Delay(200); result = db.HashExists(new RedisKey("user:user1"), "Title"); ClassicAssert.IsFalse(result); @@ -309,7 +309,7 @@ public async Task CanDoHStrLenWithExire() long r = db.HashStringLength("user:user1", "Title"); ClassicAssert.AreEqual(9, r); - await Task.Delay(110); + await Task.Delay(200); r = db.HashStringLength("user:user1", "Title"); ClassicAssert.AreEqual(0, r); @@ -346,7 +346,7 @@ public async Task CanDoHKeysWithExpire() ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Year"))); ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Company"))); - await Task.Delay(110); + await Task.Delay(200); result = db.HashKeys("user:user1"); ClassicAssert.AreEqual(2, result.Length); @@ -389,7 +389,7 @@ public async Task CanDoHValsWithExpire() ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("2021"))); ClassicAssert.IsTrue(Array.Exists(result, t => t.Equals("Acme"))); - await Task.Delay(110); + await Task.Delay(200); result = db.HashValues("user:user1"); ClassicAssert.AreEqual(2, result.Length); @@ -432,7 +432,7 @@ public async Task CanDoHIncrByWithExpire() var result = db.HashIncrement(new RedisKey("user:user1"), new RedisValue("Field2"), -4); ClassicAssert.AreEqual(-3, result); - await Task.Delay(110); + await Task.Delay(200); result = db.HashIncrement(new RedisKey("user:user1"), new RedisValue("Field2"), -4); ClassicAssert.AreEqual(-4, result); @@ -490,7 +490,7 @@ public async Task CheckHashIncrementDoublePrecisionWithExpire() var result = db.HashIncrement(new RedisKey("user:user1"), new RedisValue("Field1"), 2.2222222222); ClassicAssert.AreEqual(3.3333333333, result, 1e-15); - await Task.Delay(110); + await Task.Delay(200); result = db.HashIncrement(new RedisKey("user:user1"), new RedisValue("Field1"), 2.2222222222); ClassicAssert.AreEqual(2.2222222222, result, 1e-15); @@ -518,7 +518,7 @@ public async Task CanDoHSETNXCommandWithExpire() db.HashFieldExpire("user:user1", ["Field"], TimeSpan.FromMilliseconds(100)); db.HashSet(new RedisKey("user:user1"), new RedisValue("Field"), new RedisValue("Hello"), When.NotExists); - await Task.Delay(110); + await Task.Delay(200); string result = db.HashGet("user:user1", "Field"); ClassicAssert.IsNull(result); // SetNX should not reset the expiration @@ -603,7 +603,7 @@ public async Task CanDoRandomFieldWithExpire() string field = db.HashRandomField(hashKey); ClassicAssert.AreEqual(field, "Title"); - await Task.Delay(110); + await Task.Delay(200); field = 
db.HashRandomField(hashKey); ClassicAssert.IsNull(field); @@ -623,7 +623,7 @@ public async Task CanDoRandomFieldsWithExpire() ClassicAssert.AreEqual(field.Length, 1); ClassicAssert.AreEqual("Title", field[0]); - await Task.Delay(110); + await Task.Delay(200); field = db.HashRandomFields(hashKey, 10).Select(x => (string)x).ToArray(); ClassicAssert.AreEqual(field.Length, 0); @@ -715,7 +715,7 @@ public async Task CanDoHashScanWithExpire() ClassicAssert.IsTrue(((IScanningCursor)members).Cursor == 0); ClassicAssert.IsTrue(members.Count() == 4, "HSCAN with MATCH failed."); - await Task.Delay(110); + await Task.Delay(200); // HSCAN with match members = db.HashScan("user:user789", "email*"); @@ -818,7 +818,7 @@ public async Task CanDoHMGETWithExpire() ClassicAssert.AreEqual("email@example.com", members[0]); ClassicAssert.AreEqual("email1@example.com", members[1]); - await Task.Delay(110); + await Task.Delay(200); members = (string[])db.Execute("HMGET", "user:user789", "email", "email1"); ClassicAssert.IsNull(members[0]); From 6c08adb60a7ea0fd639d967320a5ab8f74adbf62 Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Thu, 19 Dec 2024 13:41:20 +0530 Subject: [PATCH 23/24] Test fix --- test/Garnet.test/RespHashTests.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/Garnet.test/RespHashTests.cs b/test/Garnet.test/RespHashTests.cs index 0b91ecd521..939ed96718 100644 --- a/test/Garnet.test/RespHashTests.cs +++ b/test/Garnet.test/RespHashTests.cs @@ -1193,7 +1193,7 @@ public async Task CanDoHashCollect() var orginalMemory = (long)db.Execute("MEMORY", "USAGE", "myhash"); - await Task.Delay(1000); + await Task.Delay(1200); var newMemory = (long)db.Execute("MEMORY", "USAGE", "myhash"); ClassicAssert.AreEqual(newMemory, orginalMemory); @@ -1205,7 +1205,7 @@ public async Task CanDoHashCollect() ClassicAssert.Less(newMemory, orginalMemory); orginalMemory = newMemory; - await Task.Delay(1000); + await Task.Delay(1200); newMemory = (long)db.Execute("MEMORY", "USAGE", "myhash"); ClassicAssert.AreEqual(newMemory, orginalMemory); From b1c4bbdb95c2862e059e6a4205233e34ae1d5621 Mon Sep 17 00:00:00 2001 From: Vijay-Nirmal Date: Thu, 19 Dec 2024 18:20:58 +0530 Subject: [PATCH 24/24] Test fix --- test/Garnet.test/RespHashTests.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/Garnet.test/RespHashTests.cs b/test/Garnet.test/RespHashTests.cs index 939ed96718..c8f5f01e56 100644 --- a/test/Garnet.test/RespHashTests.cs +++ b/test/Garnet.test/RespHashTests.cs @@ -1185,7 +1185,7 @@ public async Task CanDoHashCollect() ClassicAssert.AreEqual(1, (long)results[0]); ClassicAssert.AreEqual(1, (long)results[1]); - result = db.Execute("HEXPIRE", "myhash", "2", "FIELDS", "2", "field3", "field4"); + result = db.Execute("HEXPIRE", "myhash", "3", "FIELDS", "2", "field3", "field4"); results = (RedisResult[])result; ClassicAssert.AreEqual(2, results.Length); ClassicAssert.AreEqual(1, (long)results[0]); @@ -1205,7 +1205,7 @@ public async Task CanDoHashCollect() ClassicAssert.Less(newMemory, orginalMemory); orginalMemory = newMemory; - await Task.Delay(1200); + await Task.Delay(2200); newMemory = (long)db.Execute("MEMORY", "USAGE", "myhash"); ClassicAssert.AreEqual(newMemory, orginalMemory);