diff --git a/abci.go b/abci.go new file mode 100644 index 0000000..f0c11f6 --- /dev/null +++ b/abci.go @@ -0,0 +1,9 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// BeginBlocker of claimsmanager module +func (k Keeper) BeginBlocker(ctx sdk.Context) { +} diff --git a/claims.go b/claims.go new file mode 100644 index 0000000..73cbccb --- /dev/null +++ b/claims.go @@ -0,0 +1,299 @@ +package keeper + +import ( + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/ingenuity-build/quicksilver/x/claimsmanager/types" +) + +func (k Keeper) NewClaim(ctx sdk.Context, address string, chainID string, module types.ClaimType, srcChainID string, amount uint64) types.Claim { + return types.Claim{UserAddress: address, ChainId: chainID, Module: module, SourceChainId: srcChainID, Amount: amount} +} + +// GetClaim returns claim +func (k Keeper) GetClaim(ctx sdk.Context, chainID string, address string, module types.ClaimType, srcChainID string) (types.Claim, bool) { + data := types.Claim{} + store := prefix.NewStore(ctx.KVStore(k.storeKey), nil) + key := types.GetKeyClaim(chainID, address, module, srcChainID) + bz := store.Get(key) + if len(bz) == 0 { + return data, false + } + + k.cdc.MustUnmarshal(bz, &data) + return data, true +} + +// GetLastEpochClaim returns claim from last epoch +func (k Keeper) GetLastEpochClaim(ctx sdk.Context, chainID string, address string, module types.ClaimType, srcChainID string) (types.Claim, bool) { + data := types.Claim{} + store := prefix.NewStore(ctx.KVStore(k.storeKey), nil) + key := types.GetKeyLastEpochClaim(chainID, address, module, srcChainID) + bz := store.Get(key) + if len(bz) == 0 { + return data, false + } + + k.cdc.MustUnmarshal(bz, &data) + return data, true +} + +// SetClaim sets claim +func (k Keeper) SetClaim(ctx sdk.Context, claim *types.Claim) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), nil) + bz := k.cdc.MustMarshal(claim) + store.Set(types.GetKeyClaim(claim.ChainId, claim.UserAddress, claim.Module, claim.SourceChainId), bz) +} + +// SetLastEpochClaim sets claim for last epoch +func (k Keeper) SetLastEpochClaim(ctx sdk.Context, claim *types.Claim) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), nil) + bz := k.cdc.MustMarshal(claim) + store.Set(types.GetKeyLastEpochClaim(claim.ChainId, claim.UserAddress, claim.Module, claim.SourceChainId), bz) +} + +// DeleteClaim deletes claim +func (k Keeper) DeleteClaim(ctx sdk.Context, claim *types.Claim) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), nil) + store.Delete(types.GetKeyClaim(claim.ChainId, claim.UserAddress, claim.Module, claim.SourceChainId)) +} + +// DeleteLastEpochClaim deletes claim for last epoch +func (k Keeper) DeleteLastEpochClaim(ctx sdk.Context, claim *types.Claim) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), nil) + store.Delete(types.GetKeyLastEpochClaim(claim.ChainId, claim.UserAddress, claim.Module, claim.SourceChainId)) +} + +// IterateClaims iterates through zone claims. 
+func (k Keeper) IterateClaims(ctx sdk.Context, chainID string, fn func(index int64, data types.Claim) (stop bool)) { + // noop + if fn == nil { + return + } + + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.GetPrefixClaim(chainID)) + defer iterator.Close() + + i := int64(0) + for ; iterator.Valid(); iterator.Next() { + data := types.Claim{} + k.cdc.MustUnmarshal(iterator.Value(), &data) + stop := fn(i, data) + if stop { + break + } + i++ + } +} + +// IterateUserClaims iterates through zone claims for a given address. +func (k Keeper) IterateUserClaims(ctx sdk.Context, chainID string, address string, fn func(index int64, data types.Claim) (stop bool)) { + // noop + if fn == nil { + return + } + + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.GetPrefixUserClaim(chainID, address)) + defer iterator.Close() + + i := int64(0) + for ; iterator.Valid(); iterator.Next() { + data := types.Claim{} + k.cdc.MustUnmarshal(iterator.Value(), &data) + stop := fn(i, data) + if stop { + break + } + i++ + } +} + +// IterateLastEpochClaims iterates through zone claims from last epoch. +func (k Keeper) IterateLastEpochClaims(ctx sdk.Context, chainID string, fn func(index int64, data types.Claim) (stop bool)) { + // noop + if fn == nil { + return + } + + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.GetPrefixLastEpochClaim(chainID)) + defer iterator.Close() + + i := int64(0) + for ; iterator.Valid(); iterator.Next() { + data := types.Claim{} + k.cdc.MustUnmarshal(iterator.Value(), &data) + stop := fn(i, data) + if stop { + break + } + i++ + } +} + +// IterateLastEpochUserClaims iterates through zone claims from last epoch for a given user. +func (k Keeper) IterateLastEpochUserClaims(ctx sdk.Context, chainID string, address string, fn func(index int64, data types.Claim) (stop bool)) { + // noop + if fn == nil { + return + } + + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.GetPrefixLastEpochUserClaim(chainID, address)) + defer iterator.Close() + + i := int64(0) + for ; iterator.Valid(); iterator.Next() { + data := types.Claim{} + k.cdc.MustUnmarshal(iterator.Value(), &data) + stop := fn(i, data) + if stop { + break + } + i++ + } +} + +// IterateAllLastEpochClaims iterates through all last epoch claims, regardless of zone. +func (k Keeper) IterateAllLastEpochClaims(ctx sdk.Context, fn func(index int64, key []byte, data types.Claim) (stop bool)) { + // noop + if fn == nil { + return + } + + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.KeyPrefixLastEpochClaim) + defer iterator.Close() + + i := int64(0) + for ; iterator.Valid(); iterator.Next() { + data := types.Claim{} + k.cdc.MustUnmarshal(iterator.Value(), &data) + stop := fn(i, iterator.Key(), data) + if stop { + break + } + i++ + } +} + +// IterateAllClaims iterates through all claims. +func (k Keeper) IterateAllClaims(ctx sdk.Context, fn func(index int64, key []byte, data types.Claim) (stop bool)) { + // noop + if fn == nil { + return + } + + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.KeyPrefixClaim) + defer iterator.Close() + + i := int64(0) + for ; iterator.Valid(); iterator.Next() { + data := types.Claim{} + k.cdc.MustUnmarshal(iterator.Value(), &data) + stop := fn(i, iterator.Key(), data) + if stop { + break + } + i++ + } +} + +// AllClaims returns a slice containing all claims from the store.
+func (k Keeper) AllClaims(ctx sdk.Context) []*types.Claim { + claims := []*types.Claim{} + + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.KeyPrefixClaim) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + claim := types.Claim{} + k.cdc.MustUnmarshal(iterator.Value(), &claim) + + claims = append(claims, &claim) + } + + return claims +} + +func (k Keeper) AllZoneClaims(ctx sdk.Context, chainID string) []*types.Claim { + claims := []*types.Claim{} + k.IterateClaims(ctx, chainID, func(_ int64, claim types.Claim) (stop bool) { + claims = append(claims, &claim) + return false + }) + return claims +} + +func (k Keeper) AllZoneUserClaims(ctx sdk.Context, chainID string, address string) []*types.Claim { + claims := []*types.Claim{} + k.IterateUserClaims(ctx, chainID, address, func(_ int64, claim types.Claim) (stop bool) { + claims = append(claims, &claim) + return false + }) + return claims +} + +func (k Keeper) AllZoneLastEpochClaims(ctx sdk.Context, chainID string) []*types.Claim { + claims := []*types.Claim{} + k.IterateLastEpochClaims(ctx, chainID, func(_ int64, claim types.Claim) (stop bool) { + claims = append(claims, &claim) + return false + }) + return claims +} + +func (k Keeper) AllZoneLastEpochUserClaims(ctx sdk.Context, chainID string, address string) []*types.Claim { + claims := []*types.Claim{} + k.IterateLastEpochUserClaims(ctx, chainID, address, func(_ int64, claim types.Claim) (stop bool) { + claims = append(claims, &claim) + return false + }) + return claims +} + +// ClearClaims deletes all the current epoch claims of the given zone. +func (k Keeper) ClearClaims(ctx sdk.Context, chainID string) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.GetPrefixClaim(chainID)) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + key := iterator.Key() + store.Delete(key) + } +} + +// ClearLastEpochClaims deletes all the last epoch claims of the given zone. +func (k Keeper) ClearLastEpochClaims(ctx sdk.Context, chainID string) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.GetPrefixLastEpochClaim(chainID)) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + key := iterator.Key() + store.Delete(key) + } +} + +// ArchiveAndGarbageCollectClaims deletes all the last epoch claims and moves the current epoch claims to the last epoch store. +func (k Keeper) ArchiveAndGarbageCollectClaims(ctx sdk.Context, chainID string) { + k.ClearLastEpochClaims(ctx, chainID) + + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.GetPrefixClaim(chainID)) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + key := iterator.Key() + store.Delete(key) + newKey := types.KeyPrefixLastEpochClaim + newKey = append(newKey, key[1:]...) 
// update prefix from KeyPrefixClaim to KeyPrefixLastEpochClaim + store.Set(newKey, iterator.Value()) + } +} diff --git a/claims_test.go b/claims_test.go new file mode 100644 index 0000000..7014be5 --- /dev/null +++ b/claims_test.go @@ -0,0 +1,266 @@ +package keeper_test + +import ( + "github.com/ingenuity-build/quicksilver/utils" + "github.com/ingenuity-build/quicksilver/x/claimsmanager/types" +) + +var testClaims = []types.Claim{ + // test user claim on chainB (using osmosis pool) + { + UserAddress: testAddress, + // ChainId: suite.chainB.ChainID, + Module: types.ClaimTypeOsmosisPool, + SourceChainId: "osmosis-1", + Amount: 5000000, + }, + // test user claim on chainB (liquid) + { + UserAddress: testAddress, + // ChainId: suite.chainB.ChainID, + Module: types.ClaimTypeLiquidToken, + SourceChainId: "", + Amount: 5000000, + }, + // random user claim on chainB (using osmosis pool) + { + UserAddress: utils.GenerateAccAddressForTest().String(), + // ChainId: suite.chainB.ChainID, + Module: types.ClaimTypeOsmosisPool, + SourceChainId: "osmosis-1", + Amount: 15000000, + }, + // zero value claim + { + UserAddress: "quick16pxh2v4hr28h2gkntgfk8qgh47pfmjfhzgeure", + // ChainId: suite.chainB.ChainID, + Module: types.ClaimTypeLiquidToken, + SourceChainId: "osmosis-1", + Amount: 0, + }, + // test user claim on "cosmoshub-4" (liquid) + { + UserAddress: testAddress, + ChainId: "cosmoshub-4", + Module: types.ClaimTypeLiquidToken, + SourceChainId: "", + Amount: 10000000, + }, + // random user claim on "cosmoshub-4" (liquid) + { + UserAddress: utils.GenerateAccAddressForTest().String(), + ChainId: "cosmoshub-4", + Module: types.ClaimTypeLiquidToken, + SourceChainId: "", + Amount: 15000000, + }, +} + +func (suite *KeeperTestSuite) TestKeeper_NewClaim() { + type args struct { + address string + chainID string + module types.ClaimType + srcChainID string + amount uint64 + } + tests := []struct { + name string + args args + want types.Claim + }{ + { + "blank", + args{}, + types.Claim{}, + }, + { + "valid", + args{ + testAddress, + suite.chainB.ChainID, + types.ClaimTypeLiquidToken, + "", + 5000000, + }, + types.Claim{ + UserAddress: testAddress, + ChainId: suite.chainB.ChainID, + Module: types.ClaimTypeLiquidToken, + SourceChainId: "", + Amount: 5000000, + }, + }, + } + + k := suite.GetQuicksilverApp(suite.chainA).ClaimsManagerKeeper + for _, tt := range tests { + suite.Run(tt.name, func() { + got := k.NewClaim(suite.chainA.GetContext(), tt.args.address, tt.args.chainID, tt.args.module, tt.args.srcChainID, tt.args.amount) + suite.Require().Equal(tt.want, got) + }) + } +} + +func (suite *KeeperTestSuite) TestKeeper_ClaimStore() { + k := suite.GetQuicksilverApp(suite.chainA).ClaimsManagerKeeper + + testClaims[0].ChainId = suite.chainB.ChainID + testClaims[1].ChainId = suite.chainB.ChainID + testClaims[2].ChainId = suite.chainB.ChainID + testClaims[3].ChainId = suite.chainB.ChainID + + // no claim set + var getClaim types.Claim + var found bool + + getClaim, found = k.GetClaim(suite.chainA.GetContext(), suite.chainB.ChainID, testAddress, types.ClaimTypeOsmosisPool, "osmosis-1") + suite.Require().False(found) + + // set claim + k.SetClaim(suite.chainA.GetContext(), &testClaims[0]) + + getClaim, found = k.GetClaim(suite.chainA.GetContext(), suite.chainB.ChainID, testAddress, types.ClaimTypeOsmosisPool, "osmosis-1") + suite.Require().True(found) + suite.Require().Equal(testClaims[0], getClaim) + + // delete claim + k.DeleteClaim(suite.chainA.GetContext(), &getClaim) + getClaim, found = 
k.GetClaim(suite.chainA.GetContext(), suite.chainB.ChainID, testAddress, types.ClaimTypeOsmosisPool, "osmosis-1") + suite.Require().False(found) + + // iterators + var claims []*types.Claim + + k.SetClaim(suite.chainA.GetContext(), &testClaims[0]) + k.SetClaim(suite.chainA.GetContext(), &testClaims[1]) + k.SetClaim(suite.chainA.GetContext(), &testClaims[2]) + k.SetClaim(suite.chainA.GetContext(), &testClaims[3]) + k.SetClaim(suite.chainA.GetContext(), &testClaims[4]) + k.SetClaim(suite.chainA.GetContext(), &testClaims[5]) + + claims = k.AllClaims(suite.chainA.GetContext()) + suite.Require().Equal(6, len(claims)) + + claims = k.AllZoneClaims(suite.chainA.GetContext(), suite.chainB.ChainID) + suite.Require().Equal(4, len(claims)) + + claims = k.AllZoneClaims(suite.chainA.GetContext(), "cosmoshub-4") + suite.Require().Equal(2, len(claims)) + + // archive (last epoch) + k.ArchiveAndGarbageCollectClaims(suite.chainA.GetContext(), suite.chainB.ChainID) + + getClaim, found = k.GetLastEpochClaim(suite.chainA.GetContext(), suite.chainB.ChainID, testAddress, types.ClaimTypeOsmosisPool, "osmosis-1") + suite.Require().True(found) + suite.Require().Equal(testClaims[0], getClaim) + + // "cosmoshub-4 was not archived so this should not be found" + getClaim, found = k.GetLastEpochClaim(suite.chainA.GetContext(), "cosmoshub-4", testAddress, types.ClaimTypeLiquidToken, "") + suite.Require().False(found) + + // set archive claim + k.SetLastEpochClaim(suite.chainA.GetContext(), &testClaims[4]) + + getClaim, found = k.GetLastEpochClaim(suite.chainA.GetContext(), "cosmoshub-4", testAddress, types.ClaimTypeLiquidToken, "") + suite.Require().True(found) + suite.Require().Equal(testClaims[4], getClaim) + + // delete archive claim + k.DeleteLastEpochClaim(suite.chainA.GetContext(), &getClaim) + getClaim, found = k.GetLastEpochClaim(suite.chainA.GetContext(), "cosmoshub-4", testAddress, types.ClaimTypeLiquidToken, "") + suite.Require().False(found) + + // iterators + // we expect none as claims have been archived + claims = k.AllZoneClaims(suite.chainA.GetContext(), suite.chainB.ChainID) + suite.Require().Equal(0, len(claims)) + + // we expect the archived claims for chainB + claims = k.AllZoneLastEpochClaims(suite.chainA.GetContext(), suite.chainB.ChainID) + suite.Require().Equal(4, len(claims)) + + // clear + k.ClearClaims(suite.chainA.GetContext(), "cosmoshub-4") + // we expect none as claims have been cleared + claims = k.AllZoneClaims(suite.chainA.GetContext(), "cosmoshub-4") + suite.Require().Equal(0, len(claims)) + + // we archive current claims (none) to ensure the last epoch claims are correctly set + k.ArchiveAndGarbageCollectClaims(suite.chainA.GetContext(), suite.chainB.ChainID) + + // we expect none as claims have been archived + claims = k.AllZoneClaims(suite.chainA.GetContext(), suite.chainB.ChainID) + suite.Require().Equal(0, len(claims)) + + // we expect none as no current claims existed when we archived + claims = k.AllZoneLastEpochClaims(suite.chainA.GetContext(), suite.chainB.ChainID) + suite.Require().Equal(0, len(claims)) +} + +// func (suite *KeeperTestSuite) TestKeeper_IterateLastEpochUserClaims() { +// k := suite.GetQuicksilverApp(suite.chainA).ClaimsManagerKeeper + +// setClaims[0].ChainId = suite.chainB.ChainID +// setClaims[1].ChainId = suite.chainB.ChainID +// setClaims[2].ChainId = suite.chainB.ChainID + +// k.SetLastEpochClaim(suite.chainA.GetContext(), &setClaims[0]) +// k.SetLastEpochClaim(suite.chainA.GetContext(), &setClaims[1]) +// k.SetLastEpochClaim(suite.chainA.GetContext(), 
&setClaims[2]) +// k.SetLastEpochClaim(suite.chainA.GetContext(), &setClaims[3]) +// k.SetLastEpochClaim(suite.chainA.GetContext(), &setClaims[4]) + +// type args struct { +// chainID string +// address string +// fn func(index int64, data types.Claim) (stop bool) +// } +// tests := []struct { +// name string +// args args +// }{ +// { +// "blank", +// args{}, +// }, +// { +// "bad_chain_id", +// args{ +// chainID: "badchain", +// address: testAddress, +// fn: func(idx int64, data types.Claim) (stop bool) { +// fmt.Printf("iterator [%d]: %v\n", idx, data) +// return false +// }, +// }, +// }, +// { +// "suite.chainB.ChainID", +// args{ +// chainID: suite.chainB.ChainID, +// address: testAddress, +// fn: func(idx int64, data types.Claim) (stop bool) { +// fmt.Printf("iterator [%d]: %v\n", idx, data) +// return false +// }, +// }, +// }, +// { +// "chainId_cosmoshub-4", +// args{ +// chainID: "cosmoshub-4", +// address: testAddress, +// fn: func(idx int64, data types.Claim) (stop bool) { +// fmt.Printf("iterator [%d]: %v\n", idx, data) +// return false +// }, +// }, +// }, +// } +// for _, tt := range tests { +// suite.Run(tt.name, func() { +// k.IterateLastEpochUserClaims(suite.chainA.GetContext(), tt.args.chainID, tt.args.address, tt.args.fn) +// }) +// } +// } diff --git a/grpc_query.go b/grpc_query.go new file mode 100644 index 0000000..42eeb77 --- /dev/null +++ b/grpc_query.go @@ -0,0 +1,97 @@ +package keeper + +import ( + "bytes" + "context" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/ingenuity-build/quicksilver/x/claimsmanager/types" +) + +var _ types.QueryServer = Keeper{} + +func (k Keeper) Claims(c context.Context, req *types.QueryClaimsRequest) (*types.QueryClaimsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + ctx := sdk.UnwrapSDKContext(c) + + var claims []types.Claim + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefixClaim) + + pageRes, err := query.FilteredPaginate(store, req.Pagination, func(_, value []byte, accumulate bool) (bool, error) { + var claim types.Claim + if err := k.cdc.Unmarshal(value, &claim); err != nil { + return false, err + } + + if claim.ChainId == req.ChainId { + claims = append(claims, claim) + return true, nil + } + + return false, nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &types.QueryClaimsResponse{ + Claims: claims, + Pagination: pageRes, + }, nil +} + +func (k Keeper) LastEpochClaims(c context.Context, q *types.QueryClaimsRequest) (*types.QueryClaimsResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + out := []types.Claim{} + + k.IterateLastEpochClaims(ctx, q.ChainId, func(_ int64, claim types.Claim) (stop bool) { + out = append(out, claim) + return false + }) + + return &types.QueryClaimsResponse{Claims: out}, nil +} + +func (k Keeper) UserClaims(c context.Context, q *types.QueryClaimsRequest) (*types.QueryClaimsResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + out := []types.Claim{} + + addrBytes := []byte(q.Address) + + k.IterateAllClaims(ctx, func(_ int64, key []byte, claim types.Claim) (stop bool) { + // check for the presence of the addr bytes in the key. + // first prefix byte is 0x00; so cater for that! Then + 1 to skip the separator. 
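+ // i.e. key = [0x00 prefix][chainID][0x00 separator][address][...] (layout inferred from the index arithmetic below: + // bytes.Index scans key[1:], so the first +1 restores the skipped prefix byte and the second +1 steps past the separator)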
+ idx := bytes.Index(key[1:], []byte{0x00}) + 1 + 1 + if bytes.Equal(key[idx:idx+len(addrBytes)], addrBytes) { + out = append(out, claim) + } + return false + }) + return &types.QueryClaimsResponse{Claims: out}, nil +} + +func (k Keeper) UserLastEpochClaims(c context.Context, q *types.QueryClaimsRequest) (*types.QueryClaimsResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + out := []types.Claim{} + + addrBytes := []byte(q.Address) + k.IterateAllLastEpochClaims(ctx, func(_ int64, key []byte, claim types.Claim) (stop bool) { + // check for the presence of the addr bytes in the key. + // First byte is 0x01 here, so no need to consider it; + 1 to skip the separator. + idx := bytes.Index(key, []byte{0x00}) + 1 + if bytes.Equal(key[idx:idx+len(addrBytes)], addrBytes) { + out = append(out, claim) + } + return false + }) + + return &types.QueryClaimsResponse{Claims: out}, nil +} diff --git a/grpc_query_test.go b/grpc_query_test.go new file mode 100644 index 0000000..cbddae2 --- /dev/null +++ b/grpc_query_test.go @@ -0,0 +1,101 @@ +package keeper_test + +import ( + "context" + + "github.com/ingenuity-build/quicksilver/x/claimsmanager/types" +) + +func (suite *KeeperTestSuite) TestKeeper_Queries() { + k := suite.GetQuicksilverApp(suite.chainA).ClaimsManagerKeeper + + // now that we have a keeper, set the chainID of chainB + testClaims[0].ChainId = suite.chainB.ChainID + testClaims[1].ChainId = suite.chainB.ChainID + testClaims[2].ChainId = suite.chainB.ChainID + testClaims[3].ChainId = suite.chainB.ChainID + + tests := []struct { + name string + malleate func() + req *types.QueryClaimsRequest + queryFn func(context.Context, *types.QueryClaimsRequest) (*types.QueryClaimsResponse, error) + expectLength int + }{ + { + "Claims_chainB", + func() {}, + &types.QueryClaimsRequest{ + ChainId: suite.chainB.ChainID, + }, + k.Claims, + 4, + }, + { + "Claims_cosmoshub", + func() {}, + &types.QueryClaimsRequest{ + ChainId: "cosmoshub-4", + }, + k.Claims, + 2, + }, + { + "UserClaims_testAddress", + func() {}, + &types.QueryClaimsRequest{ + Address: testAddress, + }, + k.UserClaims, + 3, + }, + { + "LastEpochClaims_chainB", + func() { + k.ArchiveAndGarbageCollectClaims(suite.chainA.GetContext(), suite.chainB.ChainID) + }, + &types.QueryClaimsRequest{ + ChainId: suite.chainB.ChainID, + }, + k.LastEpochClaims, + 4, + }, + { + "LastEpochClaims_cosmoshub", + func() { + }, + &types.QueryClaimsRequest{ + ChainId: "cosmoshub-4", + }, + k.LastEpochClaims, + 0, // none expected as this zone was not archived + }, + { + "UserLastEpochClaims_testAddress", + func() { + }, + &types.QueryClaimsRequest{ + Address: testAddress, + }, + k.UserLastEpochClaims, + 2, // 2 expected from chainB, 1 omitted as it was not archived + }, + } + + k.SetClaim(suite.chainA.GetContext(), &testClaims[0]) + k.SetClaim(suite.chainA.GetContext(), &testClaims[1]) + k.SetClaim(suite.chainA.GetContext(), &testClaims[2]) + k.SetClaim(suite.chainA.GetContext(), &testClaims[3]) + k.SetClaim(suite.chainA.GetContext(), &testClaims[4]) + k.SetClaim(suite.chainA.GetContext(), &testClaims[5]) + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.malleate() + resp, err := tt.queryFn(suite.chainA.GetContext(), tt.req) + suite.Require().NoError(err) + suite.Require().NotNil(resp.Claims) + suite.Require().Equal(tt.expectLength, len(resp.Claims)) + }) + } +} diff --git a/hooks.go b/hooks.go new file mode 100644 index 0000000..9140048 --- /dev/null +++ b/hooks.go @@ -0,0 +1,42 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types"
epochstypes "github.com/ingenuity-build/quicksilver/x/epochs/types" +) + +func (k Keeper) BeforeEpochStart(ctx sdk.Context, epochIdentifier string, epochNumber int64) error { + if epochIdentifier == "epoch" && epochNumber > 1 { + if err := k.StoreSelfConsensusState(ctx, "epoch"); err != nil { + k.Logger(ctx).Error("unable to store consensus state", "error", err) + return err + } + } + return nil +} + +func (k Keeper) AfterEpochEnd(ctx sdk.Context, epochIdentifier string, epochNumber int64) error { + return nil +} + +// ___________________________________________________________________________________________________ + +// Hooks wrapper struct for incentives keeper +type Hooks struct { + k Keeper +} + +var _ epochstypes.EpochHooks = Hooks{} + +func (k Keeper) Hooks() Hooks { + return Hooks{k} +} + +// epochs hooks +func (h Hooks) BeforeEpochStart(ctx sdk.Context, epochIdentifier string, epochNumber int64) error { + return h.k.BeforeEpochStart(ctx, epochIdentifier, epochNumber) +} + +func (h Hooks) AfterEpochEnd(ctx sdk.Context, epochIdentifier string, epochNumber int64) error { + return h.k.AfterEpochEnd(ctx, epochIdentifier, epochNumber) +} diff --git a/keeper.go b/keeper.go new file mode 100644 index 0000000..1d4c36c --- /dev/null +++ b/keeper.go @@ -0,0 +1,74 @@ +package keeper + +import ( + "fmt" + "strconv" + "strings" + + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + ibcclienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + ibckeeper "github.com/cosmos/ibc-go/v5/modules/core/keeper" + ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" + "github.com/ingenuity-build/quicksilver/x/claimsmanager/types" + "github.com/tendermint/tendermint/libs/log" +) + +type Keeper struct { + cdc codec.BinaryCodec + storeKey storetypes.StoreKey + IBCKeeper ibckeeper.Keeper +} + +// NewKeeper returns a new instance of participationrewards Keeper. +// This function will panic on failure. +func NewKeeper( + cdc codec.Codec, + key storetypes.StoreKey, + ibcKeeper ibckeeper.Keeper, +) Keeper { + return Keeper{ + cdc: cdc, + storeKey: key, + IBCKeeper: ibcKeeper, + } +} + +// Logger returns a module-specific logger. 
+func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} + +func (k Keeper) StoreSelfConsensusState(ctx sdk.Context, key string) error { + var height ibcclienttypes.Height + if strings.Contains(ctx.ChainID(), "-") { + revisionNum, err := strconv.ParseUint(strings.Split(ctx.ChainID(), "-")[1], 10, 64) + if err != nil { + k.Logger(ctx).Error("error getting revision number for client") + return err + } + + height = ibcclienttypes.Height{ + RevisionNumber: revisionNum, + RevisionHeight: uint64(ctx.BlockHeight() - 1), + } + } else { + // ONLY FOR TESTING - ibctesting module chains do not follow the standard [chainname]-[num] structure + height = ibcclienttypes.Height{ + RevisionNumber: 0, // revision number for testchain1 is 0 (because parseChainId splits on '-') + RevisionHeight: uint64(ctx.BlockHeight() - 1), + } + } + + selfConsState, err := k.IBCKeeper.ClientKeeper.GetSelfConsensusState(ctx, height) + if err != nil { + k.Logger(ctx).Error("error getting self consensus state of previous height") + return err + } + + state := selfConsState.(*ibctmtypes.ConsensusState) + k.SetSelfConsensusState(ctx, key, state) + + return nil +} diff --git a/keeper_test.go b/keeper_test.go new file mode 100644 index 0000000..ddf0c7e --- /dev/null +++ b/keeper_test.go @@ -0,0 +1,112 @@ +package keeper_test + +import ( + "testing" + "time" + + ibctesting "github.com/cosmos/ibc-go/v5/testing" + "github.com/stretchr/testify/suite" + + "github.com/ingenuity-build/quicksilver/app" + "github.com/ingenuity-build/quicksilver/utils" + icstypes "github.com/ingenuity-build/quicksilver/x/interchainstaking/types" +) + +var testAddress = utils.GenerateAccAddressForTest().String() + +func init() { + ibctesting.DefaultTestingAppInit = app.SetupTestingApp +} + +// TestKeeperTestSuite runs all the tests within this package. +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, new(KeeperTestSuite)) +} + +func newQuicksilverPath(chainA, chainB *ibctesting.TestChain) *ibctesting.Path { + path := ibctesting.NewPath(chainA, chainB) + path.EndpointA.ChannelConfig.PortID = ibctesting.TransferPort + path.EndpointB.ChannelConfig.PortID = ibctesting.TransferPort + + return path +} + +type KeeperTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain + + path *ibctesting.Path +} + +func (s *KeeperTestSuite) GetQuicksilverApp(chain *ibctesting.TestChain) *app.Quicksilver { + app, ok := chain.App.(*app.Quicksilver) + if !ok { + panic("not quicksilver app") + } + + return app +} + +// SetupTest creates a coordinator with 2 test chains.
+func (suite *KeeperTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) // initializes 2 test chains + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) // convenience and readability + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) // convenience and readability + + suite.path = newQuicksilverPath(suite.chainA, suite.chainB) + suite.coordinator.SetupConnections(suite.path) + + suite.coordinator.CurrentTime = time.Now().UTC() + suite.coordinator.UpdateTime() + + suite.initTestZone() +} + +func (suite *KeeperTestSuite) initTestZone() { + // test zone + zone := icstypes.Zone{ + ConnectionId: suite.path.EndpointA.ConnectionID, + ChainId: suite.chainB.ChainID, + AccountPrefix: "bcosmos", + LocalDenom: "uqatom", + BaseDenom: "uatom", + ReturnToSender: false, + UnbondingEnabled: false, + LiquidityModule: true, + Decimals: 6, + } + suite.GetQuicksilverApp(suite.chainA).InterchainstakingKeeper.SetZone(suite.chainA.GetContext(), &zone) + + // cosmos zone + zone = icstypes.Zone{ + ConnectionId: "connection-77001", + ChainId: "cosmoshub-4", + AccountPrefix: "cosmos", + LocalDenom: "uqatom", + BaseDenom: "uatom", + ReturnToSender: false, + UnbondingEnabled: false, + LiquidityModule: true, + Decimals: 6, + } + suite.GetQuicksilverApp(suite.chainA).InterchainstakingKeeper.SetZone(suite.chainA.GetContext(), &zone) + + // osmosis zone + zone = icstypes.Zone{ + ConnectionId: "connection-77002", + ChainId: "osmosis-1", + AccountPrefix: "osmo", + LocalDenom: "uqosmo", + BaseDenom: "uosmo", + ReturnToSender: false, + UnbondingEnabled: false, + LiquidityModule: true, + Decimals: 6, + } + suite.GetQuicksilverApp(suite.chainA).InterchainstakingKeeper.SetZone(suite.chainA.GetContext(), &zone) +} diff --git a/msg_server.go b/msg_server.go new file mode 100644 index 0000000..c416edc --- /dev/null +++ b/msg_server.go @@ -0,0 +1,17 @@ +package keeper + +import ( + "github.com/ingenuity-build/quicksilver/x/claimsmanager/types" +) + +type msgServer struct { + *Keeper +} + +// NewMsgServerImpl returns an implementation of the MsgServer interface +// for the provided Keeper. 
+func NewMsgServerImpl(keeper Keeper) types.MsgServer { + return &msgServer{Keeper: &keeper} +} + +var _ types.MsgServer = msgServer{} diff --git a/self_consensus_state.go b/self_consensus_state.go new file mode 100644 index 0000000..e5859db --- /dev/null +++ b/self_consensus_state.go @@ -0,0 +1,32 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" + "github.com/ingenuity-build/quicksilver/x/claimsmanager/types" +) + +// GetSelfConsensusState returns consensus state stored every epoch +func (k Keeper) GetSelfConsensusState(ctx sdk.Context, key string) (ibctmtypes.ConsensusState, bool) { + store := ctx.KVStore(k.storeKey) + var selfConsensusState ibctmtypes.ConsensusState + + bz := store.Get(append(types.KeySelfConsensusState, []byte(key)...)) + if bz == nil { + return selfConsensusState, false + } + k.cdc.MustUnmarshal(bz, &selfConsensusState) + return selfConsensusState, true +} + +// SetSelfConsensusState sets the self consensus state +func (k Keeper) SetSelfConsensusState(ctx sdk.Context, key string, consState *ibctmtypes.ConsensusState) { + store := ctx.KVStore(k.storeKey) + store.Set(append(types.KeySelfConsensusState, []byte(key)...), k.cdc.MustMarshal(consState)) +} + +// DeleteSelfConsensusState deletes the self consensus state +func (k Keeper) DeleteSelfConsensusState(ctx sdk.Context, key string) { + store := ctx.KVStore(k.storeKey) + store.Delete(append(types.KeySelfConsensusState, []byte(key)...)) +} diff --git a/self_consensus_state_test.go b/self_consensus_state_test.go new file mode 100644 index 0000000..4c26188 --- /dev/null +++ b/self_consensus_state_test.go @@ -0,0 +1,20 @@ +package keeper_test + +func (s *KeeperTestSuite) TestGetSetDelete() { + k := s.GetQuicksilverApp(s.chainA).ClaimsManagerKeeper + ctx := s.chainA.GetContext() + + _, found := k.GetSelfConsensusState(ctx, "test") + s.Require().False(found) + + err := k.StoreSelfConsensusState(ctx, "test") + s.Require().NoError(err) + + _, found = k.GetSelfConsensusState(ctx, "test") + s.Require().True(found) + + k.DeleteSelfConsensusState(ctx, "test") + + _, found = k.GetSelfConsensusState(ctx, "test") + s.Require().False(found) +}
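Taken together, claims.go gives the module a two-generation claim store: current-epoch claims live under KeyPrefixClaim and are promoted wholesale to KeyPrefixLastEpochClaim at each epoch boundary by swapping the one-byte key prefix. A minimal lifecycle sketch, assuming the package lives at x/claimsmanager/keeper; the helper name and address argument are illustrative and not part of this changeset:

package keeper_test

import (
	sdk "github.com/cosmos/cosmos-sdk/types"

	"github.com/ingenuity-build/quicksilver/x/claimsmanager/keeper"
	"github.com/ingenuity-build/quicksilver/x/claimsmanager/types"
)

// exampleClaimLifecycle is illustrative only.
func exampleClaimLifecycle(ctx sdk.Context, k keeper.Keeper, addr string) {
	// create and persist a current-epoch claim
	claim := k.NewClaim(ctx, addr, "cosmoshub-4", types.ClaimTypeLiquidToken, "", 1000000)
	k.SetClaim(ctx, &claim)

	// read it back from the current-epoch store
	if c, found := k.GetClaim(ctx, "cosmoshub-4", addr, types.ClaimTypeLiquidToken, ""); found {
		_ = c.Amount
	}

	// epoch boundary: drop the previous archive for the zone, then move the
	// current claims into the last-epoch store (prefix swap on each key)
	k.ArchiveAndGarbageCollectClaims(ctx, "cosmoshub-4")

	// the claim is now visible only through the last-epoch accessors
	_, found := k.GetLastEpochClaim(ctx, "cosmoshub-4", addr, types.ClaimTypeLiquidToken, "")
	_ = found
}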
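All of the Iterate* helpers share the same callback shape, so early termination is just a matter of returning stop = true. A small sketch under the same imports as above (helper name illustrative), collecting at most n claims for a zone:

func firstNClaims(ctx sdk.Context, k keeper.Keeper, chainID string, n int) []types.Claim {
	out := make([]types.Claim, 0, n)
	k.IterateClaims(ctx, chainID, func(_ int64, claim types.Claim) (stop bool) {
		out = append(out, claim)
		return len(out) >= n // stop once we have enough
	})
	return out
}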
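hooks.go only defines the hook receiver; nothing in this diff registers it with the epochs module. Assuming the x/epochs keeper follows the Osmosis-style SetHooks/NewMultiEpochHooks pattern (an assumption, not shown here), the wiring and a read-back of the snapshot taken by BeforeEpochStart would look roughly like:

// app.go (sketch; assumes the Osmosis-style epochs keeper API):
// app.EpochsKeeper.SetHooks(
// 	epochstypes.NewMultiEpochHooks(
// 		app.ClaimsManagerKeeper.Hooks(),
// 	),
// )

func readEpochSnapshot(ctx sdk.Context, k keeper.Keeper) {
	// BeforeEpochStart stores the previous block's consensus state
	// under the fixed "epoch" key once epochNumber > 1
	if cs, found := k.GetSelfConsensusState(ctx, "epoch"); found {
		_ = cs.Timestamp
	}
}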
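The query server filters Claims and LastEpochClaims by ChainId, and UserClaims/UserLastEpochClaims by matching address bytes against the key layout noted in grpc_query.go. A small caller sketch, using sdk.WrapSDKContext to satisfy the context.Context parameter (helper name illustrative):

func queryZoneClaims(ctx sdk.Context, k keeper.Keeper, chainID string) ([]types.Claim, error) {
	resp, err := k.Claims(sdk.WrapSDKContext(ctx), &types.QueryClaimsRequest{
		ChainId: chainID,
	})
	if err != nil {
		return nil, err
	}
	return resp.Claims, nil
}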