
Commit

Merge branch 'main' into penumbra-protos-v0.57.0
jtieri authored Jul 31, 2023
2 parents 8cc4135 + ab1c4fc commit 0317512
Showing 23 changed files with 437 additions and 1,398 deletions.
22 changes: 22 additions & 0 deletions .github/workflows/interchaintest.yml
@@ -117,6 +117,28 @@ jobs:
- name: interchaintest
run: make interchaintest-fee-middleware

fee-grant:
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.20
uses: actions/setup-go@v1
with:
go-version: 1.20
id: go

- name: checkout relayer
uses: actions/checkout@v2

- uses: actions/cache@v1
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: interchaintest
run: make interchaintest-fee-grant

scenarios:
runs-on: ubuntu-latest
steps:
2 changes: 2 additions & 0 deletions .gitignore
@@ -21,3 +21,5 @@ dist/

# Don't commit the vendor directory if anyone runs 'go mod vendor'.
/vendor

go.work.sum
3 changes: 3 additions & 0 deletions Makefile
@@ -88,6 +88,9 @@ interchaintest-misbehaviour:
interchaintest-fee-middleware:
cd interchaintest && go test -race -v -run TestRelayerFeeMiddleware .

interchaintest-fee-grant:
cd interchaintest && go test -race -v -run TestRelayerFeeGrant .

interchaintest-scenario: ## Scenario tests are suitable for simple networks of 1 validator and no full nodes. They test specific functionality.
cd interchaintest && go test -timeout 30m -race -v -run TestScenario ./...

35 changes: 24 additions & 11 deletions cmd/query.go
@@ -1070,9 +1070,10 @@ $ %s query unrelayed-acks demo-path channel-0`,

func queryClientsExpiration(a *appState) *cobra.Command {
cmd := &cobra.Command{
Use: "clients-expiration path",
Short: "query for light clients expiration date",
Args: withUsage(cobra.ExactArgs(1)),
Use: "clients-expiration path",
Aliases: []string{"ce"},
Short: "query for light clients expiration date",
Args: withUsage(cobra.ExactArgs(1)),
Example: strings.TrimSpace(fmt.Sprintf(`
$ %s query clients-expiration demo-path`,
appName,
@@ -1095,17 +1096,29 @@ $ %s query clients-expiration demo-path`,
return err
}

srcExpiration, err := relayer.QueryClientExpiration(cmd.Context(), c[src], c[dst])
if err != nil {
return err
srcExpiration, srcClientInfo, errSrc := relayer.QueryClientExpiration(cmd.Context(), c[src], c[dst])
if errSrc != nil && !strings.Contains(errSrc.Error(), "light client not found") {
return errSrc
}
dstExpiration, err := relayer.QueryClientExpiration(cmd.Context(), c[dst], c[src])
if err != nil {
return err
dstExpiration, dstClientInfo, errDst := relayer.QueryClientExpiration(cmd.Context(), c[dst], c[src])
if errDst != nil && !strings.Contains(errDst.Error(), "light client not found") {
return errDst
}

// if only the src light client is found, just print info for source light client
if errSrc == nil && errDst != nil {
fmt.Fprintln(cmd.OutOrStdout(), relayer.SPrintClientExpiration(c[src], srcExpiration, srcClientInfo))
return nil
}

// if only the dst light client is found, just print info for destination light client
if errDst == nil && errSrc != nil {
fmt.Fprintln(cmd.OutOrStdout(), relayer.SPrintClientExpiration(c[dst], dstExpiration, dstClientInfo))
return nil
}

fmt.Fprintf(cmd.OutOrStdout(), relayer.SPrintClientExpiration(c[src], srcExpiration))
fmt.Fprintf(cmd.OutOrStdout(), relayer.SPrintClientExpiration(c[dst], dstExpiration))
fmt.Fprintln(cmd.OutOrStdout(), relayer.SPrintClientExpiration(c[src], srcExpiration, srcClientInfo))
fmt.Fprintln(cmd.OutOrStdout(), relayer.SPrintClientExpiration(c[dst], dstExpiration, dstClientInfo))

return nil
},
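A note on the `Aliases` addition above: cobra resolves an alias to the same command, so the expiration query becomes reachable under a short name as well. Below is a minimal, self-contained sketch of how such an alias behaves — the command and app wiring here are hypothetical stand-ins, not the relayer's actual `cmd` package.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "app"}
	root.AddCommand(&cobra.Command{
		Use:     "clients-expiration path",
		Aliases: []string{"ce"}, // same handler is reachable as "app ce <path>"
		Args:    cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("querying client expiration for path:", args[0])
			return nil
		},
	})
	root.SetArgs([]string{"ce", "demo-path"}) // exercise the alias
	_ = root.Execute()
}
```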
1,238 changes: 0 additions & 1,238 deletions go.work.sum

This file was deleted.

8 changes: 4 additions & 4 deletions interchaintest/feegrant_test.go
@@ -55,12 +55,12 @@ func genMnemonic(t *testing.T) string {
return mn
}

// TestScenarioFeegrantBasic Feegrant on a single chain
// Run this test with e.g. go test -timeout 300s -run ^TestScenarioFeegrantBasic$ github.com/cosmos/relayer/v2/ibctest.
// TestRelayerFeeGrant Feegrant on a single chain
// Run this test with e.g. go test -timeout 300s -run ^TestRelayerFeeGrant$ github.com/cosmos/relayer/v2/ibctest.
//
// Helpful to debug:
// docker ps -a --format {{.Names}} then e.g. docker logs gaia-1-val-0-TestScenarioFeegrantBasic 2>&1 -f
func TestScenarioFeegrantBasic(t *testing.T) {
// docker ps -a --format {{.Names}} then e.g. docker logs gaia-1-val-0-TestRelayerFeeGrant 2>&1 -f
func TestRelayerFeeGrant(t *testing.T) {
ctx := context.Background()
logger := zaptest.NewLogger(t)

16 changes: 9 additions & 7 deletions relayer/chains/cosmos/cosmos_chain_processor.go
@@ -71,6 +71,7 @@ func NewCosmosChainProcessor(log *zap.Logger, provider *CosmosProvider, metrics

const (
queryTimeout = 5 * time.Second
queryStateTimeout = 60 * time.Second
blockResultsQueryTimeout = 2 * time.Minute
latestHeightQueryRetryDelay = 1 * time.Second
latestHeightQueryRetries = 5
@@ -279,7 +280,7 @@ func (ccp *CosmosChainProcessor) Run(ctx context.Context, initialBlockHistory ui

// initializeConnectionState will bootstrap the connectionStateCache with the open connection state.
func (ccp *CosmosChainProcessor) initializeConnectionState(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, queryTimeout)
ctx, cancel := context.WithTimeout(ctx, queryStateTimeout)
defer cancel()
connections, err := ccp.chainProvider.QueryConnections(ctx)
if err != nil {
@@ -299,7 +300,7 @@ func (ccp *CosmosChainProcessor) initializeConnectionState(ctx context.Context)

// initializeChannelState will bootstrap the channelStateCache with the open channel state.
func (ccp *CosmosChainProcessor) initializeChannelState(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, queryTimeout)
ctx, cancel := context.WithTimeout(ctx, queryStateTimeout)
defer cancel()
channels, err := ccp.chainProvider.QueryChannels(ctx)
if err != nil {
@@ -315,12 +316,13 @@ func (ccp *CosmosChainProcessor) initializeChannelState(ctx context.Context) err
continue
}
ccp.channelConnections[ch.ChannelId] = ch.ConnectionHops[0]
ccp.channelStateCache[processor.ChannelKey{
k := processor.ChannelKey{
ChannelID: ch.ChannelId,
PortID: ch.PortId,
CounterpartyChannelID: ch.Counterparty.ChannelId,
CounterpartyPortID: ch.Counterparty.PortId,
}] = ch.State == chantypes.OPEN
}
ccp.channelStateCache.SetOpen(k, ch.State == chantypes.OPEN, ch.Ordering)
}
return nil
}
@@ -402,11 +404,11 @@ func (ccp *CosmosChainProcessor) queryCycle(ctx context.Context, persistence *qu
})

if err := eg.Wait(); err != nil {
ccp.log.Warn(
"Could not query block data. Consider checking if your RPC node is online, and that transaction indexing is enabled.",
ccp.log.Debug(
"Error querying block data",
zap.Int64("height", i),
zap.Error(err),
)
ccp.log.Debug("Error querying block data", zap.Error(err))

persistence.retriesAtLatestQueriedBlock++
if persistence.retriesAtLatestQueriedBlock >= blockMaxRetries {
10 changes: 5 additions & 5 deletions relayer/chains/cosmos/message_handlers.go
@@ -40,7 +40,7 @@ func (ccp *CosmosChainProcessor) handlePacketMessage(eventType string, pi provid
}

if eventType == chantypes.EventTypeTimeoutPacket && pi.ChannelOrder == chantypes.ORDERED.String() {
ccp.channelStateCache[k] = false
ccp.channelStateCache.SetOpen(k, false, chantypes.ORDERED)
}

if !c.PacketFlow.ShouldRetainSequence(ccp.pathProcessors, k, ccp.chainProvider.ChainId(), eventType, pi.Sequence) {
@@ -78,19 +78,19 @@ func (ccp *CosmosChainProcessor) handleChannelMessage(eventType string, ci provi
}
}
if !found {
ccp.channelStateCache[channelKey] = false
ccp.channelStateCache.SetOpen(channelKey, false, ci.Order)
}
} else {
switch eventType {
case chantypes.EventTypeChannelOpenTry:
ccp.channelStateCache[channelKey] = false
ccp.channelStateCache.SetOpen(channelKey, false, ci.Order)
case chantypes.EventTypeChannelOpenAck, chantypes.EventTypeChannelOpenConfirm:
ccp.channelStateCache[channelKey] = true
ccp.channelStateCache.SetOpen(channelKey, true, ci.Order)
ccp.logChannelOpenMessage(eventType, ci)
case chantypes.EventTypeChannelCloseConfirm:
for k := range ccp.channelStateCache {
if k.PortID == ci.PortID && k.ChannelID == ci.ChannelID {
ccp.channelStateCache[k] = false
ccp.channelStateCache.SetOpen(channelKey, false, ci.Order)
break
}
}
10 changes: 5 additions & 5 deletions relayer/chains/cosmos/message_handlers_test.go
@@ -128,7 +128,7 @@ func TestChannelStateCache(t *testing.T) {

// The channel state is not open, but the entry should exist in the channelStateCache.
// MsgInitKey returns the ChannelKey with an empty counterparty channel ID.
require.False(t, ccp.channelStateCache[k.MsgInitKey()])
require.False(t, ccp.channelStateCache[k.MsgInitKey()].Open)

// Observe MsgChannelOpenAck, which does have counterparty channel ID.
ccp.handleChannelMessage(chantypes.EventTypeChannelOpenAck, msgOpenAck, c)
@@ -139,7 +139,7 @@ func TestChannelStateCache(t *testing.T) {

// The fully populated ChannelKey should now be the only entry for this channel.
// The channel now open.
require.True(t, ccp.channelStateCache[k])
require.True(t, ccp.channelStateCache[k].Open)
})

t.Run("handshake already occurred", func(t *testing.T) {
@@ -156,7 +156,7 @@ func TestChannelStateCache(t *testing.T) {

// Initialize channelStateCache with populated channel ID and counterparty channel ID.
// This emulates initializeChannelState after a recent channel handshake has completed
ccp.channelStateCache[k] = true
ccp.channelStateCache.SetOpen(k, true, chantypes.NONE)

// Observe MsgChannelOpenInit, which does not have counterparty channel ID.
ccp.handleChannelMessage(chantypes.EventTypeChannelOpenInit, msgOpenInit, c)
@@ -166,7 +166,7 @@ func TestChannelStateCache(t *testing.T) {

// The fully populated ChannelKey should still be the only entry for this channel.
// The channel is still marked open since it was open during initializeChannelState.
require.True(t, ccp.channelStateCache[k])
require.True(t, ccp.channelStateCache[k].Open)

// Observe MsgChannelOpenAck, which does have counterparty channel ID.
ccp.handleChannelMessage(chantypes.EventTypeChannelOpenAck, msgOpenAck, c)
@@ -175,6 +175,6 @@ func TestChannelStateCache(t *testing.T) {
require.Len(t, ccp.channelStateCache, 1)

// The fully populated ChannelKey should still be the only entry for this channel.
require.True(t, ccp.channelStateCache[k])
require.True(t, ccp.channelStateCache[k].Open)
})
}
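Across the handlers and tests above, direct boolean writes like `channelStateCache[k] = true` are replaced with `channelStateCache.SetOpen(k, open, order)`, and reads become `channelStateCache[k].Open`, which implies the cached value now carries at least the open flag plus the channel ordering. The real type lives in the relayer's `processor` package and is not part of this diff; the sketch below only illustrates the shape these call sites suggest, with hypothetical stand-in types.

```go
package main

import "fmt"

// Hypothetical stand-ins; the actual definitions live in relayer/processor
// and are not shown in this commit.
type Order int

const (
	NONE Order = iota
	ORDERED
	UNORDERED
)

type ChannelKey struct {
	ChannelID, PortID, CounterpartyChannelID, CounterpartyPortID string
}

// ChannelState carries more than a bare bool, which is why the tests read
// channelStateCache[k].Open instead of channelStateCache[k].
type ChannelState struct {
	Open  bool
	Order Order
}

type ChannelStateCache map[ChannelKey]ChannelState

// SetOpen mirrors the call sites in the diff: it records the open flag and
// the channel ordering in a single update.
func (c ChannelStateCache) SetOpen(k ChannelKey, open bool, order Order) {
	c[k] = ChannelState{Open: open, Order: order}
}

func main() {
	cache := make(ChannelStateCache)
	k := ChannelKey{ChannelID: "channel-0", PortID: "transfer"}
	cache.SetOpen(k, true, ORDERED)
	fmt.Println(cache[k].Open, cache[k].Order) // true 1
}
```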
23 changes: 23 additions & 0 deletions relayer/chains/cosmos/query.go
@@ -1096,6 +1096,29 @@ func (cc *CosmosProvider) QueryNextSeqRecv(ctx context.Context, height int64, ch
}, nil
}

// QueryNextSeqAck returns the next seqAck for a configured channel
func (cc *CosmosProvider) QueryNextSeqAck(ctx context.Context, height int64, channelid, portid string) (recvRes *chantypes.QueryNextSequenceReceiveResponse, err error) {
key := host.NextSequenceAckKey(portid, channelid)

value, proofBz, proofHeight, err := cc.QueryTendermintProof(ctx, height, key)
if err != nil {
return nil, err
}

// check if next sequence receive exists
if len(value) == 0 {
return nil, sdkerrors.Wrapf(chantypes.ErrChannelNotFound, "portID (%s), channelID (%s)", portid, channelid)
}

sequence := binary.BigEndian.Uint64(value)

return &chantypes.QueryNextSequenceReceiveResponse{
NextSequenceReceive: sequence,
Proof: proofBz,
ProofHeight: proofHeight,
}, nil
}

// QueryPacketCommitment returns the packet commitment proof at a given height
func (cc *CosmosProvider) QueryPacketCommitment(ctx context.Context, height int64, channelid, portid string, seq uint64) (comRes *chantypes.QueryPacketCommitmentResponse, err error) {
key := host.PacketCommitmentKey(portid, channelid, seq)
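Worth noting about the new `QueryNextSeqAck`: it reuses `chantypes.QueryNextSequenceReceiveResponse` rather than introducing an ack-specific response type, and it decodes the proven store value as an 8-byte big-endian integer, which is how ibc-go stores sequence counters. A tiny self-contained sketch of just that decoding step (illustrative only):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// The value stored under the ICS-24 nextSequenceAck key is the sequence
	// encoded as an 8-byte big-endian integer; this mirrors the
	// binary.BigEndian.Uint64 call in QueryNextSeqAck above.
	raw := []byte{0, 0, 0, 0, 0, 0, 0, 42}
	fmt.Println(binary.BigEndian.Uint64(raw)) // 42
}
```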
23 changes: 4 additions & 19 deletions relayer/chains/cosmos/tx.go
@@ -55,7 +55,7 @@ var (
rtyAtt = retry.Attempts(rtyAttNum)
rtyDel = retry.Delay(time.Millisecond * 400)
rtyErr = retry.LastErrorOnly(true)
numRegex = regexp.MustCompile("[0-9]+")
accountSeqRegex = regexp.MustCompile("account sequence mismatch, expected ([0-9]+), got ([0-9]+)")
defaultBroadcastWaitTimeout = 10 * time.Minute
errUnknown = "unknown"
)
@@ -660,32 +660,17 @@ func (cc *CosmosProvider) handleAccountSequenceMismatchError(sequenceGuard *Wall
panic("sequence guard not configured")
}

sequences := numRegex.FindAllString(err.Error(), -1)
if len(sequences) != 2 {
matches := accountSeqRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 {
return
}
nextSeq, err := strconv.ParseUint(sequences[0], 10, 64)
nextSeq, err := strconv.ParseUint(matches[1], 10, 64)
if err != nil {
return
}
sequenceGuard.NextAccountSequence = nextSeq
}

// handleAccountSequenceMismatchError will parse the error string, e.g.:
// "account sequence mismatch, expected 10, got 9: incorrect account sequence"
// and update the next account sequence with the expected value.
// func (cc *CosmosProvider) handleAccountSequenceMismatchError(err error) {
// sequences := numRegex.FindAllString(err.Error(), -1)
// if len(sequences) != 2 {
// return
// }
// nextSeq, err := strconv.ParseUint(sequences[0], 10, 64)
// if err != nil {
// return
// }
// cc.nextAccountSeq = nextSeq
// }

// MsgCreateClient creates an sdk.Msg to update the client on src with consensus state from dst
func (cc *CosmosProvider) MsgCreateClient(
clientState ibcexported.ClientState,
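The switch from the generic `numRegex` to `accountSeqRegex` above means only the specific "account sequence mismatch" message is parsed, so unrelated numbers elsewhere in an error string can no longer be mistaken for sequence values; the chain's expected sequence is capture group 1. A small self-contained check of that parsing, mirroring the logic above (illustrative only):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var accountSeqRegex = regexp.MustCompile("account sequence mismatch, expected ([0-9]+), got ([0-9]+)")

func main() {
	errMsg := "account sequence mismatch, expected 10, got 9: incorrect account sequence"
	matches := accountSeqRegex.FindStringSubmatch(errMsg)
	if len(matches) == 0 {
		fmt.Println("not a sequence mismatch error")
		return
	}
	// matches[1] is the chain's expected sequence, which becomes the next
	// account sequence to use.
	nextSeq, err := strconv.ParseUint(matches[1], 10, 64)
	if err != nil {
		return
	}
	fmt.Println(nextSeq) // 10
}
```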
2 changes: 1 addition & 1 deletion relayer/chains/mock/mock_chain_processor.go
@@ -170,7 +170,7 @@ func (mcp *MockChainProcessor) queryCycle(ctx context.Context, persistence *quer

// mocking all channels open
for channelKey := range ibcMessagesCache.PacketFlow {
channelStateCache[channelKey] = true
channelStateCache.SetOpen(channelKey, true, chantypes.NONE)
}

// now pass foundMessages to the path processors
8 changes: 4 additions & 4 deletions relayer/chains/penumbra/message_handlers.go
@@ -63,18 +63,18 @@ func (pcp *PenumbraChainProcessor) handleChannelMessage(eventType string, ci pro
}
}
if !found {
pcp.channelStateCache[channelKey] = false
pcp.channelStateCache.SetOpen(channelKey, false, ci.Order)
}
} else {
switch eventType {
case chantypes.EventTypeChannelOpenTry:
pcp.channelStateCache[channelKey] = false
pcp.channelStateCache.SetOpen(channelKey, false, ci.Order)
case chantypes.EventTypeChannelOpenAck, chantypes.EventTypeChannelOpenConfirm:
pcp.channelStateCache[channelKey] = true
pcp.channelStateCache.SetOpen(channelKey, true, ci.Order)
case chantypes.EventTypeChannelCloseConfirm:
for k := range pcp.channelStateCache {
if k.PortID == ci.PortID && k.ChannelID == ci.ChannelID {
pcp.channelStateCache[k] = false
pcp.channelStateCache.SetOpen(channelKey, false, ci.Order)
break
}
}
5 changes: 3 additions & 2 deletions relayer/chains/penumbra/penumbra_chain_processor.go
@@ -257,12 +257,13 @@ func (pcp *PenumbraChainProcessor) initializeChannelState(ctx context.Context) e
continue
}
pcp.channelConnections[ch.ChannelId] = ch.ConnectionHops[0]
pcp.channelStateCache[processor.ChannelKey{
k := processor.ChannelKey{
ChannelID: ch.ChannelId,
PortID: ch.PortId,
CounterpartyChannelID: ch.Counterparty.ChannelId,
CounterpartyPortID: ch.Counterparty.PortId,
}] = ch.State == chantypes.OPEN
}
pcp.channelStateCache.SetOpen(k, ch.State == chantypes.OPEN, ch.Ordering)
}
return nil
}
(Diff for the remaining changed files not rendered on this page.)
