From 91cd19981565d40912bfb139b9023967de2ff27f Mon Sep 17 00:00:00 2001
From: Christian Lohr
Date: Mon, 17 Jun 2024 12:31:43 +0200
Subject: [PATCH] fix: withhold gravity batches until valset update (#1195)

# Related GitHub tickets

- https://github.com/VolumeFi/paloma/issues/1464

# Background

This change adds a new constraint when handing out gravity batches to be relayed. They will now be withheld while valset updates are pending on the target chain, just like with SLC.

I had to cut some corners to get this out sooner rather than later, but I also added a more decentralised event bus system for decoupling systems. It's currently very bare-bones and, as a singleton, hard to test, but if it grows, we can refactor it into something more injectable.

# Testing completed

- [x] test coverage exists or has been added/updated
- [ ] tested in a private testnet

# Breaking changes

- [x] I have checked my code for breaking changes
- [x] If there are breaking changes, there is a supporting migration.
---
 app/app.go                                |  1 +
 .../evm/keeper/keeper_integration_test.go | 10 +--
 .../evm/keeper/test_helpers_test.go       |  1 +
 util/eventbus/bus.go                      | 60 ++++++++++++++++++
 util/eventbus/bus_test.go                 | 48 +++++++++++++++
 x/consensus/keeper/concensus_keeper.go    | 61 +++++++++++--------
 x/evm/keeper/keeper.go                    | 20 ++++--
 x/evm/keeper/smart_contract_deployment.go |  4 +-
 x/evm/types/types.go                      |  2 +
 x/gravity/keeper/batch.go                 |  5 ++
 x/gravity/keeper/grpc_query.go            | 18 +++++-
 x/gravity/keeper/grpc_query_test.go       |  9 ++-
 x/gravity/keeper/keeper.go                |  3 +
 x/gravity/keeper/test_common.go           |  5 ++
 x/gravity/types/expected_keepers.go       |  5 ++
 15 files changed, 212 insertions(+), 40 deletions(-)
 create mode 100644 util/eventbus/bus.go
 create mode 100644 util/eventbus/bus_test.go

diff --git a/app/app.go b/app/app.go
index c9d8fbfa..72875847 100644
--- a/app/app.go
+++ b/app/app.go
@@ -595,6 +595,7 @@ func New(
 		app.DistrKeeper,
 		app.TransferKeeper,
 		app.EvmKeeper,
+		app.ConsensusKeeper,
 		gravitymodulekeeper.NewGravityStoreGetter(keys[gravitymoduletypes.StoreKey]),
 		authorityAddress,
 		authcodec.NewBech32Codec(chainparams.ValidatorAddressPrefix),
diff --git a/tests/integration/evm/keeper/keeper_integration_test.go b/tests/integration/evm/keeper/keeper_integration_test.go
index 975e7b24..8e92fc71 100644
--- a/tests/integration/evm/keeper/keeper_integration_test.go
+++ b/tests/integration/evm/keeper/keeper_integration_test.go
@@ -127,7 +127,7 @@ func TestEndToEndForEvmArbitraryCall(t *testing.T) {
 	})
 	require.NoError(t, err)
 
-	queue := consensustypes.Queue(keeper.ConsensusTurnstoneMessage, chainType, chainReferenceID)
+	queue := consensustypes.Queue(types.ConsensusTurnstoneMessage, chainType, chainReferenceID)
 	msgs, err := f.consensusKeeper.GetMessagesForSigning(ctx, queue, operator)
 
 	require.NoError(t, err)
@@ -208,7 +208,7 @@ func TestFirstSnapshot_OnSnapshotBuilt(t *testing.T) {
 		require.NoError(t, err)
 	}
 
-	queue := fmt.Sprintf("evm/%s/%s", newChain.GetChainReferenceID(), keeper.ConsensusTurnstoneMessage)
+	queue := fmt.Sprintf("evm/%s/%s", newChain.GetChainReferenceID(), types.ConsensusTurnstoneMessage)
 	msgs, err := f.consensusKeeper.GetMessagesFromQueue(ctx, queue, 100)
 	require.NoError(t, err)
 	require.Empty(t, msgs)
@@ -279,7 +279,7 @@ func TestRecentPublishedSnapshot_OnSnapshotBuilt(t *testing.T) {
 		require.NoError(t, err)
 	}
 
-	queue := fmt.Sprintf("evm/%s/%s", newChain.GetChainReferenceID(), keeper.ConsensusTurnstoneMessage)
+	queue := fmt.Sprintf("evm/%s/%s", newChain.GetChainReferenceID(), types.ConsensusTurnstoneMessage)
 	msgs, err := f.consensusKeeper.GetMessagesFromQueue(ctx, queue, 1)
 
 	require.NoError(t, err)
@@ -386,7 +386,7 @@ func TestOldPublishedSnapshot_OnSnapshotBuilt(t *testing.T) {
 		require.NoError(t, err)
 	}
 
-	queue := fmt.Sprintf("evm/%s/%s", newChain.GetChainReferenceID(), keeper.ConsensusTurnstoneMessage)
+	queue := fmt.Sprintf("evm/%s/%s", newChain.GetChainReferenceID(), types.ConsensusTurnstoneMessage)
 	msgs, err := f.consensusKeeper.GetMessagesFromQueue(ctx, queue, 1)
 
 	require.NoError(t, err)
@@ -463,7 +463,7 @@ func TestInactiveChain_OnSnapshotBuilt(t *testing.T) {
 		f.stakingKeeper.SetValidator(ctx, val)
 	}
 
-	queue := fmt.Sprintf("evm/%s/%s", "bob", keeper.ConsensusTurnstoneMessage)
+	queue := fmt.Sprintf("evm/%s/%s", "bob", types.ConsensusTurnstoneMessage)
 
 	_, err := f.valsetKeeper.TriggerSnapshotBuild(ctx)
 	require.NoError(t, err)
diff --git a/tests/integration/evm/keeper/test_helpers_test.go b/tests/integration/evm/keeper/test_helpers_test.go
index 5434333a..a539e05d 100644
--- a/tests/integration/evm/keeper/test_helpers_test.go
+++ b/tests/integration/evm/keeper/test_helpers_test.go
@@ -311,6 +311,7 @@ func initFixture(t ginkgo.FullGinkgoTInterface) *fixture {
 		distrKeeper,
 		transferKeeper,
 		evmKeeper,
+		consensusKeeper,
 		gravitymodulekeeper.NewGravityStoreGetter(keys[gravitymoduletypes.StoreKey]),
 		authtypes.NewModuleAddress(govtypes.ModuleName).String(),
 		authcodec.NewBech32Codec(params2.ValidatorAddressPrefix),
diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go
new file mode 100644
index 00000000..aea47e0a
--- /dev/null
+++ b/util/eventbus/bus.go
@@ -0,0 +1,60 @@
+package eventbus
+
+import (
+	"context"
+	"sort"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/palomachain/paloma/util/liblog"
+)
+
+var gravityBatchBuilt = newEvent[GravityBatchBuiltEvent]()
+
+type (
+	EventHandler[E any] func(context.Context, E) error
+	Event[E any]        struct {
+		subscribers map[string]EventHandler[E]
+	}
+)
+
+func newEvent[E any]() Event[E] {
+	return Event[E]{
+		subscribers: make(map[string]EventHandler[E]),
+	}
+}
+
+func (e Event[E]) Publish(ctx context.Context, event E) {
+	keys := make([]string, 0, len(e.subscribers))
+	for k := range e.subscribers {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		if e.subscribers[k] != nil {
+			logger := liblog.FromSDKLogger(sdk.UnwrapSDKContext(ctx).Logger()).
+				WithComponent("eventbus").
+				WithFields("event", event).
+				WithFields("subscriber", k)
+			logger.Debug("Handling event")
+			if err := e.subscribers[k](ctx, event); err != nil {
+				logger.WithError(err).Error("Failed to handle event")
+			}
+		}
+	}
+}
+
+func (e Event[E]) Subscribe(id string, fn EventHandler[E]) {
+	e.subscribers[id] = fn
+}
+
+func (e Event[E]) Unsubscribe(id string) {
+	e.subscribers[id] = nil
+}
+
+type GravityBatchBuiltEvent struct {
+	ChainReferenceID string
+}
+
+func GravityBatchBuilt() *Event[GravityBatchBuiltEvent] {
+	return &gravityBatchBuilt
+}
diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go
new file mode 100644
index 00000000..0cfc387d
--- /dev/null
+++ b/util/eventbus/bus_test.go
@@ -0,0 +1,48 @@
+package eventbus_test
+
+import (
+	"context"
+	"testing"
+
+	"cosmossdk.io/log"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/palomachain/paloma/util/eventbus"
+	"github.com/stretchr/testify/require"
+)
+
+func TestEventBus(t *testing.T) {
+	ctx := sdk.Context{}.
+		WithLogger(log.NewNopLogger()).
+		WithContext(context.Background())
+	eventbus.GravityBatchBuilt().Publish(ctx, eventbus.GravityBatchBuiltEvent{
+		ChainReferenceID: "test-chain",
+	})
+
+	calls := make(map[string]int)
+	fn := func(_ context.Context, e eventbus.GravityBatchBuiltEvent) error {
+		calls[e.ChainReferenceID]++
+		return nil
+	}
+
+	eventbus.GravityBatchBuilt().Subscribe("test-1", fn)
+	require.Len(t, calls, 0, "should be empty")
+
+	eventbus.GravityBatchBuilt().Publish(ctx, eventbus.GravityBatchBuiltEvent{
+		ChainReferenceID: "test-chain",
+	})
+
+	require.NotNil(t, calls["test-chain"], "should have executed one.")
+	require.Equal(t, 1, calls["test-chain"], "should have executed one.")
+
+	eventbus.GravityBatchBuilt().Subscribe("test-2", fn)
+	eventbus.GravityBatchBuilt().Publish(ctx, eventbus.GravityBatchBuiltEvent{
+		ChainReferenceID: "test-chain",
+	})
+	require.Equal(t, 3, calls["test-chain"], "should execute both subscribers.")
+
+	eventbus.GravityBatchBuilt().Unsubscribe("test-1")
+	eventbus.GravityBatchBuilt().Publish(ctx, eventbus.GravityBatchBuiltEvent{
+		ChainReferenceID: "test-chain",
+	})
+	require.Equal(t, 4, calls["test-chain"], "should have removed one subscriber.")
+}
diff --git a/x/consensus/keeper/concensus_keeper.go b/x/consensus/keeper/concensus_keeper.go
index 308f2b96..d053a116 100644
--- a/x/consensus/keeper/concensus_keeper.go
+++ b/x/consensus/keeper/concensus_keeper.go
@@ -115,39 +115,47 @@ func (k Keeper) GetMessagesForSigning(ctx context.Context, queueTypeName string,
 	return msgs, nil
 }
 
-// GetMessagesForRelaying returns messages for a single validator to relay.
-func (k Keeper) GetMessagesForRelaying(ctx context.Context, queueTypeName string, valAddress sdk.ValAddress) (msgs []types.QueuedSignedMessageI, err error) {
-	sdkCtx := sdk.UnwrapSDKContext(ctx)
-	msgs, err = k.GetMessagesFromQueue(sdkCtx, queueTypeName, 0)
+// TODO: The infusion of EVM types into the consensus module is a bit of a code smell.
+// We should consider moving the entire logic of message assignment and retrieval
+// to the EVM module to keep the consensus module content-agnostic.
+func (k Keeper) GetPendingValsetUpdates(ctx context.Context, queueTypeName string) ([]types.QueuedSignedMessageI, error) {
+	msgs, err := k.GetMessagesFromQueue(ctx, queueTypeName, 0)
 	if err != nil {
 		return nil, err
 	}
 
-	// Check for existing valset update messages on any target chains
-	valsetUpdatesOnChainLkUp := make(map[string]uint64)
-	for _, v := range msgs {
-		cm, err := v.ConsensusMsg(k.cdc)
+	msgs = slice.Filter(msgs, func(msg types.QueuedSignedMessageI) bool {
+		cm, err := msg.ConsensusMsg(k.cdc)
 		if err != nil {
-			liblog.FromSDKLogger(k.Logger(sdkCtx)).WithError(err).Error("Failed to get consensus msg")
-			continue
+			liblog.FromKeeper(ctx, k).WithError(err).Error("Failed to get consensus msg")
+			return false
 		}
-
 		m, ok := cm.(*evmtypes.Message)
 		if !ok {
-			continue
+			return false
 		}
-
-		action := m.GetAction()
-		_, ok = action.(*evmtypes.Message_UpdateValset)
-		if ok {
-			if _, found := valsetUpdatesOnChainLkUp[m.GetChainReferenceID()]; found {
-				// Looks like we already have a pending valset update for this chain,
-				// we want to keep the earlierst message ID for a valset update we found,
-				// so we can skip here.
-				continue
-			}
-			valsetUpdatesOnChainLkUp[m.GetChainReferenceID()] = v.GetId()
+		if _, ok = m.GetAction().(*evmtypes.Message_UpdateValset); !ok {
+			return false
 		}
+
+		return true
+	})
+
+	return msgs, nil
+}
+
+// GetMessagesForRelaying returns messages for a single validator to relay.
+func (k Keeper) GetMessagesForRelaying(ctx context.Context, queueTypeName string, valAddress sdk.ValAddress) (msgs []types.QueuedSignedMessageI, err error) {
+	sdkCtx := sdk.UnwrapSDKContext(ctx)
+	msgs, err = k.GetMessagesFromQueue(sdkCtx, queueTypeName, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	// Check for existing valset update messages on any target chains
+	valsetUpdatesOnChain, err := k.GetPendingValsetUpdates(ctx, queueTypeName)
+	if err != nil {
+		return nil, err
 	}
 
 	// Filter down to just messages for target chains without pending valset updates on them
@@ -159,21 +167,20 @@ func (k Keeper) GetMessagesForRelaying(ctx context.Context, queueTypeName string
 			return true
 		}
 
-		m, ok := cm.(*evmtypes.Message)
+		_, ok := cm.(*evmtypes.Message)
 		if !ok {
 			// NO cross chain message, just return true
 			return true
 		}
 
 		// Cross chain message for relaying, return only if no pending valset update on target chain
-		vuMid, found := valsetUpdatesOnChainLkUp[m.GetChainReferenceID()]
-		if !found {
+		if len(valsetUpdatesOnChain) < 1 {
 			return true
 		}
 
 		// Looks like there is a valset update for the target chain,
 		// only return true if this message is younger than the valset update
-		return msg.GetId() <= vuMid
+		return msg.GetId() <= valsetUpdatesOnChain[0].GetId()
 	})
 
 	// Filter down to just messages assigned to this validator
diff --git a/x/evm/keeper/keeper.go b/x/evm/keeper/keeper.go
index cc7a8adf..47085201 100644
--- a/x/evm/keeper/keeper.go
+++ b/x/evm/keeper/keeper.go
@@ -23,6 +23,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	xchain "github.com/palomachain/paloma/internal/x-chain"
+	"github.com/palomachain/paloma/util/eventbus"
 	keeperutil "github.com/palomachain/paloma/util/keeper"
 	"github.com/palomachain/paloma/util/libcons"
 	"github.com/palomachain/paloma/util/liblog"
@@ -43,7 +44,6 @@ const (
 )
 
 const (
-	ConsensusTurnstoneMessage     = "evm-turnstone-message"
 	ConsensusGetValidatorBalances = "validators-balances"
 	ConsensusGetReferenceBlock    = "reference-block"
 	ConsensusCollectFundEvents    = "collect-fund-events"
@@ -61,7 +61,7 @@ type supportedChainInfo struct {
 
 var SupportedConsensusQueues = []supportedChainInfo{
 	{
-		subqueue: ConsensusTurnstoneMessage,
+		subqueue: types.ConsensusTurnstoneMessage,
 		batch:    false,
 		msgType:  &types.Message{},
 		processAttesationFunc: func(k Keeper) func(ctx context.Context, q consensus.Queuer, msg consensustypes.QueuedSignedMessageI) error {
@@ -149,6 +149,18 @@ func NewKeeper(
 	k.deploymentCache = deployment.NewCache(provideDeploymentCacheBootstrapper(k))
 	k.ider = keeperutil.NewIDGenerator(keeperutil.StoreGetterFn(k.provideSmartContractStore), []byte("id-key"))
 	k.consensusChecker = libcons.New(k.Valset.GetCurrentSnapshot, k.cdc)
+
+	eventbus.GravityBatchBuilt().Subscribe(
+		"gravity-keeper",
+		func(ctx context.Context, e eventbus.GravityBatchBuiltEvent) error {
+			ci, err := k.GetChainInfo(ctx, e.ChainReferenceID)
+			if err != nil {
+				return err
+			}
+
+			return k.justInTimeValsetUpdate(ctx, ci)
+		})
+
 	return k
 }
@@ -608,7 +620,7 @@ func (m msgSender) SendValsetMsgForChain(ctx context.Context, chainInfo *types.C
 
 	// clear all other instances of the update valset from the queue
 	m.Logger(sdkCtx).Info("clearing previous instances of the update valset from the queue")
-	queueName := consensustypes.Queue(ConsensusTurnstoneMessage, xchainType, xchain.ReferenceID(chainInfo.GetChainReferenceID()))
+	queueName := consensustypes.Queue(types.ConsensusTurnstoneMessage, xchainType, xchain.ReferenceID(chainInfo.GetChainReferenceID()))
 	messages, err := m.ConsensusKeeper.GetMessagesFromQueue(ctx, queueName, 0)
 	if err != nil {
 		m.Logger(sdkCtx).Error("unable to get messages from queue", "err", err)
@@ -639,7 +651,7 @@ func (m msgSender) SendValsetMsgForChain(ctx context.Context, chainInfo *types.C
 	// put update valset message into the queue
 	msgID, err := m.ConsensusKeeper.PutMessageInQueue(
 		ctx,
-		consensustypes.Queue(ConsensusTurnstoneMessage, xchainType, xchain.ReferenceID(chainInfo.GetChainReferenceID())),
+		consensustypes.Queue(types.ConsensusTurnstoneMessage, xchainType, xchain.ReferenceID(chainInfo.GetChainReferenceID())),
 		&types.Message{
 			TurnstoneID:      string(chainInfo.GetSmartContractUniqueID()),
 			ChainReferenceID: chainInfo.GetChainReferenceID(),
diff --git a/x/evm/keeper/smart_contract_deployment.go b/x/evm/keeper/smart_contract_deployment.go
index b61c8195..564d87b2 100644
--- a/x/evm/keeper/smart_contract_deployment.go
+++ b/x/evm/keeper/smart_contract_deployment.go
@@ -164,7 +164,7 @@ func (k Keeper) AddSmartContractExecutionToConsensus(
 	return k.ConsensusKeeper.PutMessageInQueue(
 		ctx,
 		consensustypes.Queue(
-			ConsensusTurnstoneMessage,
+			types.ConsensusTurnstoneMessage,
 			xchainType,
 			chainReferenceID,
 		),
@@ -311,7 +311,7 @@ func (k Keeper) AddUploadSmartContractToConsensus(
 	return k.ConsensusKeeper.PutMessageInQueue(
 		ctx,
 		consensustypes.Queue(
-			ConsensusTurnstoneMessage,
+			types.ConsensusTurnstoneMessage,
 			xchainType,
 			chainReferenceID,
 		),
diff --git a/x/evm/types/types.go b/x/evm/types/types.go
index ab1254f4..57a81739 100644
--- a/x/evm/types/types.go
+++ b/x/evm/types/types.go
@@ -1 +1,3 @@
 package types
+
+const ConsensusTurnstoneMessage = "evm-turnstone-message"
diff --git a/x/gravity/keeper/batch.go b/x/gravity/keeper/batch.go
index 34e95810..0e73b08d 100644
--- a/x/gravity/keeper/batch.go
+++ b/x/gravity/keeper/batch.go
@@ -11,6 +11,7 @@ import (
 	"cosmossdk.io/store/prefix"
 	"github.com/VolumeFi/whoops"
 	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/palomachain/paloma/util/eventbus"
 	"github.com/palomachain/paloma/x/gravity/types"
 )
 
@@ -77,6 +78,10 @@ func (k Keeper) BuildOutgoingTXBatch(
 		return nil, err
 	}
 
+	eventbus.GravityBatchBuilt().Publish(ctx, eventbus.GravityBatchBuiltEvent{
+		ChainReferenceID: chainReferenceID,
+	})
+
 	return batch, sdkCtx.EventManager().EmitTypedEvent(
 		&types.EventOutgoingBatch{
 			BridgeContract: bridgeContract.GetAddress().Hex(),
diff --git a/x/gravity/keeper/grpc_query.go b/x/gravity/keeper/grpc_query.go
index 1d7faa5c..7361ae90 100644
--- a/x/gravity/keeper/grpc_query.go
+++ b/x/gravity/keeper/grpc_query.go
@@ -9,6 +9,8 @@ import (
 	sdk "github.com/cosmos/cosmos-sdk/types"
 	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
 	utilkeeper "github.com/palomachain/paloma/util/keeper"
+	consensustypes "github.com/palomachain/paloma/x/consensus/types"
+	evmtypes "github.com/palomachain/paloma/x/evm/types"
 	"github.com/palomachain/paloma/x/gravity/types"
 )
 
@@ -74,7 +76,21 @@ func (k Keeper) OutgoingTxBatches(
 	req *types.QueryOutgoingTxBatchesRequest,
 ) (*types.QueryOutgoingTxBatchesResponse, error) {
 	var batches []types.OutgoingTxBatch
-	err := k.IterateOutgoingTxBatches(sdk.UnwrapSDKContext(c), func(_ []byte, batch types.InternalOutgoingTxBatch) bool {
+
+	// Check for pending valset messages on the queue
+	queue := consensustypes.Queue(evmtypes.ConsensusTurnstoneMessage, consensustypes.ChainTypeEVM, req.ChainReferenceId)
+	valsetMessagesOnQueue, err := k.consensusKeeper.GetPendingValsetUpdates(c, queue)
+	if err != nil {
+		return nil, err
+	}
+
+	// Don't give out batches to relay if there are pending valset messages
+	if len(valsetMessagesOnQueue) > 0 {
+		return &types.QueryOutgoingTxBatchesResponse{
+			Batches: []types.OutgoingTxBatch{},
+		}, nil
+	}
+	err = k.IterateOutgoingTxBatches(sdk.UnwrapSDKContext(c), func(_ []byte, batch types.InternalOutgoingTxBatch) bool {
 		batchChainReferenceID := batch.ChainReferenceID
 		reqChainReferenceID := req.ChainReferenceId
 		batchAssignee := batch.Assignee
diff --git a/x/gravity/keeper/grpc_query_test.go b/x/gravity/keeper/grpc_query_test.go
index 70fca6b8..70faa21c 100644
--- a/x/gravity/keeper/grpc_query_test.go
+++ b/x/gravity/keeper/grpc_query_test.go
@@ -2,6 +2,7 @@ package keeper
 
 import (
 	"bytes"
+	"math/big"
 	"testing"
 	"time"
 
@@ -237,12 +238,18 @@ func TestLastBatchesRequest(t *testing.T) {
 	defer func() { sdkCtx.Logger().Info("Asserting invariants at test end"); input.AssertInvariants() }()
 
+	// evm/test-chain/evm-turnstone-message
+	// evm/test-chain/evm-turnstone-message
+	input.EvmKeeper.AddSupportForNewChain(ctx, "test-chain", 42, 100, "0x123", big.NewInt(0))
+
 	k := input.GravityKeeper
 
 	createTestBatch(t, input, 2)
 	createTestBatch(t, input, 3)
 
-	lastBatches, err := k.OutgoingTxBatches(ctx, &types.QueryOutgoingTxBatchesRequest{})
+	lastBatches, err := k.OutgoingTxBatches(ctx, &types.QueryOutgoingTxBatchesRequest{
+		ChainReferenceId: "test-chain",
+	})
 	require.NoError(t, err)
 
 	expectedRes := types.QueryOutgoingTxBatchesResponse{
diff --git a/x/gravity/keeper/keeper.go b/x/gravity/keeper/keeper.go
index 864230b8..49a9503a 100644
--- a/x/gravity/keeper/keeper.go
+++ b/x/gravity/keeper/keeper.go
@@ -39,6 +39,7 @@ type Keeper struct {
 	accountKeeper     types.AccountKeeper
 	ibcTransferKeeper ibctransferkeeper.Keeper
 	evmKeeper         types.EVMKeeper
+	consensusKeeper   types.ConsensusKeeper
 	AddressCodec      address.Codec
 
 	storeGetter keeperutil.StoreGetter
@@ -58,6 +59,7 @@ func NewKeeper(
 	distributionKeeper distrkeeper.Keeper,
 	ibcTransferKeeper ibctransferkeeper.Keeper,
 	evmKeeper types.EVMKeeper,
+	consensusKeeper types.ConsensusKeeper,
 	storeGetter keeperutil.StoreGetter,
 	authority string,
 	valAddressCodec address.Codec,
@@ -71,6 +73,7 @@ func NewKeeper(
 		accountKeeper:      accKeeper,
 		ibcTransferKeeper:  ibcTransferKeeper,
 		evmKeeper:          evmKeeper,
+		consensusKeeper:    consensusKeeper,
 		storeGetter:        storeGetter,
 		AttestationHandler: nil,
 		AddressCodec:       valAddressCodec,
diff --git a/x/gravity/keeper/test_common.go b/x/gravity/keeper/test_common.go
index aba5a266..2fcc5bd7 100644
--- a/x/gravity/keeper/test_common.go
+++ b/x/gravity/keeper/test_common.go
@@ -277,6 +277,7 @@ type TestInput struct {
 	Marshaler       codec.Codec
 	LegacyAmino     *codec.LegacyAmino
 	GravityStoreKey *storetypes.KVStoreKey
+	EvmKeeper       evmkeeper.Keeper
 }
 
 func addValidators(t *testing.T, input *TestInput, count int) {
@@ -725,6 +726,8 @@ func CreateTestEnv(t *testing.T) TestInput {
 	)
 	require.NoError(t, err)
 
+	consensusRegistry.Add(evmKeeper)
+
 	k := NewKeeper(
 		marshaler,
 		accountKeeper,
@@ -734,6 +737,7 @@ func CreateTestEnv(t *testing.T) TestInput {
 		distKeeper,
 		ibcTransferKeeper,
 		evmKeeper,
+		consensusKeeper,
 		NewGravityStoreGetter(gravityKey),
 		"",
 		authcodec.NewBech32Codec(chainparams.ValidatorAddressPrefix),
@@ -811,6 +815,7 @@ func CreateTestEnv(t *testing.T) TestInput {
 		Context:     ctx,
 		Marshaler:   marshaler,
 		LegacyAmino: legacyAmino,
+		EvmKeeper:   *evmKeeper,
 	}
 
 	sdkCtx := sdk.UnwrapSDKContext(testInput.Context)
diff --git a/x/gravity/types/expected_keepers.go b/x/gravity/types/expected_keepers.go
index 873fef80..e678f070 100644
--- a/x/gravity/types/expected_keepers.go
+++ b/x/gravity/types/expected_keepers.go
@@ -11,6 +11,7 @@ import (
 	slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
 	stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
 	xchain "github.com/palomachain/paloma/internal/x-chain"
+	consensustypes "github.com/palomachain/paloma/x/consensus/types"
 	evmtypes "github.com/palomachain/paloma/x/evm/types"
 )
 
@@ -51,6 +52,10 @@ type SlashingKeeper interface {
 	GetValidatorSigningInfo(ctx context.Context, address sdk.ConsAddress) (info slashingtypes.ValidatorSigningInfo, found error)
 }
 
+type ConsensusKeeper interface {
+	GetPendingValsetUpdates(ctx context.Context, queueTypeName string) ([]consensustypes.QueuedSignedMessageI, error)
+}
+
 // AccountKeeper defines the interface contract required for account
 // functionality.
 type AccountKeeper interface {
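
# Reviewer note: using the new event bus

For reviewers who want to poke at the new `util/eventbus` package in isolation, here is a minimal, self-contained sketch (not part of the patch). It mirrors the setup in `bus_test.go`; the subscriber ID `example-subscriber` and chain reference ID `example-chain` are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"cosmossdk.io/log"
	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/palomachain/paloma/util/eventbus"
)

func main() {
	// Publish unwraps an sdk.Context for logging, so the context passed in
	// must carry one (the same setup bus_test.go uses).
	ctx := sdk.Context{}.
		WithLogger(log.NewNopLogger()).
		WithContext(context.Background())

	// Handlers are keyed by a unique subscriber ID; subscribing again under
	// the same ID replaces the previous handler.
	eventbus.GravityBatchBuilt().Subscribe("example-subscriber", func(_ context.Context, e eventbus.GravityBatchBuiltEvent) error {
		fmt.Println("gravity batch built on", e.ChainReferenceID)
		return nil // a returned error is logged by the bus, not propagated
	})

	// The gravity keeper fires this event from BuildOutgoingTXBatch; handlers
	// run synchronously, in lexicographic order of their subscriber IDs.
	eventbus.GravityBatchBuilt().Publish(ctx, eventbus.GravityBatchBuiltEvent{
		ChainReferenceID: "example-chain", // placeholder chain reference ID
	})

	// Unsubscribe nils out the handler, so later publishes skip it.
	eventbus.GravityBatchBuilt().Unsubscribe("example-subscriber")
}
```

One consequence of `Publish` only logging handler errors: the EVM keeper's "gravity-keeper" subscription in `NewKeeper` can fail its just-in-time valset update without aborting `BuildOutgoingTXBatch`.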