diff --git a/CODEOWNERS b/CODEOWNERS index 0d8beae649c..79e45d18dc7 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -8,8 +8,8 @@ /engine/consensus/** @AlexHentschel @durkmurder @jordanschalm # Execution Stream -/cmd/execution/** @ramtinms -/engine/execution/** @ramtinms +/cmd/execution/** @zhangchiqing +/engine/execution/** @zhangchiqing # Access Stream /access/** @peterargue @@ -18,19 +18,18 @@ /engine/access/** @peterargue # Verification Stream -/cmd/verification/** @ramtinms @yhassanzadeh13 -/engine/verification/** @ramtinms @yhassanzadeh13 -/module/chunking/** @ramtinms -/integration/tests/verification @ramtinms @yhassanzadeh13 +/cmd/verification/** @zhangchiqing +/engine/verification/** @zhangchiqing +/integration/tests/verification @zhangchiqing # Ledger Stream -/ledger/** @ramtinms @AlexHentschel +/ledger/** @AlexHentschel # FVM Stream -/fvm/** @ramtinms @janezpodhostnik +/fvm/** @janezpodhostnik # Networking Stream -/network/** @yhassanzadeh13 +/network/** @Kay-Zee # Cryptography Stream /crypto/** @tarakby @@ -39,13 +38,13 @@ /cmd/bootstrap/** @zhangchiqing # Dev Tools Stream -.github/workflows/** @gomisha -/insecure/** @gomisha @yhassanzadeh13 -/integration/benchnet2/** @gomisha -/tools/test_monitor/** @gomisha +.github/workflows/** @Kay-Zee +/insecure/** @Kay-Zee +/integration/benchnet2/** @Kay-Zee +/tools/test_monitor/** @Kay-Zee # Performance Stream -/integration/benchmark/** @gomisha +/integration/benchmark/** @Kay-Zee # Execution Sync /module/executiondatasync/** @peterargue diff --git a/Makefile b/Makefile index 8538c94d42d..60626c48d13 100644 --- a/Makefile +++ b/Makefile @@ -101,7 +101,7 @@ go-math-rand-check: # - "onflow/crypto/random" for deterministic randomness grep --include=\*.go \ --exclude=*test* --exclude=*helper* --exclude=*example* --exclude=*fixture* --exclude=*benchmark* --exclude=*profiler* \ - --exclude-dir=*test* --exclude-dir=*helper* --exclude-dir=*example* --exclude-dir=*fixture* --exclude-dir=*benchmark* --exclude-dir=*profiler* -rnw '"math/rand"'; \ + --exclude-dir=*test* --exclude-dir=*helper* --exclude-dir=*example* --exclude-dir=*fixture* --exclude-dir=*benchmark* --exclude-dir=*profiler* --exclude-dir=*emulator* -rnw '"math/rand"'; \ if [ $$? 
-ne 1 ]; then \ echo "[Error] Go production code should not use math/rand package"; exit 1; \ fi diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 656f4b06f89..841895851a1 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -152,6 +152,7 @@ type AccessNodeConfig struct { logTxTimeToFinalized bool logTxTimeToExecuted bool logTxTimeToFinalizedExecuted bool + logTxTimeToSealed bool retryEnabled bool rpcMetricsEnabled bool executionDataSyncEnabled bool @@ -243,6 +244,7 @@ func DefaultAccessNodeConfig() *AccessNodeConfig { logTxTimeToFinalized: false, logTxTimeToExecuted: false, logTxTimeToFinalizedExecuted: false, + logTxTimeToSealed: false, pingEnabled: false, retryEnabled: false, rpcMetricsEnabled: false, @@ -304,6 +306,7 @@ type FlowAccessNodeBuilder struct { CollectionsToMarkFinalized *stdmap.Times CollectionsToMarkExecuted *stdmap.Times BlocksToMarkExecuted *stdmap.Times + BlockTransactions *stdmap.IdentifierMap TransactionMetrics *metrics.TransactionCollector TransactionValidationMetrics *metrics.TransactionValidationCollector RestMetrics *metrics.RestCollector @@ -1240,6 +1243,10 @@ func (builder *FlowAccessNodeBuilder) extraFlags() { "log-tx-time-to-finalized-executed", defaultConfig.logTxTimeToFinalizedExecuted, "log transaction time to finalized and executed") + flags.BoolVar(&builder.logTxTimeToSealed, + "log-tx-time-to-sealed", + defaultConfig.logTxTimeToSealed, + "log transaction time to sealed") flags.BoolVar(&builder.pingEnabled, "ping-enabled", defaultConfig.pingEnabled, @@ -1683,6 +1690,11 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { return err } + builder.BlockTransactions, err = stdmap.NewIdentifierMap(10000) + if err != nil { + return err + } + builder.BlocksToMarkExecuted, err = stdmap.NewTimes(1 * 300) // assume 1 block per second * 300 seconds return err @@ -1694,6 +1706,7 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { builder.logTxTimeToFinalized, builder.logTxTimeToExecuted, builder.logTxTimeToFinalizedExecuted, + builder.logTxTimeToSealed, ) return nil }). 
@@ -1728,6 +1741,7 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { builder.BlocksToMarkExecuted, builder.Storage.Collections, builder.Storage.Blocks, + builder.BlockTransactions, ) if err != nil { return err diff --git a/cmd/bootstrap/utils/md5.go b/cmd/bootstrap/utils/md5.go index e885ed891e2..65823fd6e96 100644 --- a/cmd/bootstrap/utils/md5.go +++ b/cmd/bootstrap/utils/md5.go @@ -1,7 +1,6 @@ package utils // The google storage API only provides md5 and crc32 hence overriding the linter flag for md5 -// #nosec import ( "crypto/md5" //nolint:gosec "io" diff --git a/cmd/execution_builder.go b/cmd/execution_builder.go index 9add704760f..c9c7899270f 100644 --- a/cmd/execution_builder.go +++ b/cmd/execution_builder.go @@ -52,7 +52,6 @@ import ( txmetrics "github.com/onflow/flow-go/engine/execution/computation/metrics" "github.com/onflow/flow-go/engine/execution/ingestion" "github.com/onflow/flow-go/engine/execution/ingestion/fetcher" - "github.com/onflow/flow-go/engine/execution/ingestion/loader" "github.com/onflow/flow-go/engine/execution/ingestion/stop" "github.com/onflow/flow-go/engine/execution/ingestion/uploader" exeprovider "github.com/onflow/flow-go/engine/execution/provider" @@ -1081,61 +1080,24 @@ func (exeNode *ExecutionNode) LoadIngestionEngine( exeNode.collectionRequester = reqEng } - if exeNode.exeConf.enableNewIngestionEngine { - _, core, err := ingestion.NewMachine( - node.Logger, - node.ProtocolEvents, - exeNode.collectionRequester, - colFetcher, - node.Storage.Headers, - node.Storage.Blocks, - node.Storage.Collections, - exeNode.executionState, - node.State, - exeNode.collector, - exeNode.computationManager, - exeNode.providerEngine, - exeNode.blockDataUploader, - exeNode.stopControl, - ) - - return core, err - } - - var blockLoader ingestion.BlockLoader - if exeNode.exeConf.enableStorehouse { - blockLoader = loader.NewUnfinalizedLoader(node.Logger, node.State, node.Storage.Headers, exeNode.executionState) - } else { - blockLoader = loader.NewUnexecutedLoader(node.Logger, node.State, node.Storage.Headers, exeNode.executionState) - } - - ingestionEng, err := ingestion.New( - exeNode.ingestionUnit, + _, core, err := ingestion.NewMachine( node.Logger, - node.EngineRegistry, + node.ProtocolEvents, + exeNode.collectionRequester, colFetcher, node.Storage.Headers, node.Storage.Blocks, node.Storage.Collections, - exeNode.computationManager, - exeNode.providerEngine, exeNode.executionState, + node.State, exeNode.collector, - node.Tracer, - exeNode.exeConf.extensiveLog, - exeNode.executionDataPruner, + exeNode.computationManager, + exeNode.providerEngine, exeNode.blockDataUploader, exeNode.stopControl, - blockLoader, ) - // TODO: we should solve these mutual dependencies better - // => https://github.com/dapperlabs/flow-go/issues/4360 - exeNode.collectionRequester.WithHandle(ingestionEng.OnCollection) - - node.ProtocolEvents.AddConsumer(ingestionEng) - - return ingestionEng, err + return core, err } // create scripts engine for handling script execution diff --git a/cmd/execution_config.go b/cmd/execution_config.go index c8ba7092c32..97f808ae6e7 100644 --- a/cmd/execution_config.go +++ b/cmd/execution_config.go @@ -69,11 +69,10 @@ type ExecutionConfig struct { // It works around an issue where some collection nodes are not configured with enough // this works around an issue where some collection nodes are not configured with enough // file descriptors causing connection failures. 
- onflowOnlyLNs bool - enableStorehouse bool - enableChecker bool - enableNewIngestionEngine bool - publicAccessID string + onflowOnlyLNs bool + enableStorehouse bool + enableChecker bool + publicAccessID string } func (exeConf *ExecutionConfig) SetupFlags(flags *pflag.FlagSet) { @@ -132,7 +131,9 @@ func (exeConf *ExecutionConfig) SetupFlags(flags *pflag.FlagSet) { flags.BoolVar(&exeConf.onflowOnlyLNs, "temp-onflow-only-lns", false, "do not use unless required. forces node to only request collections from onflow collection nodes") flags.BoolVar(&exeConf.enableStorehouse, "enable-storehouse", false, "enable storehouse to store registers on disk, default is false") flags.BoolVar(&exeConf.enableChecker, "enable-checker", true, "enable checker to check the correctness of the execution result, default is true") - flags.BoolVar(&exeConf.enableNewIngestionEngine, "enable-new-ingestion-engine", true, "enable new ingestion engine, default is true") + // deprecated. Retain it to prevent nodes that previously had this configuration from crashing. + var deprecatedEnableNewIngestionEngine bool + flags.BoolVar(&deprecatedEnableNewIngestionEngine, "enable-new-ingestion-engine", true, "enable new ingestion engine, default is true") flags.StringVar(&exeConf.publicAccessID, "public-access-id", "", "public access ID for the node") } diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index d04d5b5ad20..98fc1fc701a 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -148,6 +148,7 @@ type ObserverServiceConfig struct { logTxTimeToFinalized bool logTxTimeToExecuted bool logTxTimeToFinalizedExecuted bool + logTxTimeToSealed bool executionDataSyncEnabled bool executionDataIndexingEnabled bool executionDataDBMode string @@ -223,6 +224,7 @@ func DefaultObserverServiceConfig() *ObserverServiceConfig { logTxTimeToFinalized: false, logTxTimeToExecuted: false, logTxTimeToFinalizedExecuted: false, + logTxTimeToSealed: false, executionDataSyncEnabled: false, executionDataIndexingEnabled: false, executionDataDBMode: execution_data.ExecutionDataDBModeBadger.String(), @@ -659,6 +661,10 @@ func (builder *ObserverServiceBuilder) extraFlags() { "log-tx-time-to-finalized-executed", defaultConfig.logTxTimeToFinalizedExecuted, "log transaction time to finalized and executed") + flags.BoolVar(&builder.logTxTimeToSealed, + "log-tx-time-to-sealed", + defaultConfig.logTxTimeToSealed, + "log transaction time to sealed") flags.BoolVar(&builder.rpcMetricsEnabled, "rpc-metrics-enabled", defaultConfig.rpcMetricsEnabled, "whether to enable the rpc metrics") flags.BoolVar(&builder.executionDataIndexingEnabled, "execution-data-indexing-enabled", @@ -1671,6 +1677,7 @@ func (builder *ObserverServiceBuilder) enqueueRPCServer() { builder.logTxTimeToFinalized, builder.logTxTimeToExecuted, builder.logTxTimeToFinalizedExecuted, + builder.logTxTimeToSealed, ) return nil }) diff --git a/cmd/util/cmd/export-evm-state/cmd.go b/cmd/util/cmd/export-evm-state/cmd.go new file mode 100644 index 00000000000..2927b9a313a --- /dev/null +++ b/cmd/util/cmd/export-evm-state/cmd.go @@ -0,0 +1,109 @@ +package evm_exporter + +import ( + "fmt" + "os" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/fvm/evm" + "github.com/onflow/flow-go/fvm/evm/emulator/state" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + 
"github.com/onflow/flow-go/model/flow" +) + +var ( + flagChain string + flagExecutionStateDir string + flagOutputDir string + flagStateCommitment string +) + +var Cmd = &cobra.Command{ + Use: "export-evm-state", + Short: "exports evm state into a several binary files", + Run: run, +} + +func init() { + Cmd.Flags().StringVar(&flagChain, "chain", "", "Chain name") + _ = Cmd.MarkFlagRequired("chain") + + Cmd.Flags().StringVar(&flagExecutionStateDir, "execution-state-dir", "", + "Execution Node state dir (where WAL logs are written") + _ = Cmd.MarkFlagRequired("execution-state-dir") + + Cmd.Flags().StringVar(&flagOutputDir, "output-dir", "", + "Directory to write new Execution State to") + _ = Cmd.MarkFlagRequired("output-dir") + + Cmd.Flags().StringVar(&flagStateCommitment, "state-commitment", "", + "State commitment (hex-encoded, 64 characters)") +} + +func run(*cobra.Command, []string) { + log.Info().Msg("start exporting evm state") + err := ExportEVMState(flagChain, flagExecutionStateDir, flagStateCommitment, flagOutputDir) + if err != nil { + log.Fatal().Err(err).Msg("cannot get export evm state") + } +} + +// ExportEVMState evm state +func ExportEVMState( + chainName string, + ledgerPath string, + targetState string, + outputPath string) error { + + chainID := flow.ChainID(chainName) + + storageRoot := evm.StorageAccountAddress(chainID) + rootOwner := string(storageRoot.Bytes()) + + payloads, err := util.ReadTrie(ledgerPath, util.ParseStateCommitment(targetState)) + if err != nil { + return err + } + + // filter payloads of evm storage + filteredPayloads := make(map[flow.RegisterID]*ledger.Payload, 0) + for _, payload := range payloads { + registerID, _, err := convert.PayloadToRegister(payload) + if err != nil { + return fmt.Errorf("failed to convert payload to register: %w", err) + } + if registerID.Owner == rootOwner { + filteredPayloads[registerID] = payload + } + } + + payloadsLedger := util.NewPayloadsLedger(filteredPayloads) + + exporter, err := state.NewExporter(payloadsLedger, storageRoot) + if err != nil { + return fmt.Errorf("failed to create exporter: %w", err) + } + + if _, err := os.Stat(outputPath); os.IsNotExist(err) { + err := os.Mkdir(outputPath, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create path: %w", err) + } + } + + fi, err := os.Create(outputPath) + if err != nil { + return err + } + defer fi.Close() + + err = exporter.Export(outputPath) + if err != nil { + return fmt.Errorf("failed to export: %w", err) + } + return nil +} diff --git a/cmd/util/cmd/root.go b/cmd/util/cmd/root.go index 281d1dbebbf..cefd8db691d 100644 --- a/cmd/util/cmd/root.go +++ b/cmd/util/cmd/root.go @@ -24,6 +24,7 @@ import ( export "github.com/onflow/flow-go/cmd/util/cmd/exec-data-json-export" edbs "github.com/onflow/flow-go/cmd/util/cmd/execution-data-blobstore/cmd" extract "github.com/onflow/flow-go/cmd/util/cmd/execution-state-extract" + evm_state_exporter "github.com/onflow/flow-go/cmd/util/cmd/export-evm-state" ledger_json_exporter "github.com/onflow/flow-go/cmd/util/cmd/export-json-execution-state" export_json_transactions "github.com/onflow/flow-go/cmd/util/cmd/export-json-transactions" extractpayloads "github.com/onflow/flow-go/cmd/util/cmd/extract-payloads-by-address" @@ -124,6 +125,7 @@ func addCommands() { rootCmd.AddCommand(debug_tx.Cmd) rootCmd.AddCommand(debug_script.Cmd) rootCmd.AddCommand(generate_authorization_fixes.Cmd) + rootCmd.AddCommand(evm_state_exporter.Cmd) } func initConfig() { diff --git a/consensus/hotstuff/consumer.go 
b/consensus/hotstuff/consumer.go index 1a5bfb175af..2fcf2e4703e 100644 --- a/consensus/hotstuff/consumer.go +++ b/consensus/hotstuff/consumer.go @@ -60,7 +60,7 @@ type VoteAggregationViolationConsumer interface { // Prerequisites: // Implementation must be concurrency safe; Non-blocking; // and must handle repetition of the same events (with some processing overhead). - OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.Proposal) + OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.SignedProposal) } // TimeoutAggregationViolationConsumer consumes outbound notifications about Active Pacemaker violations specifically @@ -138,7 +138,7 @@ type ParticipantConsumer interface { // Prerequisites: // Implementation must be concurrency safe; Non-blocking; // and must handle repetition of the same events (with some processing overhead). - OnReceiveProposal(currentView uint64, proposal *model.Proposal) + OnReceiveProposal(currentView uint64, proposal *model.SignedProposal) // OnReceiveQc notifications are produced by the EventHandler when it starts processing a // QuorumCertificate [QC] constructed by the node's internal vote aggregator. diff --git a/consensus/hotstuff/event_handler.go b/consensus/hotstuff/event_handler.go index a2134680389..6deda44eeca 100644 --- a/consensus/hotstuff/event_handler.go +++ b/consensus/hotstuff/event_handler.go @@ -39,7 +39,7 @@ type EventHandler interface { // consensus participant. // All inputs should be validated before feeding into this function. Assuming trusted data. // No errors are expected during normal operation. - OnReceiveProposal(proposal *model.Proposal) error + OnReceiveProposal(proposal *model.SignedProposal) error // OnLocalTimeout handles a local timeout event by creating a model.TimeoutObject and broadcasting it. // No errors are expected during normal operation. diff --git a/consensus/hotstuff/eventhandler/event_handler.go b/consensus/hotstuff/eventhandler/event_handler.go index 04fc873bcc0..7b4c3d2a6f4 100644 --- a/consensus/hotstuff/eventhandler/event_handler.go +++ b/consensus/hotstuff/eventhandler/event_handler.go @@ -164,7 +164,7 @@ func (e *EventHandler) OnReceiveTc(tc *flow.TimeoutCertificate) error { // consensus participant. // All inputs should be validated before feeding into this function. Assuming trusted data. // No errors are expected during normal operation. -func (e *EventHandler) OnReceiveProposal(proposal *model.Proposal) error { +func (e *EventHandler) OnReceiveProposal(proposal *model.SignedProposal) error { block := proposal.Block curView := e.paceMaker.CurView() log := e.log.With(). @@ -429,7 +429,7 @@ func (e *EventHandler) proposeForNewViewIfPrimary() error { lastViewTC = nil } - // Construct Own Proposal + // Construct Own SignedProposal // CAUTION, design constraints: // (i) We cannot process our own proposal within the `EventHandler` right away. // (ii) We cannot add our own proposal to Forks here right away. @@ -491,7 +491,7 @@ func (e *EventHandler) proposeForNewViewIfPrimary() error { // It is called AFTER the block has been stored or found in Forks // It checks whether to vote for this block. // No errors are expected during normal operation. 
-func (e *EventHandler) processBlockForCurrentView(proposal *model.Proposal) error { +func (e *EventHandler) processBlockForCurrentView(proposal *model.SignedProposal) error { // sanity check that block is really for the current view: curView := e.paceMaker.CurView() block := proposal.Block @@ -526,7 +526,7 @@ func (e *EventHandler) processBlockForCurrentView(proposal *model.Proposal) erro // ownVote generates and forwards the own vote, if we decide to vote. // Any errors are potential symptoms of uncovered edge cases or corrupted internal state (fatal). // No errors are expected during normal operation. -func (e *EventHandler) ownVote(proposal *model.Proposal, curView uint64, nextLeader flow.Identifier) error { +func (e *EventHandler) ownVote(proposal *model.SignedProposal, curView uint64, nextLeader flow.Identifier) error { block := proposal.Block log := e.log.With(). Uint64("block_view", block.View). diff --git a/consensus/hotstuff/eventhandler/event_handler_test.go b/consensus/hotstuff/eventhandler/event_handler_test.go index b0c43fc3a82..f01a7760e40 100644 --- a/consensus/hotstuff/eventhandler/event_handler_test.go +++ b/consensus/hotstuff/eventhandler/event_handler_test.go @@ -132,14 +132,14 @@ func NewSafetyRules(t *testing.T) *SafetyRules { // SafetyRules will not vote for any block, unless the blockID exists in votable map safetyRules.On("ProduceVote", mock.Anything, mock.Anything).Return( - func(block *model.Proposal, _ uint64) *model.Vote { + func(block *model.SignedProposal, _ uint64) *model.Vote { _, ok := safetyRules.votable[block.Block.BlockID] if !ok { return nil } return createVote(block.Block) }, - func(block *model.Proposal, _ uint64) error { + func(block *model.SignedProposal, _ uint64) error { _, ok := safetyRules.votable[block.Block.BlockID] if !ok { return model.NewNoVoteErrorf("block not found") @@ -179,7 +179,7 @@ func NewForks(t *testing.T, finalized uint64) *Forks { } f.On("AddValidatedBlock", mock.Anything).Return(func(proposal *model.Block) error { - log.Info().Msgf("forks.AddValidatedBlock received Proposal for view: %v, QC: %v\n", proposal.View, proposal.QC.View) + log.Info().Msgf("forks.AddValidatedBlock received Block proposal for view: %v, QC: %v\n", proposal.View, proposal.QC.View) return f.addProposal(proposal) }).Maybe() @@ -228,14 +228,12 @@ type BlockProducer struct { } func (b *BlockProducer) MakeBlockProposal(view uint64, qc *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) (*flow.Header, error) { - return model.ProposalToFlow(&model.Proposal{ - Block: helper.MakeBlock( + return helper.SignedProposalToFlow(helper.MakeSignedProposal(helper.WithProposal( + helper.MakeProposal(helper.WithBlock(helper.MakeBlock( helper.WithBlockView(view), helper.WithBlockQC(qc), - helper.WithBlockProposer(b.proposerID), - ), - LastViewTC: lastViewTC, - }), nil + helper.WithBlockProposer(b.proposerID))), + helper.WithLastViewTC(lastViewTC))))), nil } func TestEventHandler(t *testing.T) { @@ -258,8 +256,8 @@ type EventHandlerSuite struct { initView uint64 // the current view at the beginning of the test case endView uint64 // the expected current view at the end of the test case - parentProposal *model.Proposal - votingProposal *model.Proposal + parentProposal *model.SignedProposal + votingProposal *model.SignedProposal qc *flow.QuorumCertificate tc *flow.TimeoutCertificate newview *model.NewViewEvent @@ -670,7 +668,7 @@ func (es *EventHandlerSuite) TestOnReceiveTc_NextLeaderProposes() { // proposed block should contain valid newest QC and lastViewTC 
expectedNewestQC := es.paceMaker.NewestQC() - proposal := model.ProposalFromFlow(header) + proposal := model.SignedProposalFromFlow(header) require.Equal(es.T(), expectedNewestQC, proposal.Block.QC) require.Equal(es.T(), es.paceMaker.LastViewTC(), proposal.LastViewTC) }).Once() @@ -1033,10 +1031,7 @@ func createVote(block *model.Block) *model.Vote { } } -func createProposal(view uint64, qcview uint64) *model.Proposal { +func createProposal(view uint64, qcview uint64) *model.SignedProposal { block := createBlockWithQC(view, qcview) - return &model.Proposal{ - Block: block, - SigData: nil, - } + return helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(helper.WithBlock(block)))) } diff --git a/consensus/hotstuff/eventloop/event_loop.go b/consensus/hotstuff/eventloop/event_loop.go index d7bc478490b..8a91d214355 100644 --- a/consensus/hotstuff/eventloop/event_loop.go +++ b/consensus/hotstuff/eventloop/event_loop.go @@ -22,7 +22,7 @@ import ( // it contains an attached insertionTime that is used to measure how long we have waited between queening proposal and // actually processing by `EventHandler`. type queuedProposal struct { - proposal *model.Proposal + proposal *model.SignedProposal insertionTime time.Time } @@ -263,7 +263,7 @@ func (el *EventLoop) loop(ctx context.Context) error { } // SubmitProposal pushes the received block to the proposals channel -func (el *EventLoop) SubmitProposal(proposal *model.Proposal) { +func (el *EventLoop) SubmitProposal(proposal *model.SignedProposal) { queueItem := queuedProposal{ proposal: proposal, insertionTime: time.Now(), diff --git a/consensus/hotstuff/eventloop/event_loop_test.go b/consensus/hotstuff/eventloop/event_loop_test.go index 8b6eeed5b25..2c40cecf5c8 100644 --- a/consensus/hotstuff/eventloop/event_loop_test.go +++ b/consensus/hotstuff/eventloop/event_loop_test.go @@ -75,7 +75,7 @@ func (s *EventLoopTestSuite) TestReadyDone() { // Test_SubmitQC tests that submitted proposal is eventually sent to event handler for processing func (s *EventLoopTestSuite) Test_SubmitProposal() { - proposal := helper.MakeProposal() + proposal := helper.MakeSignedProposal() processed := atomic.NewBool(false) s.eh.On("OnReceiveProposal", proposal).Run(func(args mock.Arguments) { processed.Store(true) @@ -229,7 +229,7 @@ func TestEventLoop_Timeout(t *testing.T) { go func() { defer wg.Done() for !processed.Load() { - eventLoop.SubmitProposal(helper.MakeProposal()) + eventLoop.SubmitProposal(helper.MakeSignedProposal()) } }() @@ -258,7 +258,7 @@ func TestReadyDoneWithStartTime(t *testing.T) { require.NoError(t, err) done := make(chan struct{}) - eh.On("OnReceiveProposal", mock.AnythingOfType("*model.Proposal")).Run(func(args mock.Arguments) { + eh.On("OnReceiveProposal", mock.AnythingOfType("*model.SignedProposal")).Run(func(args mock.Arguments) { require.True(t, time.Now().After(startTime)) close(done) }).Return(nil).Once() @@ -271,7 +271,7 @@ func TestReadyDoneWithStartTime(t *testing.T) { parentBlock := unittest.BlockHeaderFixture() block := unittest.BlockHeaderWithParentFixture(parentBlock) - eventLoop.SubmitProposal(model.ProposalFromFlow(block)) + eventLoop.SubmitProposal(model.SignedProposalFromFlow(block)) unittest.RequireCloseBefore(t, done, startTimeDuration+100*time.Millisecond, "proposal wasn't received") cancel() diff --git a/consensus/hotstuff/forks/block_builder_test.go b/consensus/hotstuff/forks/block_builder_test.go index 03daec535c1..62b4bf8bce5 100644 --- a/consensus/hotstuff/forks/block_builder_test.go +++ 
b/consensus/hotstuff/forks/block_builder_test.go @@ -106,7 +106,6 @@ func (bb *BlockBuilder) Proposals() ([]*model.Proposal, error) { PayloadHash: payloadHash, }, LastViewTC: lastViewTC, - SigData: nil, } proposal.Block.BlockID = makeBlockID(proposal.Block) diff --git a/consensus/hotstuff/forks/forks.go b/consensus/hotstuff/forks/forks.go index aa4db7f9853..bf7ee881b6c 100644 --- a/consensus/hotstuff/forks/forks.go +++ b/consensus/hotstuff/forks/forks.go @@ -401,7 +401,7 @@ func (f *Forks) checkForAdvancingFinalization(certifiedBlock *model.CertifiedBlo parentBlock := parentVertex.(*BlockContainer).Block() // Note: we assume that all stored blocks pass Forks.EnsureBlockIsValidExtension(block); - // specifically, that Proposal's ViewNumber is strictly monotonically + // specifically, that block's ViewNumber is strictly monotonically // increasing which is enforced by LevelledForest.VerifyVertex(...) // We denote: // * a DIRECT 1-chain as '<-' diff --git a/consensus/hotstuff/forks/forks_test.go b/consensus/hotstuff/forks/forks_test.go index 9662533dd0d..7b9c7af9b50 100644 --- a/consensus/hotstuff/forks/forks_test.go +++ b/consensus/hotstuff/forks/forks_test.go @@ -261,7 +261,7 @@ func TestFinalize_Multiple2Chains(t *testing.T) { } // TestFinalize_OrphanedFork tests that we can finalize a block which causes a conflicting fork to be orphaned. -// We ingest the the following block tree: +// We ingest the following block tree: // // [◄(1) 2] [◄(2) 3] // [◄(2) 4] [◄(4) 5] [◄(5) 6] @@ -389,7 +389,7 @@ func TestIgnoreBlocksBelowFinalizedView(t *testing.T) { } // TestDoubleProposal tests that the DoubleProposal notification is emitted when two different -// blocks for the same view are added. We ingest the the following block tree: +// blocks for the same view are added. 
We ingest the following block tree: // // / [◄(1) 2] // [1] @@ -460,7 +460,7 @@ func TestConflictingQCs(t *testing.T) { } // TestConflictingFinalizedForks checks that finalizing 2 conflicting forks should return model.ByzantineThresholdExceededError -// We ingest the the following block tree: +// We ingest the following block tree: // // [◄(1) 2] [◄(2) 3] [◄(3) 4] [◄(4) 5] // [◄(2) 6] [◄(6) 7] [◄(7) 8] diff --git a/consensus/hotstuff/helper/block.go b/consensus/hotstuff/helper/block.go index a3fa6f6e2e7..ce341f61476 100644 --- a/consensus/hotstuff/helper/block.go +++ b/consensus/hotstuff/helper/block.go @@ -56,10 +56,21 @@ func WithBlockQC(qc *flow.QuorumCertificate) func(*model.Block) { } } +func MakeSignedProposal(options ...func(*model.SignedProposal)) *model.SignedProposal { + proposal := &model.SignedProposal{ + Proposal: *MakeProposal(), + SigData: unittest.SignatureFixture(), + } + for _, option := range options { + option(proposal) + } + return proposal +} + func MakeProposal(options ...func(*model.Proposal)) *model.Proposal { proposal := &model.Proposal{ - Block: MakeBlock(), - SigData: unittest.SignatureFixture(), + Block: MakeBlock(), + LastViewTC: nil, } for _, option := range options { option(proposal) @@ -67,14 +78,20 @@ func MakeProposal(options ...func(*model.Proposal)) *model.Proposal { return proposal } +func WithProposal(proposal *model.Proposal) func(*model.SignedProposal) { + return func(signedProposal *model.SignedProposal) { + signedProposal.Proposal = *proposal + } +} + func WithBlock(block *model.Block) func(*model.Proposal) { return func(proposal *model.Proposal) { proposal.Block = block } } -func WithSigData(sigData []byte) func(*model.Proposal) { - return func(proposal *model.Proposal) { +func WithSigData(sigData []byte) func(*model.SignedProposal) { + return func(proposal *model.SignedProposal) { proposal.SigData = sigData } } @@ -84,3 +101,29 @@ func WithLastViewTC(lastViewTC *flow.TimeoutCertificate) func(*model.Proposal) { proposal.LastViewTC = lastViewTC } } + +// SignedProposalToFlow turns a block proposal into a flow header. +// +// CAUTION: This function is only suitable for TESTING purposes ONLY. +// In the conversion from `flow.Header` to HotStuff's `model.Block` we lose information +// (e.g. `ChainID` and `Height` are not included in `model.Block`) and hence the conversion +// is *not reversible*. This is on purpose, because we wanted to only expose data to +// HotStuff that HotStuff really needs.
+func SignedProposalToFlow(proposal *model.SignedProposal) *flow.Header { + + block := proposal.Block + header := &flow.Header{ + ParentID: block.QC.BlockID, + PayloadHash: block.PayloadHash, + Timestamp: block.Timestamp, + View: block.View, + ParentView: block.QC.View, + ParentVoterIndices: block.QC.SignerIndices, + ParentVoterSigData: block.QC.SigData, + ProposerID: block.ProposerID, + ProposerSigData: proposal.SigData, + LastViewTC: proposal.LastViewTC, + } + + return header +} diff --git a/consensus/hotstuff/integration/connect_test.go b/consensus/hotstuff/integration/connect_test.go index a254e0f9f3c..177a8d0244b 100644 --- a/consensus/hotstuff/integration/connect_test.go +++ b/consensus/hotstuff/integration/connect_test.go @@ -37,7 +37,7 @@ func Connect(t *testing.T, instances []*Instance) { } // convert into proposal immediately - proposal := model.ProposalFromFlow(header) + proposal := model.SignedProposalFromFlow(header) // store locally and loop back to engine for processing sender.ProcessBlock(proposal) diff --git a/consensus/hotstuff/integration/filters_test.go b/consensus/hotstuff/integration/filters_test.go index 8d6ac067f48..8bf12fc6d3b 100644 --- a/consensus/hotstuff/integration/filters_test.go +++ b/consensus/hotstuff/integration/filters_test.go @@ -8,7 +8,7 @@ import ( ) // VoteFilter is a filter function for dropping Votes. -// Return value `true` implies that the the given Vote should be +// Return value `true` implies that the given Vote should be // dropped, while `false` indicates that the Vote should be received. type VoteFilter func(*model.Vote) bool @@ -34,34 +34,34 @@ func BlockVotesBy(voterID flow.Identifier) VoteFilter { } // ProposalFilter is a filter function for dropping Proposals. -// Return value `true` implies that the the given Proposal should be -// dropped, while `false` indicates that the Proposal should be received. -type ProposalFilter func(*model.Proposal) bool +// Return value `true` implies that the given SignedProposal should be +// dropped, while `false` indicates that the SignedProposal should be received. +type ProposalFilter func(*model.SignedProposal) bool -func BlockNoProposals(*model.Proposal) bool { +func BlockNoProposals(*model.SignedProposal) bool { return false } -func BlockAllProposals(*model.Proposal) bool { +func BlockAllProposals(*model.SignedProposal) bool { return true } // BlockProposalRandomly drops proposals randomly with a probability of `dropProbability` ∈ [0,1] func BlockProposalRandomly(dropProbability float64) ProposalFilter { - return func(*model.Proposal) bool { + return func(*model.SignedProposal) bool { return rand.Float64() < dropProbability } } // BlockProposalsBy drops all proposals originating from the specified `proposerID` func BlockProposalsBy(proposerID flow.Identifier) ProposalFilter { - return func(proposal *model.Proposal) bool { + return func(proposal *model.SignedProposal) bool { return proposal.Block.ProposerID == proposerID } } // TimeoutObjectFilter is a filter function for dropping TimeoutObjects. -// Return value `true` implies that the the given TimeoutObject should be +// Return value `true` implies that the given TimeoutObject should be // dropped, while `false` indicates that the TimeoutObject should be received. 
type TimeoutObjectFilter func(*model.TimeoutObject) bool diff --git a/consensus/hotstuff/integration/instance_test.go b/consensus/hotstuff/integration/instance_test.go index 069c6ede950..49b8e6e68bd 100644 --- a/consensus/hotstuff/integration/instance_test.go +++ b/consensus/hotstuff/integration/instance_test.go @@ -59,7 +59,7 @@ type Instance struct { queue chan interface{} updatingBlocks sync.RWMutex headers map[flow.Identifier]*flow.Header - pendings map[flow.Identifier]*model.Proposal // indexed by parent ID + pendings map[flow.Identifier]*model.SignedProposal // indexed by parent ID // mocked dependencies committee *mocks.DynamicCommittee @@ -151,7 +151,7 @@ func NewInstance(t *testing.T, options ...Option) *Instance { stop: cfg.StopCondition, // instance data - pendings: make(map[flow.Identifier]*model.Proposal), + pendings: make(map[flow.Identifier]*model.SignedProposal), headers: make(map[flow.Identifier]*flow.Header), queue: make(chan interface{}, 1024), @@ -294,7 +294,7 @@ func NewInstance(t *testing.T, options ...Option) *Instance { } // convert into proposal immediately - proposal := model.ProposalFromFlow(header) + proposal := model.SignedProposalFromFlow(header) // store locally and loop back to engine for processing in.ProcessBlock(proposal) @@ -403,7 +403,7 @@ func NewInstance(t *testing.T, options ...Option) *Instance { minRequiredWeight := committees.WeightThresholdToBuildQC(uint64(len(in.participants)) * weight) voteProcessorFactory := mocks.NewVoteProcessorFactory(t) voteProcessorFactory.On("Create", mock.Anything, mock.Anything).Return( - func(log zerolog.Logger, proposal *model.Proposal) hotstuff.VerifyingVoteProcessor { + func(log zerolog.Logger, proposal *model.SignedProposal) hotstuff.VerifyingVoteProcessor { stakingSigAggtor := helper.MakeWeightedSignatureAggregator(weight) stakingSigAggtor.On("Verify", mock.Anything, mock.Anything).Return(nil).Maybe() @@ -597,7 +597,7 @@ func (in *Instance) Run() error { } case msg := <-in.queue: switch m := msg.(type) { - case *model.Proposal: + case *model.SignedProposal: // add block to aggregator in.voteAggregator.AddBlock(m) // then pass to event handler @@ -629,7 +629,7 @@ func (in *Instance) Run() error { } } -func (in *Instance) ProcessBlock(proposal *model.Proposal) { +func (in *Instance) ProcessBlock(proposal *model.SignedProposal) { in.updatingBlocks.Lock() defer in.updatingBlocks.Unlock() _, parentExists := in.headers[proposal.Block.QC.BlockID] @@ -637,7 +637,7 @@ func (in *Instance) ProcessBlock(proposal *model.Proposal) { if parentExists { next := proposal for next != nil { - in.headers[next.Block.BlockID] = model.ProposalToFlow(next) + in.headers[next.Block.BlockID] = helper.SignedProposalToFlow(next) in.queue <- next // keep processing the pending blocks diff --git a/consensus/hotstuff/mocks/consumer.go b/consensus/hotstuff/mocks/consumer.go index cd2363a1b93..311ae5a8a29 100644 --- a/consensus/hotstuff/mocks/consumer.go +++ b/consensus/hotstuff/mocks/consumer.go @@ -79,7 +79,7 @@ func (_m *Consumer) OnQcTriggeredViewChange(oldView uint64, newView uint64, qc * } // OnReceiveProposal provides a mock function with given fields: currentView, proposal -func (_m *Consumer) OnReceiveProposal(currentView uint64, proposal *model.Proposal) { +func (_m *Consumer) OnReceiveProposal(currentView uint64, proposal *model.SignedProposal) { _m.Called(currentView, proposal) } diff --git a/consensus/hotstuff/mocks/event_handler.go b/consensus/hotstuff/mocks/event_handler.go index a74897fd303..7fac8c02d1e 100644 --- 
a/consensus/hotstuff/mocks/event_handler.go +++ b/consensus/hotstuff/mocks/event_handler.go @@ -57,7 +57,7 @@ func (_m *EventHandler) OnPartialTcCreated(partialTC *hotstuff.PartialTcCreated) } // OnReceiveProposal provides a mock function with given fields: proposal -func (_m *EventHandler) OnReceiveProposal(proposal *model.Proposal) error { +func (_m *EventHandler) OnReceiveProposal(proposal *model.SignedProposal) error { ret := _m.Called(proposal) if len(ret) == 0 { @@ -65,7 +65,7 @@ func (_m *EventHandler) OnReceiveProposal(proposal *model.Proposal) error { } var r0 error - if rf, ok := ret.Get(0).(func(*model.Proposal) error); ok { + if rf, ok := ret.Get(0).(func(*model.SignedProposal) error); ok { r0 = rf(proposal) } else { r0 = ret.Error(0) diff --git a/consensus/hotstuff/mocks/event_loop.go b/consensus/hotstuff/mocks/event_loop.go index 1d3943eb3c4..c27549181b2 100644 --- a/consensus/hotstuff/mocks/event_loop.go +++ b/consensus/hotstuff/mocks/event_loop.go @@ -98,7 +98,7 @@ func (_m *EventLoop) Start(_a0 irrecoverable.SignalerContext) { } // SubmitProposal provides a mock function with given fields: proposal -func (_m *EventLoop) SubmitProposal(proposal *model.Proposal) { +func (_m *EventLoop) SubmitProposal(proposal *model.SignedProposal) { _m.Called(proposal) } diff --git a/consensus/hotstuff/mocks/participant_consumer.go b/consensus/hotstuff/mocks/participant_consumer.go index 883380232b4..b6970bba788 100644 --- a/consensus/hotstuff/mocks/participant_consumer.go +++ b/consensus/hotstuff/mocks/participant_consumer.go @@ -42,7 +42,7 @@ func (_m *ParticipantConsumer) OnQcTriggeredViewChange(oldView uint64, newView u } // OnReceiveProposal provides a mock function with given fields: currentView, proposal -func (_m *ParticipantConsumer) OnReceiveProposal(currentView uint64, proposal *model.Proposal) { +func (_m *ParticipantConsumer) OnReceiveProposal(currentView uint64, proposal *model.SignedProposal) { _m.Called(currentView, proposal) } diff --git a/consensus/hotstuff/mocks/safety_rules.go b/consensus/hotstuff/mocks/safety_rules.go index 54c36caf9fa..83f33269542 100644 --- a/consensus/hotstuff/mocks/safety_rules.go +++ b/consensus/hotstuff/mocks/safety_rules.go @@ -46,7 +46,7 @@ func (_m *SafetyRules) ProduceTimeout(curView uint64, newestQC *flow.QuorumCerti } // ProduceVote provides a mock function with given fields: proposal, curView -func (_m *SafetyRules) ProduceVote(proposal *model.Proposal, curView uint64) (*model.Vote, error) { +func (_m *SafetyRules) ProduceVote(proposal *model.SignedProposal, curView uint64) (*model.Vote, error) { ret := _m.Called(proposal, curView) if len(ret) == 0 { @@ -55,10 +55,10 @@ func (_m *SafetyRules) ProduceVote(proposal *model.Proposal, curView uint64) (*m var r0 *model.Vote var r1 error - if rf, ok := ret.Get(0).(func(*model.Proposal, uint64) (*model.Vote, error)); ok { + if rf, ok := ret.Get(0).(func(*model.SignedProposal, uint64) (*model.Vote, error)); ok { return rf(proposal, curView) } - if rf, ok := ret.Get(0).(func(*model.Proposal, uint64) *model.Vote); ok { + if rf, ok := ret.Get(0).(func(*model.SignedProposal, uint64) *model.Vote); ok { r0 = rf(proposal, curView) } else { if ret.Get(0) != nil { @@ -66,7 +66,7 @@ func (_m *SafetyRules) ProduceVote(proposal *model.Proposal, curView uint64) (*m } } - if rf, ok := ret.Get(1).(func(*model.Proposal, uint64) error); ok { + if rf, ok := ret.Get(1).(func(*model.SignedProposal, uint64) error); ok { r1 = rf(proposal, curView) } else { r1 = ret.Error(1) diff --git 
a/consensus/hotstuff/mocks/validator.go b/consensus/hotstuff/mocks/validator.go index 9b97d23d762..728795ef209 100644 --- a/consensus/hotstuff/mocks/validator.go +++ b/consensus/hotstuff/mocks/validator.go @@ -16,7 +16,7 @@ type Validator struct { } // ValidateProposal provides a mock function with given fields: proposal -func (_m *Validator) ValidateProposal(proposal *model.Proposal) error { +func (_m *Validator) ValidateProposal(proposal *model.SignedProposal) error { ret := _m.Called(proposal) if len(ret) == 0 { @@ -24,7 +24,7 @@ func (_m *Validator) ValidateProposal(proposal *model.Proposal) error { } var r0 error - if rf, ok := ret.Get(0).(func(*model.Proposal) error); ok { + if rf, ok := ret.Get(0).(func(*model.SignedProposal) error); ok { r0 = rf(proposal) } else { r0 = ret.Error(0) diff --git a/consensus/hotstuff/mocks/vote_aggregation_consumer.go b/consensus/hotstuff/mocks/vote_aggregation_consumer.go index 23ceb89ee6a..52819fcb1f9 100644 --- a/consensus/hotstuff/mocks/vote_aggregation_consumer.go +++ b/consensus/hotstuff/mocks/vote_aggregation_consumer.go @@ -31,7 +31,7 @@ func (_m *VoteAggregationConsumer) OnQcConstructedFromVotes(_a0 *flow.QuorumCert } // OnVoteForInvalidBlockDetected provides a mock function with given fields: vote, invalidProposal -func (_m *VoteAggregationConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.Proposal) { +func (_m *VoteAggregationConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.SignedProposal) { _m.Called(vote, invalidProposal) } diff --git a/consensus/hotstuff/mocks/vote_aggregation_violation_consumer.go b/consensus/hotstuff/mocks/vote_aggregation_violation_consumer.go index acac87cdce9..dcb718532a2 100644 --- a/consensus/hotstuff/mocks/vote_aggregation_violation_consumer.go +++ b/consensus/hotstuff/mocks/vote_aggregation_violation_consumer.go @@ -23,7 +23,7 @@ func (_m *VoteAggregationViolationConsumer) OnInvalidVoteDetected(err model.Inva } // OnVoteForInvalidBlockDetected provides a mock function with given fields: vote, invalidProposal -func (_m *VoteAggregationViolationConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.Proposal) { +func (_m *VoteAggregationViolationConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.SignedProposal) { _m.Called(vote, invalidProposal) } diff --git a/consensus/hotstuff/mocks/vote_aggregator.go b/consensus/hotstuff/mocks/vote_aggregator.go index 545990086ea..9bf372ecb2c 100644 --- a/consensus/hotstuff/mocks/vote_aggregator.go +++ b/consensus/hotstuff/mocks/vote_aggregator.go @@ -15,7 +15,7 @@ type VoteAggregator struct { } // AddBlock provides a mock function with given fields: block -func (_m *VoteAggregator) AddBlock(block *model.Proposal) { +func (_m *VoteAggregator) AddBlock(block *model.SignedProposal) { _m.Called(block) } @@ -45,7 +45,7 @@ func (_m *VoteAggregator) Done() <-chan struct{} { } // InvalidBlock provides a mock function with given fields: block -func (_m *VoteAggregator) InvalidBlock(block *model.Proposal) error { +func (_m *VoteAggregator) InvalidBlock(block *model.SignedProposal) error { ret := _m.Called(block) if len(ret) == 0 { @@ -53,7 +53,7 @@ func (_m *VoteAggregator) InvalidBlock(block *model.Proposal) error { } var r0 error - if rf, ok := ret.Get(0).(func(*model.Proposal) error); ok { + if rf, ok := ret.Get(0).(func(*model.SignedProposal) error); ok { r0 = rf(block) } else { r0 = ret.Error(0) diff --git a/consensus/hotstuff/mocks/vote_collector.go 
b/consensus/hotstuff/mocks/vote_collector.go index 5165640a838..27db379c9a8 100644 --- a/consensus/hotstuff/mocks/vote_collector.go +++ b/consensus/hotstuff/mocks/vote_collector.go @@ -33,7 +33,7 @@ func (_m *VoteCollector) AddVote(vote *model.Vote) error { } // ProcessBlock provides a mock function with given fields: block -func (_m *VoteCollector) ProcessBlock(block *model.Proposal) error { +func (_m *VoteCollector) ProcessBlock(block *model.SignedProposal) error { ret := _m.Called(block) if len(ret) == 0 { @@ -41,7 +41,7 @@ func (_m *VoteCollector) ProcessBlock(block *model.Proposal) error { } var r0 error - if rf, ok := ret.Get(0).(func(*model.Proposal) error); ok { + if rf, ok := ret.Get(0).(func(*model.SignedProposal) error); ok { r0 = rf(block) } else { r0 = ret.Error(0) diff --git a/consensus/hotstuff/mocks/vote_processor_factory.go b/consensus/hotstuff/mocks/vote_processor_factory.go index cb87a8568c3..5c4925cce4d 100644 --- a/consensus/hotstuff/mocks/vote_processor_factory.go +++ b/consensus/hotstuff/mocks/vote_processor_factory.go @@ -17,7 +17,7 @@ type VoteProcessorFactory struct { } // Create provides a mock function with given fields: log, proposal -func (_m *VoteProcessorFactory) Create(log zerolog.Logger, proposal *model.Proposal) (hotstuff.VerifyingVoteProcessor, error) { +func (_m *VoteProcessorFactory) Create(log zerolog.Logger, proposal *model.SignedProposal) (hotstuff.VerifyingVoteProcessor, error) { ret := _m.Called(log, proposal) if len(ret) == 0 { @@ -26,10 +26,10 @@ func (_m *VoteProcessorFactory) Create(log zerolog.Logger, proposal *model.Propo var r0 hotstuff.VerifyingVoteProcessor var r1 error - if rf, ok := ret.Get(0).(func(zerolog.Logger, *model.Proposal) (hotstuff.VerifyingVoteProcessor, error)); ok { + if rf, ok := ret.Get(0).(func(zerolog.Logger, *model.SignedProposal) (hotstuff.VerifyingVoteProcessor, error)); ok { return rf(log, proposal) } - if rf, ok := ret.Get(0).(func(zerolog.Logger, *model.Proposal) hotstuff.VerifyingVoteProcessor); ok { + if rf, ok := ret.Get(0).(func(zerolog.Logger, *model.SignedProposal) hotstuff.VerifyingVoteProcessor); ok { r0 = rf(log, proposal) } else { if ret.Get(0) != nil { @@ -37,7 +37,7 @@ func (_m *VoteProcessorFactory) Create(log zerolog.Logger, proposal *model.Propo } } - if rf, ok := ret.Get(1).(func(zerolog.Logger, *model.Proposal) error); ok { + if rf, ok := ret.Get(1).(func(zerolog.Logger, *model.SignedProposal) error); ok { r1 = rf(log, proposal) } else { r1 = ret.Error(1) diff --git a/consensus/hotstuff/model/errors.go b/consensus/hotstuff/model/errors.go index 4244d0ac531..75f79de92fd 100644 --- a/consensus/hotstuff/model/errors.go +++ b/consensus/hotstuff/model/errors.go @@ -113,7 +113,7 @@ type MissingBlockError struct { } func (e MissingBlockError) Error() string { - return fmt.Sprintf("missing Proposal at view %d with ID %v", e.View, e.BlockID) + return fmt.Sprintf("missing block at view %d with ID %v", e.View, e.BlockID) } // IsMissingBlockError returns whether an error is MissingBlockError @@ -165,11 +165,11 @@ func (e InvalidTCError) Unwrap() error { // InvalidProposalError indicates that the proposal is invalid type InvalidProposalError struct { - InvalidProposal *Proposal + InvalidProposal *SignedProposal Err error } -func NewInvalidProposalErrorf(proposal *Proposal, msg string, args ...interface{}) error { +func NewInvalidProposalErrorf(proposal *SignedProposal, msg string, args ...interface{}) error { return InvalidProposalError{ InvalidProposal: proposal, Err: fmt.Errorf(msg, args...), diff --git 
a/consensus/hotstuff/model/proposal.go b/consensus/hotstuff/model/proposal.go index 0cd19f85177..6dcca721583 100644 --- a/consensus/hotstuff/model/proposal.go +++ b/consensus/hotstuff/model/proposal.go @@ -4,16 +4,48 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// Proposal represent a new proposed block within HotStuff (and thus -// a header in the bigger picture), signed by the proposer. +// Proposal represents a block proposal under construction. +// In order to decide whether a proposal is safe to sign, HotStuff's Safety Rules require +// proof that the leader entered the respective view in a protocol-compliant manner. Specifically, +// we require a TimeoutCertificate [TC] if and only if the QC in the block is _not_ for the +// immediately preceding view. Thereby we protect the consensus process from malicious leaders +// attempting to skip views that haven't concluded yet (a form of front-running attack). +// However, LastViewTC is only relevant until a QC is known that certifies the correctness of +// the block. Thereafter, the QC attests that honest consensus participants have confirmed the +// validity of the fork up to the latest certified block (including protocol-compliant view transitions). +// +// By explicitly differentiating the Proposal from the SignedProposal (extending Proposal by +// adding the proposer's signature), we can unify the algorithmic path of signing block proposals. +// This codifies the important aspect that a proposer's signature for their own block +// is conceptually also just a vote (we explicitly use that for aggregating votes, including the +// proposer's own vote into a QC). In order to express this conceptual equivalence in code, the +// voting logic in Safety Rules must also operate on an unsigned Proposal. +// +// TODO: atm, the flow.Header embeds the LastViewTC. However, for HotStuff we have `model.Block` +// and `model.Proposal`, where the latter was introduced when we added the PaceMaker to +// vanilla HotStuff. It would be more consistent if we added `LastViewTC` to `model.Block`, +// or even better, introduce an interface for HotStuff's notion of a block (exposing +// the fields in `model.Block` plus LastViewTC) type Proposal struct { Block *Block - SigData []byte LastViewTC *flow.TimeoutCertificate } +// SignedProposal represents a new proposed block within HotStuff (and thus +// a header in the bigger picture), signed by the proposer. +// +// CAUTION: the signature only covers the pair (Block.View, Block.BlockID). Therefore, only +// the data that is hashed into the BlockID is cryptographically secured by the proposer's +// signature. +// Specifically, the proposer's signature cannot be covered by the Block.BlockID, as the +// proposer _signs_ the Block.BlockID (otherwise we have a cyclic dependency). +type SignedProposal struct { + Proposal + SigData []byte +} + // ProposerVote extracts the proposer vote from the proposal -func (p *Proposal) ProposerVote() *Vote { +func (p *SignedProposal) ProposerVote() *Vote { vote := Vote{ View: p.Block.View, BlockID: p.Block.BlockID, @@ -23,32 +55,23 @@ func (p *Proposal) ProposerVote() *Vote { return &vote } -// ProposalFromFlow turns a flow header into a hotstuff block type. +// SignedProposalFromFlow turns a flow header into a hotstuff block type.
+func SignedProposalFromFlow(header *flow.Header) *SignedProposal { + proposal := SignedProposal{ + Proposal: Proposal{ + Block: BlockFromFlow(header), + LastViewTC: header.LastViewTC, + }, + SigData: header.ProposerSigData, + } + return &proposal +} + +// ProposalFromFlow turns an unsigned flow header into an unsigned hotstuff block type. func ProposalFromFlow(header *flow.Header) *Proposal { proposal := Proposal{ Block: BlockFromFlow(header), - SigData: header.ProposerSigData, LastViewTC: header.LastViewTC, } return &proposal } - -// ProposalToFlow turns a block proposal into a flow header. -func ProposalToFlow(proposal *Proposal) *flow.Header { - - block := proposal.Block - header := &flow.Header{ - ParentID: block.QC.BlockID, - PayloadHash: block.PayloadHash, - Timestamp: block.Timestamp, - View: block.View, - ParentView: block.QC.View, - ParentVoterIndices: block.QC.SignerIndices, - ParentVoterSigData: block.QC.SigData, - ProposerID: block.ProposerID, - ProposerSigData: proposal.SigData, - LastViewTC: proposal.LastViewTC, - } - - return header -} diff --git a/consensus/hotstuff/notifications/log_consumer.go b/consensus/hotstuff/notifications/log_consumer.go index f8baea639dc..3e454b61272 100644 --- a/consensus/hotstuff/notifications/log_consumer.go +++ b/consensus/hotstuff/notifications/log_consumer.go @@ -69,7 +69,7 @@ func (lc *LogConsumer) OnDoubleProposeDetected(block *model.Block, alt *model.Bl Msg("double proposal detected") } -func (lc *LogConsumer) OnReceiveProposal(currentView uint64, proposal *model.Proposal) { +func (lc *LogConsumer) OnReceiveProposal(currentView uint64, proposal *model.SignedProposal) { logger := lc.logBasicBlockData(lc.log.Debug(), proposal.Block). Uint64("cur_view", currentView) lastViewTC := proposal.LastViewTC @@ -197,7 +197,7 @@ func (lc *LogConsumer) OnInvalidVoteDetected(err model.InvalidVoteError) { Msgf("invalid vote detected: %s", err.Error()) } -func (lc *LogConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, proposal *model.Proposal) { +func (lc *LogConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, proposal *model.SignedProposal) { lc.log.Warn(). Str(logging.KeySuspicious, "true"). Uint64("vote_view", vote.View).
diff --git a/consensus/hotstuff/notifications/noop_consumer.go b/consensus/hotstuff/notifications/noop_consumer.go index d8ad3e66e4f..38433c72b1a 100644 --- a/consensus/hotstuff/notifications/noop_consumer.go +++ b/consensus/hotstuff/notifications/noop_consumer.go @@ -32,7 +32,7 @@ func (*NoopParticipantConsumer) OnEventProcessed() {} func (*NoopParticipantConsumer) OnStart(uint64) {} -func (*NoopParticipantConsumer) OnReceiveProposal(uint64, *model.Proposal) {} +func (*NoopParticipantConsumer) OnReceiveProposal(uint64, *model.SignedProposal) {} func (*NoopParticipantConsumer) OnReceiveQc(uint64, *flow.QuorumCertificate) {} @@ -116,7 +116,8 @@ func (*NoopProposalViolationConsumer) OnDoubleVotingDetected(*model.Vote, *model func (*NoopProposalViolationConsumer) OnInvalidVoteDetected(model.InvalidVoteError) {} -func (*NoopProposalViolationConsumer) OnVoteForInvalidBlockDetected(*model.Vote, *model.Proposal) {} +func (*NoopProposalViolationConsumer) OnVoteForInvalidBlockDetected(*model.Vote, *model.SignedProposal) { +} func (*NoopProposalViolationConsumer) OnDoubleTimeoutDetected(*model.TimeoutObject, *model.TimeoutObject) { } diff --git a/consensus/hotstuff/notifications/pubsub/participant_distributor.go b/consensus/hotstuff/notifications/pubsub/participant_distributor.go index f5047cd7a53..4285a96b258 100644 --- a/consensus/hotstuff/notifications/pubsub/participant_distributor.go +++ b/consensus/hotstuff/notifications/pubsub/participant_distributor.go @@ -46,7 +46,7 @@ func (d *ParticipantDistributor) OnStart(currentView uint64) { } } -func (d *ParticipantDistributor) OnReceiveProposal(currentView uint64, proposal *model.Proposal) { +func (d *ParticipantDistributor) OnReceiveProposal(currentView uint64, proposal *model.SignedProposal) { d.lock.RLock() defer d.lock.RUnlock() for _, subscriber := range d.consumers { diff --git a/consensus/hotstuff/notifications/pubsub/vote_aggregation_violation_consumer.go b/consensus/hotstuff/notifications/pubsub/vote_aggregation_violation_consumer.go index d9d1e9baa26..7b75bd933e1 100644 --- a/consensus/hotstuff/notifications/pubsub/vote_aggregation_violation_consumer.go +++ b/consensus/hotstuff/notifications/pubsub/vote_aggregation_violation_consumer.go @@ -43,7 +43,7 @@ func (d *VoteAggregationViolationDistributor) OnInvalidVoteDetected(err model.In } } -func (d *VoteAggregationViolationDistributor) OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.Proposal) { +func (d *VoteAggregationViolationDistributor) OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.SignedProposal) { d.lock.RLock() defer d.lock.RUnlock() for _, subscriber := range d.consumers { diff --git a/consensus/hotstuff/notifications/slashing_violation_consumer.go b/consensus/hotstuff/notifications/slashing_violation_consumer.go index c03347ece6f..940c8270198 100644 --- a/consensus/hotstuff/notifications/slashing_violation_consumer.go +++ b/consensus/hotstuff/notifications/slashing_violation_consumer.go @@ -78,7 +78,7 @@ func (c *SlashingViolationsConsumer) OnInvalidTimeoutDetected(err model.InvalidT Msg("OnInvalidTimeoutDetected") } -func (c *SlashingViolationsConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, proposal *model.Proposal) { +func (c *SlashingViolationsConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, proposal *model.SignedProposal) { c.log.Warn(). Uint64("vote_view", vote.View). Hex("voted_block_id", vote.BlockID[:]). 
diff --git a/consensus/hotstuff/notifications/telemetry.go b/consensus/hotstuff/notifications/telemetry.go index d6cc3852179..a636bac3789 100644 --- a/consensus/hotstuff/notifications/telemetry.go +++ b/consensus/hotstuff/notifications/telemetry.go @@ -60,7 +60,7 @@ func (t *TelemetryConsumer) OnStart(currentView uint64) { t.pathHandler.NextStep().Msg("OnStart") } -func (t *TelemetryConsumer) OnReceiveProposal(currentView uint64, proposal *model.Proposal) { +func (t *TelemetryConsumer) OnReceiveProposal(currentView uint64, proposal *model.SignedProposal) { block := proposal.Block t.pathHandler.StartNextPath(currentView) step := t.pathHandler.NextStep(). diff --git a/consensus/hotstuff/safety_rules.go b/consensus/hotstuff/safety_rules.go index f32e8873c4f..7e6e29b392f 100644 --- a/consensus/hotstuff/safety_rules.go +++ b/consensus/hotstuff/safety_rules.go @@ -31,7 +31,7 @@ type SafetyData struct { // Implementations are generally *not* concurrency safe. type SafetyRules interface { // ProduceVote takes a block proposal and current view, and decides whether to vote for the block. - // Voting is deterministic meaning voting for same proposal will always result in the same vote. + // Voting is deterministic, i.e. voting for the same proposal will always result in the same vote. // Returns: // * (vote, nil): On the _first_ block for the current view that is safe to vote for. // Subsequently, voter does _not_ vote for any _other_ block with the same (or lower) view. // * (nil, model.NoVoteError): If the safety module decides that it is not safe to vote for the given block. // This is a sentinel error and _expected_ during normal operation. // All other errors are unexpected and potential symptoms of uncovered edge cases or corrupted internal state (fatal). - ProduceVote(proposal *model.Proposal, curView uint64) (*model.Vote, error) + ProduceVote(proposal *model.SignedProposal, curView uint64) (*model.Vote, error) + + // ProduceTimeout takes current view, highest locally known QC and TC (optional, must be nil if and // only if QC is for previous view) and decides whether to produce timeout for current view. // Returns: diff --git a/consensus/hotstuff/safetyrules/safety_rules.go b/consensus/hotstuff/safetyrules/safety_rules.go index e00942603b9..85778736a26 100644 --- a/consensus/hotstuff/safetyrules/safety_rules.go +++ b/consensus/hotstuff/safetyrules/safety_rules.go @@ -73,14 +73,33 @@ func New( // This is a sentinel error and _expected_ during normal operation. // // All other errors are unexpected and potential symptoms of uncovered edge cases or corrupted internal state (fatal). -func (r *SafetyRules) ProduceVote(proposal *model.Proposal, curView uint64) (*model.Vote, error) { +func (r *SafetyRules) ProduceVote(signedProposal *model.SignedProposal, curView uint64) (*model.Vote, error) { + return r.produceVote(&signedProposal.Proposal, curView) +} + +// produceVote implements the core Safety Rules to validate whether it is safe to vote. +// This method is to be used to vote for other leaders' blocks as well as this node's own proposals +// under construction. We explicitly codify the important aspect that a proposer's signature for their +// own block is conceptually also just a vote (we explicitly use that property when aggregating votes and +// including the proposer's own vote into a QC). In order to express this conceptual equivalence in code, the +// voting logic in Safety Rules must also operate on an unsigned Proposal.
+// +// The curView is taken as input to ensure SafetyRules will only vote for proposals at current view and prevent double voting. +// Returns: +// - (vote, nil): On the _first_ block for the current view that is safe to vote for. +// Subsequently, voter does _not_ vote for any other block with the same (or lower) view. +// - (nil, model.NoVoteError): If the voter decides that it does not want to vote for the given block. +// This is a sentinel error and _expected_ during normal operation. +// +// All other errors are unexpected and potential symptoms of uncovered edge cases or corrupted internal state (fatal). +func (r *SafetyRules) produceVote(proposal *model.Proposal, curView uint64) (*model.Vote, error) { block := proposal.Block // sanity checks: if curView != block.View { return nil, fmt.Errorf("expecting block for current view %d, but block's view is %d", curView, block.View) } - err := r.IsSafeToVote(proposal) + err := r.isSafeToVote(proposal) if err != nil { return nil, fmt.Errorf("not safe to vote for proposal %x: %w", proposal.Block.BlockID, err) } @@ -230,13 +249,13 @@ func (r *SafetyRules) SignOwnProposal(unsignedProposal *model.Proposal) (*model. return nil, fmt.Errorf("can't sign proposal for someone else's block") } - return r.ProduceVote(unsignedProposal, unsignedProposal.Block.View) + return r.produceVote(unsignedProposal, unsignedProposal.Block.View) } -// IsSafeToVote checks if this proposal is valid in terms of voting rules, if voting for this proposal won't break safety rules. +// isSafeToVote checks if this proposal is valid in terms of voting rules, if voting for this proposal won't break safety rules. // Expected errors during normal operations: // - NoVoteError if replica already acted during this view (either voted or generated timeout) -func (r *SafetyRules) IsSafeToVote(proposal *model.Proposal) error { +func (r *SafetyRules) isSafeToVote(proposal *model.Proposal) error { blockView := proposal.Block.View err := r.validateEvidenceForEnteringView(blockView, proposal.Block.QC, proposal.LastViewTC) diff --git a/consensus/hotstuff/safetyrules/safety_rules_test.go b/consensus/hotstuff/safetyrules/safety_rules_test.go index 4c650ca1141..8034c76793b 100644 --- a/consensus/hotstuff/safetyrules/safety_rules_test.go +++ b/consensus/hotstuff/safetyrules/safety_rules_test.go @@ -31,7 +31,7 @@ type SafetyRulesTestSuite struct { suite.Suite bootstrapBlock *model.Block - proposal *model.Proposal + proposal *model.SignedProposal proposerIdentity *flow.Identity ourIdentity *flow.Identity signer *mocks.Signer @@ -50,13 +50,13 @@ func (s *SafetyRulesTestSuite) SetupTest() { // bootstrap at random bootstrapBlock s.bootstrapBlock = helper.MakeBlock(helper.WithBlockView(100)) - s.proposal = helper.MakeProposal( + s.proposal = helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), helper.WithBlockView(s.bootstrapBlock.View+1), helper.WithBlockProposer(s.proposerIdentity.NodeID)), - )) + )))) s.committee.On("Self").Return(s.ourIdentity.NodeID).Maybe() s.committee.On("LeaderForView", mock.Anything).Return(s.proposerIdentity.NodeID, nil).Maybe() @@ -104,13 +104,13 @@ func (s *SafetyRulesTestSuite) TestProduceVote_ShouldVote() { helper.WithTCNewestQC(s.proposal.Block.QC)) // voting on proposal where last view ended with TC - proposalWithTC := helper.MakeProposal( + proposalWithTC := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( 
helper.WithParentBlock(s.bootstrapBlock), helper.WithBlockView(s.proposal.Block.View+2), helper.WithBlockProposer(s.proposerIdentity.NodeID))), - helper.WithLastViewTC(lastViewTC)) + helper.WithLastViewTC(lastViewTC)))) expectedSafetyData = &hotstuff.SafetyData{ LockedOneChainView: s.proposal.Block.QC.View, @@ -139,13 +139,13 @@ func (s *SafetyRulesTestSuite) TestProduceVote_IncludedQCHigherThanTCsQC() { helper.WithTCNewestQC(s.proposal.Block.QC)) // voting on proposal where last view ended with TC - proposalWithTC := helper.MakeProposal( + proposalWithTC := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.proposal.Block), helper.WithBlockView(s.proposal.Block.View+2), helper.WithBlockProposer(s.proposerIdentity.NodeID))), - helper.WithLastViewTC(lastViewTC)) + helper.WithLastViewTC(lastViewTC)))) expectedSafetyData := &hotstuff.SafetyData{ LockedOneChainView: proposalWithTC.Block.QC.View, @@ -210,13 +210,13 @@ func (s *SafetyRulesTestSuite) TestProduceVote_InvalidCurrentView() { }) s.Run("view-not-monotonously-increasing", func() { // create block with view < HighestAcknowledgedView - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( func(block *model.Block) { block.QC = helper.MakeQC(helper.WithQCView(s.safetyData.HighestAcknowledgedView - 2)) }, - helper.WithBlockView(s.safetyData.HighestAcknowledgedView-1)))) + helper.WithBlockView(s.safetyData.HighestAcknowledgedView-1)))))) vote, err := s.safety.ProduceVote(proposal, proposal.Block.View) require.Nil(s.T(), vote) require.Error(s.T(), err) @@ -362,12 +362,12 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { // a proposal which includes a QC for the previous round should not contain a TC s.Run("proposal-includes-last-view-qc-and-tc", func() { - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), helper.WithBlockView(s.bootstrapBlock.View+1))), - helper.WithLastViewTC(helper.MakeTC())) + helper.WithLastViewTC(helper.MakeTC())))) s.committee.On("IdentityByBlock", proposal.Block.BlockID, proposal.Block.ProposerID).Return(s.proposerIdentity, nil).Maybe() vote, err := s.safety.ProduceVote(proposal, proposal.Block.View) require.Error(s.T(), err) @@ -376,11 +376,11 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { }) s.Run("no-last-view-tc", func() { // create block where Block.View != Block.QC.View+1 and LastViewTC = nil - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), - helper.WithBlockView(s.bootstrapBlock.View+2)))) + helper.WithBlockView(s.bootstrapBlock.View+2)))))) vote, err := s.safety.ProduceVote(proposal, proposal.Block.View) require.Error(s.T(), err) require.False(s.T(), model.IsNoVoteError(err)) @@ -389,14 +389,14 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { s.Run("last-view-tc-invalid-view", func() { // create block where Block.View != Block.QC.View+1 and // Block.View != LastViewTC.View+1 - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), 
helper.WithBlockView(s.bootstrapBlock.View+2))), helper.WithLastViewTC( helper.MakeTC( - helper.WithTCView(s.bootstrapBlock.View)))) + helper.WithTCView(s.bootstrapBlock.View)))))) vote, err := s.safety.ProduceVote(proposal, proposal.Block.View) require.Error(s.T(), err) require.False(s.T(), model.IsNoVoteError(err)) @@ -406,7 +406,7 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { // create block where Block.View != Block.QC.View+1 and // Block.View == LastViewTC.View+1 and Block.QC.View >= Block.View // in this case block is not safe to extend since proposal includes QC which is newer than the proposal itself. - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), @@ -416,7 +416,7 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { })), helper.WithLastViewTC( helper.MakeTC( - helper.WithTCView(s.bootstrapBlock.View+1)))) + helper.WithTCView(s.bootstrapBlock.View+1)))))) vote, err := s.safety.ProduceVote(proposal, proposal.Block.View) require.Error(s.T(), err) require.False(s.T(), model.IsNoVoteError(err)) @@ -428,7 +428,7 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { // in this case block is not safe to extend since proposal is built on top of QC, which is lower // than QC presented in LastViewTC. TONewestQC := helper.MakeQC(helper.WithQCView(s.bootstrapBlock.View + 1)) - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), @@ -436,7 +436,7 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { helper.WithLastViewTC( helper.MakeTC( helper.WithTCView(s.bootstrapBlock.View+1), - helper.WithTCNewestQC(TONewestQC)))) + helper.WithTCNewestQC(TONewestQC)))))) vote, err := s.safety.ProduceVote(proposal, proposal.Block.View) require.Error(s.T(), err) require.False(s.T(), model.IsNoVoteError(err)) @@ -464,13 +464,13 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VoteEquivocation() { require.NotNil(s.T(), vote) require.Equal(s.T(), expectedVote, vote) - equivocatingProposal := helper.MakeProposal( + equivocatingProposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), helper.WithBlockView(s.bootstrapBlock.View+1), helper.WithBlockProposer(s.proposerIdentity.NodeID)), - )) + )))) // voting at same view(even different proposal) should result in NoVoteError vote, err = s.safety.ProduceVote(equivocatingProposal, s.proposal.Block.View) @@ -480,7 +480,7 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VoteEquivocation() { s.proposal.Block.ProposerID = s.ourIdentity.NodeID // proposing at the same view should result in NoVoteError since we have already voted - vote, err = s.safety.SignOwnProposal(s.proposal) + vote, err = s.safety.SignOwnProposal(&s.proposal.Proposal) require.True(s.T(), model.IsNoVoteError(err)) require.Nil(s.T(), vote) } @@ -764,7 +764,7 @@ func (s *SafetyRulesTestSuite) TestSignOwnProposal() { s.committee.On("LeaderForView", s.proposal.Block.View).Return(s.ourIdentity.NodeID, nil).Once() s.signer.On("CreateVote", s.proposal.Block).Return(expectedVote, nil).Once() s.persister.On("PutSafetyData", expectedSafetyData).Return(nil).Once() - vote, err := s.safety.SignOwnProposal(s.proposal) + vote, err := 
s.safety.SignOwnProposal(&s.proposal.Proposal) require.NoError(s.T(), err) require.Equal(s.T(), vote, expectedVote) } @@ -772,7 +772,7 @@ func (s *SafetyRulesTestSuite) TestSignOwnProposal() { // TestSignOwnProposal_ProposalNotSelf tests that we cannot sign a proposal that is not ours. We // verify that SafetyRules returns an exception and not the benign sentinel error NoVoteError. func (s *SafetyRulesTestSuite) TestSignOwnProposal_ProposalNotSelf() { - vote, err := s.safety.SignOwnProposal(s.proposal) + vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal) require.Error(s.T(), err) require.False(s.T(), model.IsNoVoteError(err)) require.Nil(s.T(), vote) @@ -786,7 +786,7 @@ func (s *SafetyRulesTestSuite) TestSignOwnProposal_SelfInvalidLeader() { require.NotEqual(s.T(), otherID, s.ourIdentity.NodeID) s.committee.On("LeaderForView").Unset() s.committee.On("LeaderForView", s.proposal.Block.View).Return(otherID, nil).Once() - vote, err := s.safety.SignOwnProposal(s.proposal) + vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal) require.Error(s.T(), err) require.False(s.T(), model.IsNoVoteError(err)) require.Nil(s.T(), vote) @@ -811,12 +811,12 @@ func (s *SafetyRulesTestSuite) TestSignOwnProposal_ProposalEquivocation() { s.signer.On("CreateVote", s.proposal.Block).Return(expectedVote, nil).Once() s.persister.On("PutSafetyData", expectedSafetyData).Return(nil).Once() - vote, err := s.safety.SignOwnProposal(s.proposal) + vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal) require.NoError(s.T(), err) require.Equal(s.T(), expectedVote, vote) // signing same proposal again should return an error since we have already created a proposal for this view - vote, err = s.safety.SignOwnProposal(s.proposal) + vote, err = s.safety.SignOwnProposal(&s.proposal.Proposal) require.Error(s.T(), err) require.True(s.T(), model.IsNoVoteError(err)) require.Nil(s.T(), vote) diff --git a/consensus/hotstuff/timeoutcollector/timeout_processor_test.go b/consensus/hotstuff/timeoutcollector/timeout_processor_test.go index e5f443d6898..309d1351b98 100644 --- a/consensus/hotstuff/timeoutcollector/timeout_processor_test.go +++ b/consensus/hotstuff/timeoutcollector/timeout_processor_test.go @@ -575,8 +575,9 @@ func createRealQC( block *model.Block, ) *flow.QuorumCertificate { leader := signers[0] - proposal, err := signerObjects[leader.NodeID].CreateProposal(block) + leaderVote, err := signerObjects[leader.NodeID].CreateVote(block) require.NoError(t, err) + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(helper.WithBlock(block))), helper.WithSigData(leaderVote.SigData)) var createdQC *flow.QuorumCertificate onQCCreated := func(qc *flow.QuorumCertificate) { diff --git a/consensus/hotstuff/validator.go b/consensus/hotstuff/validator.go index be3313e9f26..ff40c550a2a 100644 --- a/consensus/hotstuff/validator.go +++ b/consensus/hotstuff/validator.go @@ -24,7 +24,7 @@ type Validator interface { // During normal operations, the following error returns are expected: // * model.InvalidProposalError if the block is invalid // * model.ErrViewForUnknownEpoch if the proposal refers unknown epoch - ValidateProposal(proposal *model.Proposal) error + ValidateProposal(proposal *model.SignedProposal) error // ValidateVote checks the validity of a vote. // Returns the full entity for the voter. 
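The tests above construct signed proposals with helper.MakeSignedProposal(helper.WithProposal(...), helper.WithSigData(...)). A minimal sketch of the functional-options shape such a helper presumably has is below; the option names mirror the calls in this diff, while the types and default values are illustrative stand-ins rather than the real helper package.

package sketch

// Minimal stand-in types; not the real flow-go model package.
type Block struct{ View uint64 }

type Proposal struct{ Block *Block }

type SignedProposal struct {
	Proposal
	SigData []byte
}

// SignedProposalOption mutates a SignedProposal under construction.
type SignedProposalOption func(*SignedProposal)

// WithProposal sets the embedded unsigned Proposal.
func WithProposal(p *Proposal) SignedProposalOption {
	return func(sp *SignedProposal) { sp.Proposal = *p }
}

// WithSigData attaches a proposer signature, e.g. the SigData of the leader's own vote.
func WithSigData(sig []byte) SignedProposalOption {
	return func(sp *SignedProposal) { sp.SigData = sig }
}

// MakeSignedProposal builds a SignedProposal from defaults plus the given options,
// e.g. MakeSignedProposal(WithProposal(p), WithSigData(vote.SigData)).
func MakeSignedProposal(opts ...SignedProposalOption) *SignedProposal {
	sp := &SignedProposal{
		Proposal: Proposal{Block: &Block{View: 1}}, // arbitrary default for tests
		SigData:  []byte{0x01},                     // placeholder signature
	}
	for _, opt := range opts {
		opt(sp)
	}
	return sp
}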
During normal operations, diff --git a/consensus/hotstuff/validator/metrics_wrapper.go b/consensus/hotstuff/validator/metrics_wrapper.go index 8876acef248..5bd2aad9bec 100644 --- a/consensus/hotstuff/validator/metrics_wrapper.go +++ b/consensus/hotstuff/validator/metrics_wrapper.go @@ -40,7 +40,7 @@ func (w ValidatorMetricsWrapper) ValidateTC(tc *flow.TimeoutCertificate) error { return err } -func (w ValidatorMetricsWrapper) ValidateProposal(proposal *model.Proposal) error { +func (w ValidatorMetricsWrapper) ValidateProposal(proposal *model.SignedProposal) error { processStart := time.Now() err := w.validator.ValidateProposal(proposal) w.metrics.ValidatorProcessingDuration(time.Since(processStart)) diff --git a/consensus/hotstuff/validator/validator.go b/consensus/hotstuff/validator/validator.go index 933c3751619..597f0b5360f 100644 --- a/consensus/hotstuff/validator/validator.go +++ b/consensus/hotstuff/validator/validator.go @@ -201,7 +201,7 @@ func (v *Validator) ValidateQC(qc *flow.QuorumCertificate) error { // - model.ErrViewForUnknownEpoch if the proposal refers unknown epoch // // Any other error should be treated as exception -func (v *Validator) ValidateProposal(proposal *model.Proposal) error { +func (v *Validator) ValidateProposal(proposal *model.SignedProposal) error { qc := proposal.Block.QC block := proposal.Block diff --git a/consensus/hotstuff/validator/validator_test.go b/consensus/hotstuff/validator/validator_test.go index 7683d7cbe0b..6c7e91ad0fa 100644 --- a/consensus/hotstuff/validator/validator_test.go +++ b/consensus/hotstuff/validator/validator_test.go @@ -35,7 +35,7 @@ type ProposalSuite struct { parent *model.Block block *model.Block voters flow.IdentitySkeletonList - proposal *model.Proposal + proposal *model.SignedProposal vote *model.Vote voter *flow.IdentitySkeleton committee *mocks.Replicas @@ -70,7 +70,7 @@ func (ps *ProposalSuite) SetupTest() { require.NoError(ps.T(), err) ps.voters = ps.participants.Filter(filter.HasNodeID[flow.Identity](voterIDs...)).ToSkeleton() - ps.proposal = &model.Proposal{Block: ps.block} + ps.proposal = helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(helper.WithBlock(ps.block)))) ps.vote = ps.proposal.ProposerVote() ps.voter = ps.leader @@ -256,7 +256,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { ps.committee.On("LeaderForView", mock.Anything).Return(ps.leader.NodeID, nil) ps.Run("happy-path", func() { - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock(helper.MakeBlock( helper.WithBlockView(ps.block.View+2), helper.WithBlockProposer(ps.leader.NodeID), @@ -267,14 +267,14 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { helper.WithTCSigners(ps.indices), helper.WithTCView(ps.block.View+1), helper.WithTCNewestQC(ps.block.QC))), - ) + ))) ps.verifier.On("VerifyTC", ps.voters, []byte(proposal.LastViewTC.SigData), proposal.LastViewTC.View, proposal.LastViewTC.NewestQCViews).Return(nil).Once() err := ps.validator.ValidateProposal(proposal) require.NoError(ps.T(), err) }) ps.Run("no-tc", func() { - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock(helper.MakeBlock( helper.WithBlockView(ps.block.View+2), helper.WithBlockProposer(ps.leader.NodeID), @@ -282,14 +282,14 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { helper.WithBlockQC(ps.block.QC)), ), // in this case proposal without LastViewTC is considered invalid - ) + ))) err 
:= ps.validator.ValidateProposal(proposal) require.True(ps.T(), model.IsInvalidProposalError(err)) ps.verifier.AssertNotCalled(ps.T(), "VerifyQC") ps.verifier.AssertNotCalled(ps.T(), "VerifyTC") }) ps.Run("tc-for-wrong-view", func() { - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock(helper.MakeBlock( helper.WithBlockView(ps.block.View+2), helper.WithBlockProposer(ps.leader.NodeID), @@ -300,14 +300,14 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { helper.WithTCSigners(ps.indices), helper.WithTCView(ps.block.View+10), // LastViewTC.View must be equal to Block.View-1 helper.WithTCNewestQC(ps.block.QC))), - ) + ))) err := ps.validator.ValidateProposal(proposal) require.True(ps.T(), model.IsInvalidProposalError(err)) ps.verifier.AssertNotCalled(ps.T(), "VerifyQC") ps.verifier.AssertNotCalled(ps.T(), "VerifyTC") }) ps.Run("proposal-not-safe-to-extend", func() { - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock(helper.MakeBlock( helper.WithBlockView(ps.block.View+2), helper.WithBlockProposer(ps.leader.NodeID), @@ -319,14 +319,14 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { helper.WithTCView(ps.block.View+1), // proposal is not safe to extend because included QC.View is higher that Block.QC.View helper.WithTCNewestQC(helper.MakeQC(helper.WithQCView(ps.block.View+1))))), - ) + ))) err := ps.validator.ValidateProposal(proposal) require.True(ps.T(), model.IsInvalidProposalError(err)) ps.verifier.AssertNotCalled(ps.T(), "VerifyQC") ps.verifier.AssertNotCalled(ps.T(), "VerifyTC") }) ps.Run("included-tc-highest-qc-not-highest", func() { - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock(helper.MakeBlock( helper.WithBlockView(ps.block.View+2), helper.WithBlockProposer(ps.leader.NodeID), @@ -338,7 +338,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { helper.WithTCView(ps.block.View+1), helper.WithTCNewestQC(ps.block.QC), )), - ) + ))) ps.verifier.On("VerifyTC", ps.voters, []byte(proposal.LastViewTC.SigData), proposal.LastViewTC.View, mock.Anything).Return(nil).Once() @@ -352,7 +352,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { // TC is signed by only one signer - insufficient to reach weight threshold insufficientSignerIndices, err := signature.EncodeSignersToIndices(ps.participants.NodeIDs(), ps.participants.NodeIDs()[:1]) require.NoError(ps.T(), err) - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock(helper.MakeBlock( helper.WithBlockView(ps.block.View+2), helper.WithBlockProposer(ps.leader.NodeID), @@ -364,7 +364,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { helper.WithTCView(ps.block.View+1), helper.WithTCNewestQC(ps.block.QC), )), - ) + ))) err = ps.validator.ValidateProposal(proposal) require.True(ps.T(), model.IsInvalidProposalError(err) && model.IsInvalidTCError(err)) ps.verifier.AssertNotCalled(ps.T(), "VerifyTC") @@ -375,7 +375,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { helper.WithQCView(ps.block.QC.View-1), helper.WithQCSigners(ps.indices)) - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock(helper.MakeBlock( helper.WithBlockView(ps.block.View+2), helper.WithBlockProposer(ps.leader.NodeID), @@ -386,7 
+386,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { helper.WithTCSigners(ps.indices), helper.WithTCView(ps.block.View+1), helper.WithTCNewestQC(qc))), - ) + ))) ps.verifier.On("VerifyTC", ps.voters, []byte(proposal.LastViewTC.SigData), proposal.LastViewTC.View, proposal.LastViewTC.NewestQCViews).Return(nil).Once() ps.verifier.On("VerifyQC", ps.voters, qc.SigData, @@ -399,7 +399,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { helper.WithQCView(ps.block.QC.View-2), helper.WithQCSigners(ps.indices)) - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock(helper.MakeBlock( helper.WithBlockView(ps.block.View+2), helper.WithBlockProposer(ps.leader.NodeID), @@ -410,7 +410,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { helper.WithTCSigners(ps.indices), helper.WithTCView(ps.block.View+1), helper.WithTCNewestQC(newestQC))), - ) + ))) ps.verifier.On("VerifyTC", ps.voters, []byte(proposal.LastViewTC.SigData), proposal.LastViewTC.View, proposal.LastViewTC.NewestQCViews).Return(nil).Once() // Validating QC included in TC returns ErrViewForUnknownEpoch @@ -423,7 +423,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { require.NotErrorIs(ps.T(), err, model.ErrViewForUnknownEpoch) }) ps.Run("included-tc-invalid-sig", func() { - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock(helper.MakeBlock( helper.WithBlockView(ps.block.View+2), helper.WithBlockProposer(ps.leader.NodeID), @@ -434,7 +434,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { helper.WithTCSigners(ps.indices), helper.WithTCView(ps.block.View+1), helper.WithTCNewestQC(ps.block.QC))), - ) + ))) ps.verifier.On("VerifyTC", ps.voters, []byte(proposal.LastViewTC.SigData), proposal.LastViewTC.View, proposal.LastViewTC.NewestQCViews).Return(model.ErrInvalidSignature).Once() err := ps.validator.ValidateProposal(proposal) @@ -443,7 +443,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { proposal.LastViewTC.View, proposal.LastViewTC.NewestQCViews) }) ps.Run("last-view-successful-but-includes-tc", func() { - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock(helper.MakeBlock( helper.WithBlockView(ps.finalized+1), helper.WithBlockProposer(ps.leader.NodeID), @@ -451,7 +451,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() { helper.WithParentBlock(ps.parent)), ), helper.WithLastViewTC(helper.MakeTC()), - ) + ))) err := ps.validator.ValidateProposal(proposal) require.True(ps.T(), model.IsInvalidProposalError(err)) ps.verifier.AssertNotCalled(ps.T(), "VerifyTC") diff --git a/consensus/hotstuff/verification/combined_signer_v2_test.go b/consensus/hotstuff/verification/combined_signer_v2_test.go index 1ff02a29fdd..4f31f730e72 100644 --- a/consensus/hotstuff/verification/combined_signer_v2_test.go +++ b/consensus/hotstuff/verification/combined_signer_v2_test.go @@ -75,10 +75,10 @@ func TestCombinedSignWithBeaconKey(t *testing.T) { safetyRules, err := safetyrules.New(signer, persist, committee) require.NoError(t, err) - // check that a created proposal can be verified by a verifier + // check that the proposer's vote for their own block (i.e. 
the proposer signature in the header) passes verification vote, err := safetyRules.SignOwnProposal(proposal) require.NoError(t, err) - proposal.SigData = vote.SigData + err = verifier.VerifyVote(&ourIdentity.IdentitySkeleton, vote.SigData, proposal.Block.View, proposal.Block.BlockID) require.NoError(t, err) @@ -92,7 +92,7 @@ func TestCombinedSignWithBeaconKey(t *testing.T) { require.NoError(t, err) expectedSig := msig.EncodeDoubleSig(stakingSig, beaconSig) - require.Equal(t, expectedSig, proposal.SigData) + require.Equal(t, expectedSig, vote.SigData) // vote should be valid vote, err = signer.CreateVote(block) @@ -187,10 +187,9 @@ func TestCombinedSignWithNoBeaconKey(t *testing.T) { safetyRules, err := safetyrules.New(signer, persist, committee) require.NoError(t, err) - // check that a created proposal can be verified by a verifier + // check that the proposer's vote for their own block (i.e. the proposer signature in the header) passes verification vote, err := safetyRules.SignOwnProposal(proposal) require.NoError(t, err) - proposal.SigData = vote.SigData err = verifier.VerifyVote(&ourIdentity.IdentitySkeleton, vote.SigData, proposal.Block.View, proposal.Block.BlockID) require.NoError(t, err) @@ -202,7 +201,7 @@ func TestCombinedSignWithNoBeaconKey(t *testing.T) { msig.NewBLSHasher(msig.ConsensusVoteTag), ) require.NoError(t, err) - require.Equal(t, expectedStakingSig, crypto.Signature(proposal.SigData)) + require.Equal(t, expectedStakingSig, crypto.Signature(vote.SigData)) } // Test_VerifyQC_EmptySigners checks that Verifier returns an `model.InsufficientSignaturesError` diff --git a/consensus/hotstuff/verification/combined_signer_v3.go b/consensus/hotstuff/verification/combined_signer_v3.go index 09651fc4925..a496af91387 100644 --- a/consensus/hotstuff/verification/combined_signer_v3.go +++ b/consensus/hotstuff/verification/combined_signer_v3.go @@ -53,29 +53,6 @@ func NewCombinedSignerV3( return sc } -// CreateProposal will create a proposal with a combined signature for the given block. -func (c *CombinedSignerV3) CreateProposal(block *model.Block) (*model.Proposal, error) { - - // check that the block is created by us - if block.ProposerID != c.staking.NodeID() { - return nil, fmt.Errorf("can't create proposal for someone else's block") - } - - // create the signature data - sigData, err := c.genSigData(block) - if err != nil { - return nil, fmt.Errorf("signing my proposal failed: %w", err) - } - - // create the proposal - proposal := &model.Proposal{ - Block: block, - SigData: sigData, - } - - return proposal, nil -} - // CreateVote will create a vote with a combined signature for the given block. func (c *CombinedSignerV3) CreateVote(block *model.Block) (*model.Vote, error) { diff --git a/consensus/hotstuff/verification/combined_signer_v3_test.go b/consensus/hotstuff/verification/combined_signer_v3_test.go index e655612dcc2..9317268ce59 100644 --- a/consensus/hotstuff/verification/combined_signer_v3_test.go +++ b/consensus/hotstuff/verification/combined_signer_v3_test.go @@ -57,12 +57,11 @@ func TestCombinedSignWithBeaconKeyV3(t *testing.T) { packer := signature.NewConsensusSigDataPacker(committee) verifier := NewCombinedVerifierV3(committee, packer) - // check that a created proposal can be verified by a verifier - proposal, err := signer.CreateProposal(block) + // check that the proposer's vote for their own block (i.e. 
the proposer signature in the header) passes verification + vote, err := signer.CreateVote(block) require.NoError(t, err) - vote := proposal.ProposerVote() - err = verifier.VerifyVote(nodeID, vote.SigData, proposal.Block.View, proposal.Block.BlockID) + err = verifier.VerifyVote(nodeID, vote.SigData, block.View, block.BlockID) require.NoError(t, err) // check that a created proposal's signature is a combined staking sig and random beacon sig @@ -72,14 +71,14 @@ func TestCombinedSignWithBeaconKeyV3(t *testing.T) { require.NoError(t, err) expectedSig := msig.EncodeSingleSig(encoding.SigTypeRandomBeacon, beaconSig) - require.Equal(t, expectedSig, proposal.SigData) + require.Equal(t, expectedSig, vote.SigData) // Vote from a node that is _not_ part of the Random Beacon committee should be rejected. // Specifically, we expect that the verifier recognizes the `protocol.IdentityNotFoundError` // as a sign of an invalid vote and wraps it into a `model.InvalidSignerError`. *dkg = mocks.DKG{} // overwrite DKG mock with a new one dkg.On("KeyShare", signerID).Return(nil, protocol.IdentityNotFoundError{NodeID: signerID}) - err = verifier.VerifyVote(nodeID, vote.SigData, proposal.Block.View, proposal.Block.BlockID) + err = verifier.VerifyVote(nodeID, vote.SigData, block.View, block.BlockID) require.True(t, model.IsInvalidSignerError(err)) } @@ -121,11 +120,11 @@ func TestCombinedSignWithNoBeaconKeyV3(t *testing.T) { packer := signature.NewConsensusSigDataPacker(committee) verifier := NewCombinedVerifierV3(committee, packer) - proposal, err := signer.CreateProposal(block) + // check that the proposer's vote for their own block (i.e. the proposer signature in the header) passes verification + vote, err := signer.CreateVote(block) require.NoError(t, err) - vote := proposal.ProposerVote() - err = verifier.VerifyVote(nodeID, vote.SigData, proposal.Block.View, proposal.Block.BlockID) + err = verifier.VerifyVote(nodeID, vote.SigData, block.View, block.BlockID) require.NoError(t, err) // check that a created proposal's signature is a combined staking sig and random beacon sig @@ -136,7 +135,7 @@ func TestCombinedSignWithNoBeaconKeyV3(t *testing.T) { expectedSig := msig.EncodeSingleSig(encoding.SigTypeStaking, stakingSig) // check the signature only has staking sig - require.Equal(t, expectedSig, proposal.SigData) + require.Equal(t, expectedSig, vote.SigData) } // Test_VerifyQC checks that a QC where either signer list is empty is rejected as invalid diff --git a/consensus/hotstuff/verification/staking_signer.go b/consensus/hotstuff/verification/staking_signer.go index bbc590d2e07..cc1b9ca1291 100644 --- a/consensus/hotstuff/verification/staking_signer.go +++ b/consensus/hotstuff/verification/staking_signer.go @@ -40,29 +40,6 @@ func NewStakingSigner( return sc } -// CreateProposal will create a proposal with a staking signature for the given block. -func (c *StakingSigner) CreateProposal(block *model.Block) (*model.Proposal, error) { - - // check that the block is created by us - if block.ProposerID != c.signerID { - return nil, fmt.Errorf("can't create proposal for someone else's block") - } - - // create the signature data - sigData, err := c.genSigData(block) - if err != nil { - return nil, fmt.Errorf("signing my proposal failed: %w", err) - } - - // create the proposal - proposal := &model.Proposal{ - Block: block, - SigData: sigData, - } - - return proposal, nil -} - // CreateVote will create a vote with a staking signature for the given block. 
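CreateProposal is deleted from both the combined and staking signers above; the updated tests instead obtain the leader's vote and wrap it together with the unsigned proposal. A hedged sketch of that flow follows, using simplified stand-in types; the real signer and helper signatures may differ.

package sketch

type Block struct {
	View       uint64
	ProposerID [32]byte
}

type Proposal struct{ Block *Block }

type SignedProposal struct {
	Proposal
	SigData []byte
}

type Vote struct{ SigData []byte }

// Signer is a narrowed view of the hotstuff signer after this change:
// it only creates votes, never fully signed proposals.
type Signer interface {
	CreateVote(block *Block) (*Vote, error)
}

// buildOwnSignedProposal shows the pattern the updated tests follow: the
// leader's vote for its own block doubles as the proposer signature.
func buildOwnSignedProposal(s Signer, block *Block) (*SignedProposal, error) {
	vote, err := s.CreateVote(block)
	if err != nil {
		return nil, err
	}
	return &SignedProposal{
		Proposal: Proposal{Block: block},
		SigData:  vote.SigData,
	}, nil
}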
func (c *StakingSigner) CreateVote(block *model.Block) (*model.Vote, error) { diff --git a/consensus/hotstuff/verification/staking_signer_test.go b/consensus/hotstuff/verification/staking_signer_test.go index 69f31bdfed3..6fc4d14fdc5 100644 --- a/consensus/hotstuff/verification/staking_signer_test.go +++ b/consensus/hotstuff/verification/staking_signer_test.go @@ -15,57 +15,6 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -// TestStakingSigner_CreateProposal verifies that StakingSigner can produce correctly signed proposal -// that can be verified later using StakingVerifier. -// Additionally, we check cases where errors during signing are happening. -func TestStakingSigner_CreateProposal(t *testing.T) { - stakingPriv := unittest.StakingPrivKeyFixture() - signer := unittest.IdentityFixture() - signerID := signer.NodeID - signer.StakingPubKey = stakingPriv.PublicKey() - - t.Run("invalid-signer-id", func(t *testing.T) { - me := &modulemock.Local{} - me.On("NodeID").Return(signerID) - signer := NewStakingSigner(me) - - block := helper.MakeBlock() - proposal, err := signer.CreateProposal(block) - require.Error(t, err) - require.Nil(t, proposal) - }) - t.Run("could-not-sign", func(t *testing.T) { - signException := errors.New("sign-exception") - me := &modulemock.Local{} - me.On("NodeID").Return(signerID) - me.On("Sign", mock.Anything, mock.Anything).Return(nil, signException).Once() - signer := NewStakingSigner(me) - - block := helper.MakeBlock() - proposal, err := signer.CreateProposal(block) - require.ErrorAs(t, err, &signException) - require.Nil(t, proposal) - }) - t.Run("created-proposal", func(t *testing.T) { - me, err := local.New(signer.IdentitySkeleton, stakingPriv) - require.NoError(t, err) - - signerIdentity := &unittest.IdentityFixture(unittest.WithNodeID(signerID), - unittest.WithStakingPubKey(stakingPriv.PublicKey())).IdentitySkeleton - - signer := NewStakingSigner(me) - - block := helper.MakeBlock(helper.WithBlockProposer(signerID)) - proposal, err := signer.CreateProposal(block) - require.NoError(t, err) - require.NotNil(t, proposal) - - verifier := NewStakingVerifier() - err = verifier.VerifyVote(signerIdentity, proposal.SigData, proposal.Block.View, proposal.Block.BlockID) - require.NoError(t, err) - }) -} - // TestStakingSigner_CreateVote verifies that StakingSigner can produce correctly signed vote // that can be verified later using StakingVerifier. // Additionally, we check cases where errors during signing are happening. @@ -83,7 +32,7 @@ func TestStakingSigner_CreateVote(t *testing.T) { signer := NewStakingSigner(me) block := helper.MakeBlock() - proposal, err := signer.CreateProposal(block) + proposal, err := signer.CreateVote(block) require.ErrorAs(t, err, &signException) require.Nil(t, proposal) }) diff --git a/consensus/hotstuff/vote_aggregator.go b/consensus/hotstuff/vote_aggregator.go index 14dc4f7dc2f..7c9bbcaad01 100644 --- a/consensus/hotstuff/vote_aggregator.go +++ b/consensus/hotstuff/vote_aggregator.go @@ -25,12 +25,12 @@ type VoteAggregator interface { // CAUTION: we expect that the input block's validity has been confirmed prior to calling AddBlock, // including the proposer's signature. Otherwise, VoteAggregator might crash or exhibit undefined // behaviour. - AddBlock(block *model.Proposal) + AddBlock(block *model.SignedProposal) // InvalidBlock notifies the VoteAggregator about an invalid proposal, so that it // can process votes for the invalid block and slash the voters. 
// No errors are expected during normal operations - InvalidBlock(block *model.Proposal) error + InvalidBlock(block *model.SignedProposal) error // PruneUpToView deletes all votes _below_ to the given view, as well as // related indices. We only retain and process whose view is equal or larger diff --git a/consensus/hotstuff/vote_collector.go b/consensus/hotstuff/vote_collector.go index 157ef5338a7..3a259808dc4 100644 --- a/consensus/hotstuff/vote_collector.go +++ b/consensus/hotstuff/vote_collector.go @@ -61,7 +61,7 @@ type VoteCollector interface { // It returns nil if the block is valid. // It returns model.InvalidProposalError if block is invalid. // It returns other error if there is exception processing the block. - ProcessBlock(block *model.Proposal) error + ProcessBlock(block *model.SignedProposal) error // AddVote adds a vote to the collector // When enough votes have been added to produce a QC, the QC will be created asynchronously, and @@ -116,5 +116,5 @@ type VoteProcessorFactory interface { // Caller can be sure that proposal vote was successfully verified and processed. // Expected error returns during normal operations: // * model.InvalidProposalError - proposal has invalid proposer vote - Create(log zerolog.Logger, proposal *model.Proposal) (VerifyingVoteProcessor, error) + Create(log zerolog.Logger, proposal *model.SignedProposal) (VerifyingVoteProcessor, error) } diff --git a/consensus/hotstuff/voteaggregator/vote_aggregator.go b/consensus/hotstuff/voteaggregator/vote_aggregator.go index 6471cc6ada6..efb2e476bfc 100644 --- a/consensus/hotstuff/voteaggregator/vote_aggregator.go +++ b/consensus/hotstuff/voteaggregator/vote_aggregator.go @@ -156,7 +156,7 @@ func (va *VoteAggregator) processQueuedMessages(ctx context.Context) error { msg, ok := va.queuedBlocks.Pop() if ok { - block := msg.(*model.Proposal) + block := msg.(*model.SignedProposal) err := va.processQueuedBlock(block) if err != nil { return fmt.Errorf("could not process pending block %v: %w", block.Block.BlockID, err) @@ -224,7 +224,7 @@ func (va *VoteAggregator) processQueuedVote(vote *model.Vote) error { // including the proposer's signature. Otherwise, VoteAggregator might crash or exhibit undefined // behaviour. // No errors are expected during normal operation. -func (va *VoteAggregator) processQueuedBlock(block *model.Proposal) error { +func (va *VoteAggregator) processQueuedBlock(block *model.SignedProposal) error { // check if the block is for a view that has already been pruned (and is thus stale) if block.Block.View < va.lowestRetainedView.Value() { return nil @@ -293,7 +293,7 @@ func (va *VoteAggregator) AddVote(vote *model.Vote) { // CAUTION: we expect that the input block's validity has been confirmed prior to calling AddBlock, // including the proposer's signature. Otherwise, VoteAggregator might crash or exhibit undefined // behaviour. -func (va *VoteAggregator) AddBlock(block *model.Proposal) { +func (va *VoteAggregator) AddBlock(block *model.SignedProposal) { // It's ok to silently drop blocks in case our processing pipeline is full. // It means that we are probably catching up. if ok := va.queuedBlocks.Push(block); ok { @@ -306,7 +306,7 @@ func (va *VoteAggregator) AddBlock(block *model.Proposal) { // InvalidBlock notifies the VoteAggregator about an invalid proposal, so that it // can process votes for the invalid block and slash the voters. 
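VoteProcessorFactory.Create above now receives a *model.SignedProposal and is documented to verify the proposer's own vote before handing out a processor. The sketch below shows how a ProposerVote() accessor plausibly reassembles that vote from the block fields plus the proposal's signature; the actual flow-go implementation is not part of this diff, so the field names are assumptions.

package sketch

type Block struct {
	View       uint64
	BlockID    [32]byte
	ProposerID [32]byte
}

type Proposal struct{ Block *Block }

type SignedProposal struct {
	Proposal
	SigData []byte
}

type Vote struct {
	View     uint64
	BlockID  [32]byte
	SignerID [32]byte
	SigData  []byte
}

// ProposerVote reinterprets the proposer's signature on the block as an
// ordinary vote, so it can run through the normal vote verification path and
// later be aggregated into a QC together with the other replicas' votes.
func (sp *SignedProposal) ProposerVote() *Vote {
	return &Vote{
		View:     sp.Block.View,
		BlockID:  sp.Block.BlockID,
		SignerID: sp.Block.ProposerID,
		SigData:  sp.SigData,
	}
}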
// No errors are expected during normal operations -func (va *VoteAggregator) InvalidBlock(proposal *model.Proposal) error { +func (va *VoteAggregator) InvalidBlock(proposal *model.SignedProposal) error { slashingVoteConsumer := func(vote *model.Vote) { if proposal.Block.BlockID == vote.BlockID { va.notifier.OnVoteForInvalidBlockDetected(vote, proposal) diff --git a/consensus/hotstuff/voteaggregator/vote_aggregator_test.go b/consensus/hotstuff/voteaggregator/vote_aggregator_test.go index 006ab52b744..acc88729eb1 100644 --- a/consensus/hotstuff/voteaggregator/vote_aggregator_test.go +++ b/consensus/hotstuff/voteaggregator/vote_aggregator_test.go @@ -84,13 +84,13 @@ func (s *VoteAggregatorTestSuite) TestOnFinalizedBlock() { // an input to AddBlock (only expects _valid_ blocks per API contract). // The exception should be propagated to the VoteAggregator's internal `ComponentManager`. func (s *VoteAggregatorTestSuite) TestProcessInvalidBlock() { - block := helper.MakeProposal( + block := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithBlockView(100), ), ), - ) + ))) processed := make(chan struct{}) collector := mocks.NewVoteCollector(s.T()) collector.On("ProcessBlock", block).Run(func(_ mock.Arguments) { diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go b/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go index 5b4abf1691c..74c07af04bc 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go @@ -57,7 +57,7 @@ func (s *CombinedVoteProcessorV2TestSuite) SetupTest() { s.reconstructor = &mockhotstuff.RandomBeaconReconstructor{} s.packer = &mockhotstuff.Packer{} - s.proposal = helper.MakeProposal() + s.proposal = helper.MakeSignedProposal() s.minRequiredShares = 9 // we require 9 RB shares to reconstruct signature s.rbSharesTotal = 0 @@ -894,7 +894,7 @@ func TestCombinedVoteProcessorV2_BuildVerifyQC(t *testing.T) { require.NoError(t, err) vote, err := safetyRules.SignOwnProposal(proposal) require.NoError(t, err) - proposal.SigData = vote.SigData + signedProposal := helper.MakeSignedProposal(helper.WithProposal(proposal), helper.WithSigData(vote.SigData)) qcCreated := false onQCCreated := func(qc *flow.QuorumCertificate) { @@ -912,7 +912,7 @@ func TestCombinedVoteProcessorV2_BuildVerifyQC(t *testing.T) { } voteProcessorFactory := NewCombinedVoteProcessorFactory(committee, onQCCreated) - voteProcessor, err := voteProcessorFactory.Create(unittest.Logger(), proposal) + voteProcessor, err := voteProcessorFactory.Create(unittest.Logger(), signedProposal) require.NoError(t, err) // process votes by new leader, this will result in producing new QC diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go b/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go index f4e7e6c385c..f68f5dedaff 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go @@ -58,7 +58,7 @@ func (s *CombinedVoteProcessorV3TestSuite) SetupTest() { s.rbSigAggregator = &mockhotstuff.WeightedSignatureAggregator{} s.reconstructor = &mockhotstuff.RandomBeaconReconstructor{} s.packer = &mockhotstuff.Packer{} - s.proposal = helper.MakeProposal() + s.proposal = helper.MakeSignedProposal() s.minRequiredShares = 9 // we require 9 RB shares to reconstruct signature s.thresholdTotalWeight, s.rbSharesTotal 
= atomic.Uint64{}, atomic.Uint64{} @@ -981,9 +981,7 @@ func TestCombinedVoteProcessorV3_BuildVerifyQC(t *testing.T) { } leader := stakingSigners[0] - - block := helper.MakeBlock(helper.WithBlockView(proposerView), - helper.WithBlockProposer(leader.NodeID)) + block := helper.MakeBlock(helper.WithBlockView(proposerView), helper.WithBlockProposer(leader.NodeID)) inmemDKG, err := inmem.DKGFromEncodable(inmem.EncodableDKG{ GroupKey: encodable.RandomBeaconPubKey{ @@ -1010,8 +1008,9 @@ func TestCombinedVoteProcessorV3_BuildVerifyQC(t *testing.T) { } // create and sign proposal - proposal, err := signers[leader.NodeID].CreateProposal(block) + leaderVote, err := signers[leader.NodeID].CreateVote(block) require.NoError(t, err) + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(helper.WithBlock(block))), helper.WithSigData(leaderVote.SigData)) qcCreated := false onQCCreated := func(qc *flow.QuorumCertificate) { diff --git a/consensus/hotstuff/votecollector/factory.go b/consensus/hotstuff/votecollector/factory.go index 2c515fc052c..b444bc35ca7 100644 --- a/consensus/hotstuff/votecollector/factory.go +++ b/consensus/hotstuff/votecollector/factory.go @@ -16,7 +16,7 @@ import ( // CAUTION: the baseFactory creates the VerifyingVoteProcessor for the given block. It // does _not_ check the proposer's vote for its own block. The API reflects this by // expecting a `model.Block` as input (which does _not_ contain the proposer vote) as -// opposed to `model.Proposal` (combines block with proposer's vote). +// opposed to `model.SignedProposal` (combines block with proposer's vote). // Therefore, baseFactory does _not_ implement `hotstuff.VoteProcessorFactory` by itself. // The VoteProcessorFactory adds the missing logic to verify the proposer's vote, by // wrapping the baseFactory (decorator pattern). @@ -40,7 +40,7 @@ var _ hotstuff.VoteProcessorFactory = (*VoteProcessorFactory)(nil) // A VerifyingVoteProcessor are only created for proposals with valid proposer votes. 
// Expected error returns during normal operations: // * model.InvalidProposalError - proposal has invalid proposer vote -func (f *VoteProcessorFactory) Create(log zerolog.Logger, proposal *model.Proposal) (hotstuff.VerifyingVoteProcessor, error) { +func (f *VoteProcessorFactory) Create(log zerolog.Logger, proposal *model.SignedProposal) (hotstuff.VerifyingVoteProcessor, error) { processor, err := f.baseFactory(log, proposal.Block) if err != nil { return nil, fmt.Errorf("instantiating vote processor for block %v failed: %w", proposal.Block.BlockID, err) diff --git a/consensus/hotstuff/votecollector/factory_test.go b/consensus/hotstuff/votecollector/factory_test.go index 9adeaef98f8..40207150e86 100644 --- a/consensus/hotstuff/votecollector/factory_test.go +++ b/consensus/hotstuff/votecollector/factory_test.go @@ -19,7 +19,7 @@ import ( func TestVoteProcessorFactory_CreateWithValidProposal(t *testing.T) { mockedFactory := mockhotstuff.VoteProcessorFactory{} - proposal := helper.MakeProposal() + proposal := helper.MakeSignedProposal() mockedProcessor := &mockhotstuff.VerifyingVoteProcessor{} mockedProcessor.On("Process", proposal.ProposerVote()).Return(nil).Once() mockedFactory.On("Create", unittest.Logger(), proposal).Return(mockedProcessor, nil).Once() @@ -44,7 +44,7 @@ func TestVoteProcessorFactory_CreateWithInvalidVote(t *testing.T) { mockedFactory := mockhotstuff.VoteProcessorFactory{} t.Run("invalid-vote", func(t *testing.T) { - proposal := helper.MakeProposal() + proposal := helper.MakeSignedProposal() mockedProcessor := &mockhotstuff.VerifyingVoteProcessor{} mockedProcessor.On("Process", proposal.ProposerVote()).Return(model.NewInvalidVoteErrorf(proposal.ProposerVote(), "")).Once() mockedFactory.On("Create", unittest.Logger(), proposal).Return(mockedProcessor, nil).Once() @@ -63,7 +63,7 @@ func TestVoteProcessorFactory_CreateWithInvalidVote(t *testing.T) { mockedProcessor.AssertExpectations(t) }) t.Run("process-vote-exception", func(t *testing.T) { - proposal := helper.MakeProposal() + proposal := helper.MakeSignedProposal() mockedProcessor := &mockhotstuff.VerifyingVoteProcessor{} exception := errors.New("process-exception") mockedProcessor.On("Process", proposal.ProposerVote()).Return(exception).Once() @@ -93,7 +93,7 @@ func TestVoteProcessorFactory_CreateWithInvalidVote(t *testing.T) { func TestVoteProcessorFactory_CreateProcessException(t *testing.T) { mockedFactory := mockhotstuff.VoteProcessorFactory{} - proposal := helper.MakeProposal() + proposal := helper.MakeSignedProposal() exception := errors.New("create-exception") mockedFactory.On("Create", unittest.Logger(), proposal).Return(nil, exception).Once() diff --git a/consensus/hotstuff/votecollector/staking_vote_processor_test.go b/consensus/hotstuff/votecollector/staking_vote_processor_test.go index b536a8ab1da..1b096419c4d 100644 --- a/consensus/hotstuff/votecollector/staking_vote_processor_test.go +++ b/consensus/hotstuff/votecollector/staking_vote_processor_test.go @@ -285,8 +285,10 @@ func TestStakingVoteProcessorV2_BuildVerifyQC(t *testing.T) { } // create and sign proposal - proposal, err := signers[leader.NodeID].CreateProposal(block) + leaderVote, err := signers[leader.NodeID].CreateVote(block) require.NoError(t, err) + proposal := helper.MakeSignedProposal(helper.WithProposal( + helper.MakeProposal(helper.WithBlock(block))), helper.WithSigData(leaderVote.SigData)) qcCreated := false onQCCreated := func(qc *flow.QuorumCertificate) { diff --git a/consensus/hotstuff/votecollector/statemachine.go 
b/consensus/hotstuff/votecollector/statemachine.go index d62159ea9ef..60558cf2aaf 100644 --- a/consensus/hotstuff/votecollector/statemachine.go +++ b/consensus/hotstuff/votecollector/statemachine.go @@ -18,7 +18,7 @@ var ( ) // VerifyingVoteProcessorFactory generates hotstuff.VerifyingVoteCollector instances -type VerifyingVoteProcessorFactory = func(log zerolog.Logger, proposal *model.Proposal) (hotstuff.VerifyingVoteProcessor, error) +type VerifyingVoteProcessorFactory = func(log zerolog.Logger, proposal *model.SignedProposal) (hotstuff.VerifyingVoteProcessor, error) // VoteCollector implements a state machine for transition between different states of vote collector type VoteCollector struct { @@ -175,7 +175,7 @@ func (m *VoteCollector) View() uint64 { // CachingVotes -> VerifyingVotes // CachingVotes -> Invalid // VerifyingVotes -> Invalid -func (m *VoteCollector) ProcessBlock(proposal *model.Proposal) error { +func (m *VoteCollector) ProcessBlock(proposal *model.SignedProposal) error { if proposal.Block.View != m.View() { return fmt.Errorf("this VoteCollector requires a proposal for view %d but received block %v with view %d", @@ -243,7 +243,7 @@ func (m *VoteCollector) RegisterVoteConsumer(consumer hotstuff.VoteConsumer) { // Error returns: // * ErrDifferentCollectorState if the VoteCollector's state is _not_ `CachingVotes` // * all other errors are unexpected and potential symptoms of internal bugs or state corruption (fatal) -func (m *VoteCollector) caching2Verifying(proposal *model.Proposal) error { +func (m *VoteCollector) caching2Verifying(proposal *model.SignedProposal) error { blockID := proposal.Block.BlockID newProc, err := m.createVerifyingProcessor(m.log, proposal) if err != nil { diff --git a/consensus/hotstuff/votecollector/statemachine_test.go b/consensus/hotstuff/votecollector/statemachine_test.go index 007dcce1fe2..1f6409c3136 100644 --- a/consensus/hotstuff/votecollector/statemachine_test.go +++ b/consensus/hotstuff/votecollector/statemachine_test.go @@ -51,7 +51,7 @@ func (s *StateMachineTestSuite) SetupTest() { s.mockedProcessors = make(map[flow.Identifier]*mocks.VerifyingVoteProcessor) s.notifier = mocks.NewVoteAggregationConsumer(s.T()) - s.factoryMethod = func(log zerolog.Logger, block *model.Proposal) (hotstuff.VerifyingVoteProcessor, error) { + s.factoryMethod = func(log zerolog.Logger, block *model.SignedProposal) (hotstuff.VerifyingVoteProcessor, error) { if processor, found := s.mockedProcessors[block.Block.BlockID]; found { return processor, nil } @@ -64,7 +64,7 @@ func (s *StateMachineTestSuite) SetupTest() { // prepareMockedProcessor prepares a mocked processor and stores it in map, later it will be used // to mock behavior of verifying vote processor. 
-func (s *StateMachineTestSuite) prepareMockedProcessor(proposal *model.Proposal) *mocks.VerifyingVoteProcessor { +func (s *StateMachineTestSuite) prepareMockedProcessor(proposal *model.SignedProposal) *mocks.VerifyingVoteProcessor { processor := &mocks.VerifyingVoteProcessor{} processor.On("Block").Return(func() *model.Block { return proposal.Block @@ -78,7 +78,7 @@ func (s *StateMachineTestSuite) prepareMockedProcessor(proposal *model.Proposal) // when proposal processing can possibly change state of collector func (s *StateMachineTestSuite) TestStatus_StateTransitions() { block := helper.MakeBlock(helper.WithBlockView(s.view)) - proposal := helper.MakeProposal(helper.WithBlock(block)) + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(helper.WithBlock(block)))) s.prepareMockedProcessor(proposal) // by default, we should create in caching status @@ -90,9 +90,7 @@ func (s *StateMachineTestSuite) TestStatus_StateTransitions() { require.Equal(s.T(), hotstuff.VoteCollectorStatusVerifying, s.collector.Status()) // after submitting double proposal we should transfer into invalid state - err = s.collector.ProcessBlock(helper.MakeProposal( - helper.WithBlock( - helper.MakeBlock(helper.WithBlockView(s.view))))) + err = s.collector.ProcessBlock(makeSignedProposalWithView(s.view)) require.NoError(s.T(), err) require.Equal(s.T(), hotstuff.VoteCollectorStatusInvalid, s.collector.Status()) } @@ -101,13 +99,14 @@ func (s *StateMachineTestSuite) TestStatus_StateTransitions() { // factory are handed through (potentially wrapped), but are not replaced. func (s *StateMachineTestSuite) Test_FactoryErrorPropagation() { factoryError := errors.New("factory error") - factory := func(log zerolog.Logger, block *model.Proposal) (hotstuff.VerifyingVoteProcessor, error) { + factory := func(log zerolog.Logger, block *model.SignedProposal) (hotstuff.VerifyingVoteProcessor, error) { return nil, factoryError } s.collector.createVerifyingProcessor = factory // failing to create collector has to result in error and won't change state - err := s.collector.ProcessBlock(helper.MakeProposal(helper.WithBlock(helper.MakeBlock(helper.WithBlockView(s.view))))) + proposal := makeSignedProposalWithView(s.view) + err := s.collector.ProcessBlock(proposal) require.ErrorIs(s.T(), err, factoryError) require.Equal(s.T(), hotstuff.VoteCollectorStatusCaching, s.collector.Status()) } @@ -115,8 +114,8 @@ func (s *StateMachineTestSuite) Test_FactoryErrorPropagation() { // TestAddVote_VerifyingState tests that AddVote correctly process valid and invalid votes as well // as repeated, invalid and double votes in verifying state func (s *StateMachineTestSuite) TestAddVote_VerifyingState() { - block := helper.MakeBlock(helper.WithBlockView(s.view)) - proposal := helper.MakeProposal(helper.WithBlock(block)) + proposal := makeSignedProposalWithView(s.view) + block := proposal.Block processor := s.prepareMockedProcessor(proposal) err := s.collector.ProcessBlock(proposal) require.NoError(s.T(), err) @@ -203,8 +202,8 @@ func (s *StateMachineTestSuite) TestAddVote_VerifyingState() { // are sent to vote processor func (s *StateMachineTestSuite) TestProcessBlock_ProcessingOfCachedVotes() { votes := 10 - block := helper.MakeBlock(helper.WithBlockView(s.view)) - proposal := helper.MakeProposal(helper.WithBlock(block)) + proposal := makeSignedProposalWithView(s.view) + block := proposal.Block processor := s.prepareMockedProcessor(proposal) for i := 0; i < votes; i++ { vote := unittest.VoteForBlockFixture(block) @@ -226,11 +225,12 
@@ func (s *StateMachineTestSuite) TestProcessBlock_ProcessingOfCachedVotes() { // Test_VoteProcessorErrorPropagation verifies that unexpected errors from the `VoteProcessor` // are propagated up the call stack (potentially wrapped), but are not replaced. func (s *StateMachineTestSuite) Test_VoteProcessorErrorPropagation() { - block := helper.MakeBlock(helper.WithBlockView(s.view)) - proposal := helper.MakeProposal(helper.WithBlock(block)) + proposal := makeSignedProposalWithView(s.view) + block := proposal.Block processor := s.prepareMockedProcessor(proposal) - err := s.collector.ProcessBlock(helper.MakeProposal(helper.WithBlock(block))) + err := s.collector.ProcessBlock(helper.MakeSignedProposal( + helper.WithProposal(helper.MakeProposal(helper.WithBlock(block))))) require.NoError(s.T(), err) unexpectedError := errors.New("some unexpected error") @@ -244,8 +244,8 @@ func (s *StateMachineTestSuite) Test_VoteProcessorErrorPropagation() { // in strict ordering of arrival. func (s *StateMachineTestSuite) RegisterVoteConsumer() { votes := 10 - block := helper.MakeBlock(helper.WithBlockView(s.view)) - proposal := helper.MakeProposal(helper.WithBlock(block)) + proposal := makeSignedProposalWithView(s.view) + block := proposal.Block processor := s.prepareMockedProcessor(proposal) expectedVotes := make([]*model.Vote, 0) for i := 0; i < votes; i++ { @@ -273,3 +273,7 @@ func (s *StateMachineTestSuite) RegisterVoteConsumer() { require.Equal(s.T(), expectedVotes, actualVotes) } + +func makeSignedProposalWithView(view uint64) *model.SignedProposal { + return helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(helper.WithBlock(helper.MakeBlock(helper.WithBlockView(view)))))) +} diff --git a/consensus/hotstuff/votecollector/testutil.go b/consensus/hotstuff/votecollector/testutil.go index e36aca23170..4c9f2d288e2 100644 --- a/consensus/hotstuff/votecollector/testutil.go +++ b/consensus/hotstuff/votecollector/testutil.go @@ -22,12 +22,12 @@ type VoteProcessorTestSuiteBase struct { stakingAggregator *mockhotstuff.WeightedSignatureAggregator minRequiredWeight uint64 - proposal *model.Proposal + proposal *model.SignedProposal } func (s *VoteProcessorTestSuiteBase) SetupTest() { s.stakingAggregator = &mockhotstuff.WeightedSignatureAggregator{} - s.proposal = helper.MakeProposal() + s.proposal = helper.MakeSignedProposal() // let's assume we have 19 nodes each with weight 100 s.sigWeight = 100 diff --git a/consensus/recovery/recover.go b/consensus/recovery/recover.go index a470aedc3ce..1d85eeab65e 100644 --- a/consensus/recovery/recover.go +++ b/consensus/recovery/recover.go @@ -12,7 +12,7 @@ import ( // BlockScanner describes a function for ingesting pending blocks. // Any returned errors are considered fatal. -type BlockScanner func(proposal *model.Proposal) error +type BlockScanner func(proposal *model.SignedProposal) error // Recover is a utility method for recovering the HotStuff state after a restart. // It receives the list `pending` containing _all_ blocks that @@ -27,7 +27,7 @@ func Recover(log zerolog.Logger, pending []*flow.Header, scanners ...BlockScanne // add all pending blocks to forks for _, header := range pending { - proposal := model.ProposalFromFlow(header) // convert the header into a proposal + proposal := model.SignedProposalFromFlow(header) // convert the header into a proposal for _, s := range scanners { err := s(proposal) if err != nil { @@ -48,7 +48,7 @@ func Recover(log zerolog.Logger, pending []*flow.Header, scanners ...BlockScanne // finalized block. 
Caution, input blocks must be valid and in parent-first order // (unless parent is the latest finalized block). func ForksState(forks hotstuff.Forks) BlockScanner { - return func(proposal *model.Proposal) error { + return func(proposal *model.SignedProposal) error { err := forks.AddValidatedBlock(proposal.Block) if err != nil { return fmt.Errorf("could not add block %v to forks: %w", proposal.Block.BlockID, err) @@ -63,7 +63,7 @@ func ForksState(forks hotstuff.Forks) BlockScanner { // // Caution: input blocks must be valid. func VoteAggregatorState(voteAggregator hotstuff.VoteAggregator) BlockScanner { - return func(proposal *model.Proposal) error { + return func(proposal *model.SignedProposal) error { voteAggregator.AddBlock(proposal) return nil } @@ -72,7 +72,7 @@ func VoteAggregatorState(voteAggregator hotstuff.VoteAggregator) BlockScanner { // CollectParentQCs collects all parent QCs included in the blocks descending from the // latest finalized block. Caution, input blocks must be valid. func CollectParentQCs(collector Collector[*flow.QuorumCertificate]) BlockScanner { - return func(proposal *model.Proposal) error { + return func(proposal *model.SignedProposal) error { qc := proposal.Block.QC if qc != nil { collector.Append(qc) @@ -84,7 +84,7 @@ func CollectParentQCs(collector Collector[*flow.QuorumCertificate]) BlockScanner // CollectTCs collect all TCs included in the blocks descending from the // latest finalized block. Caution, input blocks must be valid. func CollectTCs(collector Collector[*flow.TimeoutCertificate]) BlockScanner { - return func(proposal *model.Proposal) error { + return func(proposal *model.SignedProposal) error { tc := proposal.LastViewTC if tc != nil { collector.Append(tc) diff --git a/consensus/recovery/recover_test.go b/consensus/recovery/recover_test.go index ac0fb0c3d4f..f3db1a6c42b 100644 --- a/consensus/recovery/recover_test.go +++ b/consensus/recovery/recover_test.go @@ -19,8 +19,8 @@ func TestRecover(t *testing.T) { } // Recover with `pending` blocks and record what blocks are forwarded to `onProposal` - recovered := make([]*model.Proposal, 0) - scanner := func(block *model.Proposal) error { + recovered := make([]*model.SignedProposal, 0) + scanner := func(block *model.SignedProposal) error { recovered = append(recovered, block) return nil } @@ -30,12 +30,12 @@ func TestRecover(t *testing.T) { // should forward blocks in exact order, just converting flow.Header to pending block require.Len(t, recovered, len(pending)) for i, r := range recovered { - require.Equal(t, model.ProposalFromFlow(pending[i]), r) + require.Equal(t, model.SignedProposalFromFlow(pending[i]), r) } } func TestRecoverEmptyInput(t *testing.T) { - scanner := func(block *model.Proposal) error { + scanner := func(block *model.SignedProposal) error { require.Fail(t, "no proposal expected") return nil } diff --git a/engine/access/access_test.go b/engine/access/access_test.go index f592fd386fc..7d12c70cdcf 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -638,6 +638,8 @@ func (suite *Suite) TestGetSealedTransaction() { require.NoError(suite.T(), err) blocksToMarkExecuted, err := stdmap.NewTimes(100) require.NoError(suite.T(), err) + blockTransactions, err := stdmap.NewIdentifierMap(100) + require.NoError(suite.T(), err) execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( suite.log, @@ -678,6 +680,7 @@ func (suite *Suite) TestGetSealedTransaction() { blocksToMarkExecuted, collections, all.Blocks, + blockTransactions, ) 
require.NoError(suite.T(), err) @@ -826,6 +829,8 @@ func (suite *Suite) TestGetTransactionResult() { require.NoError(suite.T(), err) blocksToMarkExecuted, err := stdmap.NewTimes(100) require.NoError(suite.T(), err) + blockTransactions, err := stdmap.NewIdentifierMap(100) + require.NoError(suite.T(), err) execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( suite.log, @@ -865,6 +870,7 @@ func (suite *Suite) TestGetTransactionResult() { blocksToMarkExecuted, collections, all.Blocks, + blockTransactions, ) require.NoError(suite.T(), err) @@ -1105,6 +1111,8 @@ func (suite *Suite) TestExecuteScript() { require.NoError(suite.T(), err) blocksToMarkExecuted, err := stdmap.NewTimes(100) require.NoError(suite.T(), err) + blockTransactions, err := stdmap.NewIdentifierMap(100) + require.NoError(suite.T(), err) collectionExecutedMetric, err := indexer.NewCollectionExecutedMetricImpl( suite.log, @@ -1114,6 +1122,7 @@ func (suite *Suite) TestExecuteScript() { blocksToMarkExecuted, collections, all.Blocks, + blockTransactions, ) require.NoError(suite.T(), err) diff --git a/engine/access/ingestion/engine_test.go b/engine/access/ingestion/engine_test.go index dd6404390fc..2f5b0169b34 100644 --- a/engine/access/ingestion/engine_test.go +++ b/engine/access/ingestion/engine_test.go @@ -132,6 +132,8 @@ func (s *Suite) SetupTest() { require.NoError(s.T(), err) blocksToMarkExecuted, err := stdmap.NewTimes(100) require.NoError(s.T(), err) + blockTransactions, err := stdmap.NewIdentifierMap(100) + require.NoError(s.T(), err) s.proto.state.On("Identity").Return(s.obsIdentity, nil) s.proto.state.On("Params").Return(s.proto.params) @@ -177,6 +179,7 @@ func (s *Suite) SetupTest() { blocksToMarkExecuted, s.collections, s.blocks, + blockTransactions, ) require.NoError(s.T(), err) } diff --git a/engine/collection/compliance/core.go b/engine/collection/compliance/core.go index c341d7cb146..dc5432d2925 100644 --- a/engine/collection/compliance/core.go +++ b/engine/collection/compliance/core.go @@ -260,7 +260,7 @@ func (c *Core) processBlockAndDescendants(proposal flow.Slashable[*cluster.Block }) // notify VoteAggregator about the invalid block - err = c.voteAggregator.InvalidBlock(model.ProposalFromFlow(header)) + err = c.voteAggregator.InvalidBlock(model.SignedProposalFromFlow(header)) if err != nil { if mempool.IsBelowPrunedThresholdError(err) { log.Warn().Msg("received invalid block, but is below pruned threshold") @@ -317,7 +317,7 @@ func (c *Core) processBlockProposal(proposal *cluster.Block) error { Logger() log.Info().Msg("processing block proposal") - hotstuffProposal := model.ProposalFromFlow(header) + hotstuffProposal := model.SignedProposalFromFlow(header) err := c.validator.ValidateProposal(hotstuffProposal) if err != nil { if model.IsInvalidProposalError(err) { diff --git a/engine/collection/compliance/core_test.go b/engine/collection/compliance/core_test.go index 2ed1292ee32..6fd27bc3963 100644 --- a/engine/collection/compliance/core_test.go +++ b/engine/collection/compliance/core_test.go @@ -206,7 +206,7 @@ func (cs *CoreSuite) TestOnBlockProposalValidParent() { // store the data for retrieval cs.headerDB[block.Header.ParentID] = cs.head - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.hotstuff.On("SubmitProposal", hotstuffProposal) @@ -232,7 +232,7 @@ func (cs *CoreSuite) 
TestOnBlockProposalValidAncestor() { cs.headerDB[parent.ID()] = &parent cs.headerDB[ancestor.ID()] = &ancestor - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.hotstuff.On("SubmitProposal", hotstuffProposal).Once() @@ -280,7 +280,7 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsHotStuffValidation() { parent := unittest.ClusterBlockWithParent(&ancestor) block := unittest.ClusterBlockWithParent(&parent) proposal := messages.NewClusterBlockProposal(&block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) // store the data for retrieval cs.headerDB[parent.ID()] = &parent @@ -363,7 +363,7 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsProtocolStateValidation() { parent := unittest.ClusterBlockWithParent(&ancestor) block := unittest.ClusterBlockWithParent(&parent) proposal := messages.NewClusterBlockProposal(&block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) // store the data for retrieval cs.headerDB[parent.ID()] = &parent @@ -476,7 +476,7 @@ func (cs *CoreSuite) TestProcessBlockAndDescendants() { cs.childrenDB[parentID] = append(cs.childrenDB[parentID], pending3) for _, block := range []cluster.Block{parent, block1, block2, block3} { - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.hotstuff.On("SubmitProposal", hotstuffProposal).Once() @@ -552,7 +552,7 @@ func (cs *CoreSuite) TestProposalBufferingOrder() { } cs.hotstuff.On("SubmitProposal", mock.Anything).Times(4).Run( func(args mock.Arguments) { - header := args.Get(0).(*model.Proposal).Block + header := args.Get(0).(*model.SignedProposal).Block assert.Equal(cs.T(), order[index], header.BlockID, "should submit correct header to hotstuff") index++ cs.headerDB[header.BlockID] = proposalsLookup[header.BlockID] diff --git a/engine/collection/compliance/engine_test.go b/engine/collection/compliance/engine_test.go index 5ad01b19566..5c3a2c9cd99 100644 --- a/engine/collection/compliance/engine_test.go +++ b/engine/collection/compliance/engine_test.go @@ -165,7 +165,7 @@ func (cs *EngineSuite) TestSubmittingMultipleEntries() { for i := 0; i < blockCount; i++ { block := unittest.ClusterBlockWithParent(cs.head) proposal := messages.NewClusterBlockProposal(&block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) cs.hotstuff.On("SubmitProposal", hotstuffProposal).Return().Once() cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil).Once() @@ -183,7 +183,7 @@ func (cs *EngineSuite) TestSubmittingMultipleEntries() { block := unittest.ClusterBlockWithParent(cs.head) proposal := messages.NewClusterBlockProposal(&block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) cs.hotstuff.On("SubmitProposal", hotstuffProposal).Once() cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil).Once() diff --git 
a/engine/collection/message_hub/message_hub.go b/engine/collection/message_hub/message_hub.go index f2241dffb73..896d5ce0b5a 100644 --- a/engine/collection/message_hub/message_hub.go +++ b/engine/collection/message_hub/message_hub.go @@ -398,7 +398,7 @@ func (h *MessageHub) OnOwnProposal(proposal *flow.Header, targetPublicationTime return } - hotstuffProposal := model.ProposalFromFlow(proposal) + hotstuffProposal := model.SignedProposalFromFlow(proposal) // notify vote aggregator that new block proposal is available, in case we are next leader h.voteAggregator.AddBlock(hotstuffProposal) // non-blocking diff --git a/engine/collection/message_hub/message_hub_test.go b/engine/collection/message_hub/message_hub_test.go index d6032fa8e6f..d1f66a1af90 100644 --- a/engine/collection/message_hub/message_hub_test.go +++ b/engine/collection/message_hub/message_hub_test.go @@ -273,7 +273,7 @@ func (s *MessageHubSuite) TestOnOwnProposal() { expectedBroadcastMsg := messages.NewClusterBlockProposal(&block) submitted := make(chan struct{}) // closed when proposal is submitted to hotstuff - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) s.voteAggregator.On("AddBlock", hotstuffProposal).Once() s.hotstuff.On("SubmitProposal", hotstuffProposal). Run(func(args mock.Arguments) { close(submitted) }). @@ -334,7 +334,7 @@ func (s *MessageHubSuite) TestProcessMultipleMessagesHappyPath() { s.payloads.On("ByBlockID", proposal.Header.ID()).Return(proposal.Payload, nil) // unset chain and height to make sure they are correctly reconstructed - hotstuffProposal := model.ProposalFromFlow(proposal.Header) + hotstuffProposal := model.SignedProposalFromFlow(proposal.Header) s.voteAggregator.On("AddBlock", hotstuffProposal) s.hotstuff.On("SubmitProposal", hotstuffProposal) expectedBroadcastMsg := messages.NewClusterBlockProposal(&proposal) diff --git a/engine/common/follower/compliance_core.go b/engine/common/follower/compliance_core.go index fa297b13902..2a65a2f3df2 100644 --- a/engine/common/follower/compliance_core.go +++ b/engine/common/follower/compliance_core.go @@ -111,7 +111,7 @@ func (c *ComplianceCore) OnBlockRange(originID flow.Identifier, batch []*flow.Bl firstBlock := batch[0].Header lastBlock := batch[len(batch)-1].Header - hotstuffProposal := model.ProposalFromFlow(lastBlock) + hotstuffProposal := model.SignedProposalFromFlow(lastBlock) log := c.log.With(). Hex("origin_id", originID[:]). Str("chain_id", lastBlock.ChainID.String()). 
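For orientation, a minimal sketch (not part of this patch) of the hand-off pattern these call sites share after the Proposal to SignedProposal rename. The package name, the narrow local interfaces, and the handOffHeader helper below are assumptions for illustration only; only model.SignedProposalFromFlow, ValidateProposal, AddBlock, and proposal.Block.BlockID are taken from the diff above.

package sketch

import (
	"fmt"

	"github.com/onflow/flow-go/consensus/hotstuff/model"
	"github.com/onflow/flow-go/model/flow"
)

// Narrow stand-ins for the validator and vote aggregator that the tests above mock.
type proposalValidator interface {
	ValidateProposal(proposal *model.SignedProposal) error
}
type voteAggregator interface {
	AddBlock(proposal *model.SignedProposal)
}

// handOffHeader (hypothetical name) converts an incoming header and forwards it,
// mirroring what the compliance cores and message hubs do after the rename.
func handOffHeader(header *flow.Header, v proposalValidator, agg voteAggregator) error {
	// previously model.ProposalFromFlow; now produces a *model.SignedProposal
	hotstuffProposal := model.SignedProposalFromFlow(header)

	// reject invalid proposals before they reach downstream components
	if err := v.ValidateProposal(hotstuffProposal); err != nil {
		return fmt.Errorf("invalid proposal %v: %w", hotstuffProposal.Block.BlockID, err)
	}

	// notify the vote aggregator that a new block proposal is available (non-blocking),
	// as in OnOwnProposal above
	agg.AddBlock(hotstuffProposal)
	return nil
}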
diff --git a/engine/common/follower/compliance_core_test.go b/engine/common/follower/compliance_core_test.go index 522fc26160e..8930d2e19ce 100644 --- a/engine/common/follower/compliance_core_test.go +++ b/engine/common/follower/compliance_core_test.go @@ -97,7 +97,7 @@ func (s *CoreSuite) TestProcessingSingleBlock() { block := unittest.BlockWithParentFixture(s.finalizedBlock) // incoming block has to be validated - s.validator.On("ValidateProposal", model.ProposalFromFlow(block.Header)).Return(nil).Once() + s.validator.On("ValidateProposal", model.SignedProposalFromFlow(block.Header)).Return(nil).Once() err := s.core.OnBlockRange(s.originID, []*flow.Block{block}) require.NoError(s.T(), err) @@ -114,7 +114,7 @@ func (s *CoreSuite) TestAddFinalizedBlock() { block.Header.View = s.finalizedBlock.View - 1 // block is below finalized view // incoming block has to be validated - s.validator.On("ValidateProposal", model.ProposalFromFlow(block.Header)).Return(nil).Once() + s.validator.On("ValidateProposal", model.SignedProposalFromFlow(block.Header)).Return(nil).Once() err := s.core.OnBlockRange(s.originID, []*flow.Block{&block}) require.NoError(s.T(), err) @@ -140,7 +140,7 @@ func (s *CoreSuite) TestProcessingRangeHappyPath() { wg.Done() }).Return().Once() } - s.validator.On("ValidateProposal", model.ProposalFromFlow(blocks[len(blocks)-1].Header)).Return(nil).Once() + s.validator.On("ValidateProposal", model.SignedProposalFromFlow(blocks[len(blocks)-1].Header)).Return(nil).Once() err := s.core.OnBlockRange(s.originID, blocks) require.NoError(s.T(), err) @@ -154,7 +154,7 @@ func (s *CoreSuite) TestProcessingNotOrderedBatch() { blocks := unittest.ChainFixtureFrom(10, s.finalizedBlock) blocks[2], blocks[3] = blocks[3], blocks[2] - s.validator.On("ValidateProposal", model.ProposalFromFlow(blocks[len(blocks)-1].Header)).Return(nil).Once() + s.validator.On("ValidateProposal", model.SignedProposalFromFlow(blocks[len(blocks)-1].Header)).Return(nil).Once() err := s.core.OnBlockRange(s.originID, blocks) require.ErrorIs(s.T(), err, cache.ErrDisconnectedBatch) @@ -164,7 +164,7 @@ func (s *CoreSuite) TestProcessingNotOrderedBatch() { func (s *CoreSuite) TestProcessingInvalidBlock() { blocks := unittest.ChainFixtureFrom(10, s.finalizedBlock) - invalidProposal := model.ProposalFromFlow(blocks[len(blocks)-1].Header) + invalidProposal := model.SignedProposalFromFlow(blocks[len(blocks)-1].Header) sentinelError := model.NewInvalidProposalErrorf(invalidProposal, "") s.validator.On("ValidateProposal", invalidProposal).Return(sentinelError).Once() s.followerConsumer.On("OnInvalidBlockDetected", flow.Slashable[model.InvalidProposalError]{ @@ -189,7 +189,7 @@ func (s *CoreSuite) TestProcessingBlocksAfterShutdown() { // to the protocol state blocks := unittest.ChainFixtureFrom(10, s.finalizedBlock) - s.validator.On("ValidateProposal", model.ProposalFromFlow(blocks[len(blocks)-1].Header)).Return(nil).Once() + s.validator.On("ValidateProposal", model.SignedProposalFromFlow(blocks[len(blocks)-1].Header)).Return(nil).Once() err := s.core.OnBlockRange(s.originID, blocks) require.NoError(s.T(), err) diff --git a/engine/common/provider/engine.go b/engine/common/provider/engine.go index 12593b614ef..745ed7dd585 100644 --- a/engine/common/provider/engine.go +++ b/engine/common/provider/engine.go @@ -266,7 +266,7 @@ func (e *Engine) onEntityRequest(request *internal.EntityRequest) error { e.log.Info(). Str("origin_id", request.OriginId.String()). Strs("entity_ids", flow.IdentifierList(entityIDs).Strings()). 
- Uint64("nonce", request.Nonce). // to match with the the entity request received log + Uint64("nonce", request.Nonce). // to match with the entity request received log Msg("entity response sent") return nil diff --git a/engine/consensus/compliance/core.go b/engine/consensus/compliance/core.go index 8ed733c8fe9..5ab6d26c269 100644 --- a/engine/consensus/compliance/core.go +++ b/engine/consensus/compliance/core.go @@ -271,7 +271,7 @@ func (c *Core) processBlockAndDescendants(proposal flow.Slashable[*flow.Block]) }) // notify VoteAggregator about the invalid block - err = c.voteAggregator.InvalidBlock(model.ProposalFromFlow(header)) + err = c.voteAggregator.InvalidBlock(model.SignedProposalFromFlow(header)) if err != nil { if mempool.IsBelowPrunedThresholdError(err) { log.Warn().Msg("received invalid block, but is below pruned threshold") @@ -327,7 +327,7 @@ func (c *Core) processBlockProposal(proposal *flow.Block) error { ) defer span.End() - hotstuffProposal := model.ProposalFromFlow(header) + hotstuffProposal := model.SignedProposalFromFlow(header) err := c.validator.ValidateProposal(hotstuffProposal) if err != nil { if model.IsInvalidProposalError(err) { diff --git a/engine/consensus/compliance/core_test.go b/engine/consensus/compliance/core_test.go index 494d1d0e91d..41a57a5ffc7 100644 --- a/engine/consensus/compliance/core_test.go +++ b/engine/consensus/compliance/core_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/consensus/hotstuff/helper" hotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/model" consensus "github.com/onflow/flow-go/engine/consensus/mock" @@ -285,7 +286,7 @@ func (cs *CoreSuite) TestOnBlockProposalValidParent() { // store the data for retrieval cs.headerDB[block.Header.ParentID] = cs.head - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.hotstuff.On("SubmitProposal", hotstuffProposal) @@ -314,7 +315,7 @@ func (cs *CoreSuite) TestOnBlockProposalValidAncestor() { cs.headerDB[parent.ID()] = parent.Header cs.headerDB[ancestor.ID()] = ancestor.Header - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.hotstuff.On("SubmitProposal", hotstuffProposal) @@ -363,7 +364,7 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsHotStuffValidation() { parent := unittest.BlockWithParentFixture(ancestor.Header) block := unittest.BlockWithParentFixture(parent.Header) proposal := unittest.ProposalFromBlock(block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) // store the data for retrieval cs.headerDB[parent.ID()] = parent.Header @@ -445,7 +446,7 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsProtocolStateValidation() { parent := unittest.BlockWithParentFixture(ancestor.Header) block := unittest.BlockWithParentFixture(parent.Header) proposal := unittest.ProposalFromBlock(block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) // store the data for retrieval cs.headerDB[parent.ID()] = parent.Header @@ 
-551,7 +552,7 @@ func (cs *CoreSuite) TestProcessBlockAndDescendants() { cs.childrenDB[parentID] = append(cs.childrenDB[parentID], pending3) for _, block := range []*flow.Block{parent, block1, block2, block3} { - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.hotstuff.On("SubmitProposal", hotstuffProposal).Once() @@ -601,7 +602,7 @@ func (cs *CoreSuite) TestProposalBufferingOrder() { require.NoError(cs.T(), err, "proposal buffering should pass") // make sure no block is forwarded to hotstuff - cs.hotstuff.AssertNotCalled(cs.T(), "SubmitProposal", model.ProposalFromFlow(&proposal.Block.Header)) + cs.hotstuff.AssertNotCalled(cs.T(), "SubmitProposal", model.SignedProposalFromFlow(&proposal.Block.Header)) } // check that we submit each proposal in a valid order @@ -618,7 +619,7 @@ func (cs *CoreSuite) TestProposalBufferingOrder() { } cs.hotstuff.On("SubmitProposal", mock.Anything).Times(4).Run( func(args mock.Arguments) { - proposal := args.Get(0).(*model.Proposal) + proposal := args.Get(0).(*model.SignedProposal) header := proposal.Block if calls == 0 { // first header processed must be the common parent @@ -626,7 +627,7 @@ func (cs *CoreSuite) TestProposalBufferingOrder() { } // mark the proposal as processed delete(unprocessed, header.BlockID) - cs.headerDB[header.BlockID] = model.ProposalToFlow(proposal) + cs.headerDB[header.BlockID] = helper.SignedProposalToFlow(proposal) calls++ }, ) diff --git a/engine/consensus/compliance/engine_test.go b/engine/consensus/compliance/engine_test.go index a82ccc558c7..e5afb20ad23 100644 --- a/engine/consensus/compliance/engine_test.go +++ b/engine/consensus/compliance/engine_test.go @@ -70,7 +70,7 @@ func (cs *EngineSuite) TestSubmittingMultipleEntries() { for i := 0; i < blockCount; i++ { block := unittest.BlockWithParentFixture(cs.head) proposal := messages.NewBlockProposal(block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) cs.hotstuff.On("SubmitProposal", hotstuffProposal).Return().Once() cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil).Once() @@ -88,7 +88,7 @@ func (cs *EngineSuite) TestSubmittingMultipleEntries() { block := unittest.BlockWithParentFixture(cs.head) proposal := unittest.ProposalFromBlock(block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) cs.hotstuff.On("SubmitProposal", hotstuffProposal).Return().Once() cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil).Once() diff --git a/engine/consensus/message_hub/message_hub.go b/engine/consensus/message_hub/message_hub.go index 07fe8c3a387..38cc34a95ca 100644 --- a/engine/consensus/message_hub/message_hub.go +++ b/engine/consensus/message_hub/message_hub.go @@ -436,7 +436,7 @@ func (h *MessageHub) OnOwnProposal(proposal *flow.Header, targetPublicationTime return } - hotstuffProposal := model.ProposalFromFlow(proposal) + hotstuffProposal := model.SignedProposalFromFlow(proposal) // notify vote aggregator that new block proposal is available, in case we are next leader h.voteAggregator.AddBlock(hotstuffProposal) // non-blocking diff --git a/engine/consensus/message_hub/message_hub_test.go 
b/engine/consensus/message_hub/message_hub_test.go index 68bd1adc59a..e5cd47ca1c1 100644 --- a/engine/consensus/message_hub/message_hub_test.go +++ b/engine/consensus/message_hub/message_hub_test.go @@ -250,7 +250,7 @@ func (s *MessageHubSuite) TestOnOwnProposal() { expectedBroadcastMsg := messages.NewBlockProposal(block) submitted := make(chan struct{}) // closed when proposal is submitted to hotstuff - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromFlow(block.Header) s.voteAggregator.On("AddBlock", hotstuffProposal).Once() s.hotstuff.On("SubmitProposal", hotstuffProposal). Run(func(args mock.Arguments) { close(submitted) }). @@ -315,7 +315,7 @@ func (s *MessageHubSuite) TestProcessMultipleMessagesHappyPath() { s.payloads.On("ByBlockID", proposal.Header.ID()).Return(proposal.Payload, nil) // unset chain and height to make sure they are correctly reconstructed - hotstuffProposal := model.ProposalFromFlow(proposal.Header) + hotstuffProposal := model.SignedProposalFromFlow(proposal.Header) s.voteAggregator.On("AddBlock", hotstuffProposal).Once() s.hotstuff.On("SubmitProposal", hotstuffProposal) expectedBroadcastMsg := messages.NewBlockProposal(&proposal) diff --git a/engine/execution/computation/computer/computer_test.go b/engine/execution/computation/computer/computer_test.go index 4eade023273..0d7899100ad 100644 --- a/engine/execution/computation/computer/computer_test.go +++ b/engine/execution/computation/computer/computer_test.go @@ -705,6 +705,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { }, ), ), + fvm.WithReadVersionFromNodeVersionBeacon(false), ) vm := fvm.NewVirtualMachine() @@ -816,7 +817,9 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { runtime.Config{}, func(_ runtime.Config) runtime.Runtime { return rt - }))) + })), + fvm.WithReadVersionFromNodeVersionBeacon(false), + ) vm := fvm.NewVirtualMachine() @@ -929,7 +932,9 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { runtime.Config{}, func(_ runtime.Config) runtime.Runtime { return rt - }))) + })), + fvm.WithReadVersionFromNodeVersionBeacon(false), + ) vm := fvm.NewVirtualMachine() diff --git a/engine/execution/ingestion/core.go b/engine/execution/ingestion/core.go index 95d68b30f05..62889ffc479 100644 --- a/engine/execution/ingestion/core.go +++ b/engine/execution/ingestion/core.go @@ -398,6 +398,14 @@ func (e *Core) onBlockExecuted( return nil } +func nonSystemTransactionCount(result flow.ExecutionResult) uint64 { + count := uint64(0) + for _, chunk := range result.Chunks { + count += chunk.NumberOfTransactions + } + return count +} + func (e *Core) onCollection(col *flow.Collection) error { colID := col.ID() e.log.Info(). 
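For context, a short usage sketch of the nonSystemTransactionCount helper added to core.go above: it simply sums Chunk.NumberOfTransactions over all chunks of an execution result and feeds the "num_txs" field of the "block executed" log. logExecutedBlock is a hypothetical wrapper written only to show that call; the log fields themselves appear in the execution code elsewhere in this patch.

// logExecutedBlock (hypothetical) records the number of transactions in an executed block.
func logExecutedBlock(lg zerolog.Logger, receipt *flow.ExecutionReceipt) {
	lg.Info().
		Uint64("num_txs", nonSystemTransactionCount(receipt.ExecutionResult)).
		Msg("block executed")
}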
diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go deleted file mode 100644 index f3dfcaf1dd2..00000000000 --- a/engine/execution/ingestion/engine.go +++ /dev/null @@ -1,958 +0,0 @@ -package ingestion - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" - - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/engine/execution" - "github.com/onflow/flow-go/engine/execution/computation" - "github.com/onflow/flow-go/engine/execution/ingestion/stop" - "github.com/onflow/flow-go/engine/execution/ingestion/uploader" - "github.com/onflow/flow-go/engine/execution/provider" - "github.com/onflow/flow-go/engine/execution/state" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" - "github.com/onflow/flow-go/module/executiondatasync/pruner" - "github.com/onflow/flow-go/module/mempool/entity" - "github.com/onflow/flow-go/module/mempool/queue" - "github.com/onflow/flow-go/module/mempool/stdmap" - "github.com/onflow/flow-go/module/trace" - "github.com/onflow/flow-go/network" - psEvents "github.com/onflow/flow-go/state/protocol/events" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/logging" -) - -var _ execution_data.ProcessedHeightRecorder = (*Engine)(nil) - -// An Engine receives and saves incoming blocks. -type Engine struct { - psEvents.Noop // satisfy protocol events consumer interface - execution_data.ProcessedHeightRecorder - - unit *engine.Unit - log zerolog.Logger - collectionFetcher CollectionFetcher - headers storage.Headers // see comments on getHeaderByHeight for why we need it - blocks storage.Blocks - collections storage.Collections - computationManager computation.ComputationManager - providerEngine provider.ProviderEngine - mempool *Mempool - execState state.ExecutionState - metrics module.ExecutionMetrics - tracer module.Tracer - extensiveLogging bool - executionDataPruner *pruner.Pruner - uploader *uploader.Manager - stopControl *stop.StopControl - loader BlockLoader -} - -func New( - unit *engine.Unit, - logger zerolog.Logger, - net network.EngineRegistry, - collectionFetcher CollectionFetcher, - headers storage.Headers, - blocks storage.Blocks, - collections storage.Collections, - executionEngine computation.ComputationManager, - providerEngine provider.ProviderEngine, - execState state.ExecutionState, - metrics module.ExecutionMetrics, - tracer module.Tracer, - extLog bool, - pruner *pruner.Pruner, - uploader *uploader.Manager, - stopControl *stop.StopControl, - loader BlockLoader, -) (*Engine, error) { - log := logger.With().Str("engine", "ingestion").Logger() - - mempool := newMempool() - - eng := Engine{ - unit: unit, - log: log, - collectionFetcher: collectionFetcher, - headers: headers, - blocks: blocks, - collections: collections, - computationManager: executionEngine, - providerEngine: providerEngine, - mempool: mempool, - execState: execState, - metrics: metrics, - tracer: tracer, - extensiveLogging: extLog, - executionDataPruner: pruner, - uploader: uploader, - stopControl: stopControl, - loader: loader, - ProcessedHeightRecorder: execution_data.NewProcessedHeightRecorderManager(0), - } - - return &eng, nil -} - -// Ready returns a channel that will close when the engine has -// successfully started. 
-func (e *Engine) Ready() <-chan struct{} { - if e.stopControl.IsExecutionStopped() { - return e.unit.Ready() - } - - if err := e.uploader.RetryUploads(); err != nil { - e.log.Warn().Msg("failed to re-upload all ComputationResults") - } - - err := e.reloadUnexecutedBlocks() - if err != nil { - e.log.Fatal().Err(err).Msg("failed to load all unexecuted blocks") - } - - return e.unit.Ready() -} - -// Done returns a channel that will close when the engine has -// successfully stopped. -func (e *Engine) Done() <-chan struct{} { - return e.unit.Done() -} - -// ProcessLocal processes an event originating on the local node. -func (e *Engine) ProcessLocal(event interface{}) error { - return fmt.Errorf("ingestion error does not process local events") -} - -// on nodes startup, we need to load all the unexecuted blocks to the execution queues. -// blocks have to be loaded in the way that the parent has been loaded before loading its children -func (e *Engine) reloadUnexecutedBlocks() error { - unexecuted, err := e.loader.LoadUnexecuted(e.unit.Ctx()) - if err != nil { - return fmt.Errorf("could not load unexecuted blocks: %w", err) - } - // it's possible the BlockProcessable is called during the reloading, as the follower engine - // will receive blocks before ingestion engine is ready. - // The problem with that is, since the reloading hasn't finished yet, enqueuing the new block from - // the BlockProcessable callback will fail, because its parent block might have not been reloaded - // to the queues yet. - // So one solution here is to lock the execution queues during reloading, so that if BlockProcessable - // is called before reloading is finished, it will be blocked, which will avoid that edge case. - return e.mempool.Run(func( - blockByCollection *stdmap.BlockByCollectionBackdata, - executionQueues *stdmap.QueuesBackdata, - ) error { - for _, blockID := range unexecuted { - err := e.reloadBlock(blockByCollection, executionQueues, blockID) - if err != nil { - return fmt.Errorf("could not reload block: %v, %w", blockID, err) - } - - e.log.Debug().Hex("block_id", blockID[:]).Msg("reloaded block") - } - - e.log.Info().Int("count", len(unexecuted)).Msg("all unexecuted have been successfully reloaded") - - return nil - }) -} - -func (e *Engine) reloadBlock( - blockByCollection *stdmap.BlockByCollectionBackdata, - executionQueues *stdmap.QueuesBackdata, - blockID flow.Identifier, -) error { - block, err := e.blocks.ByID(blockID) - if err != nil { - return fmt.Errorf("could not get block by ID: %v %w", blockID, err) - } - - // enqueue the block and check if there is any missing collections - missingCollections, err := e.enqueueBlockAndCheckExecutable(blockByCollection, executionQueues, block, false) - - if err != nil { - return fmt.Errorf("could not enqueue block %x on reloading: %w", blockID, err) - } - - // forward the missing collections to requester engine for requesting them from collection nodes, - // adding the missing collections to mempool in order to trigger the block execution as soon as - // all missing collections are received. 
- err = e.fetchAndHandleCollection(blockID, block.Header.Height, missingCollections, func(collection *flow.Collection) error { - err := e.addCollectionToMempool(collection, blockByCollection) - - if err != nil { - return fmt.Errorf("could not add collection to mempool: %w", err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("could not fetch or handle collection %w", err) - } - return nil -} - -// BlockProcessable handles the new verified blocks (blocks that -// have passed consensus validation) received from the consensus nodes -// NOTE: BlockProcessable might be called multiple times for the same block. -// NOTE: Ready calls reloadUnexecutedBlocks during initialization, which handles dropped protocol events. -func (e *Engine) BlockProcessable(b *flow.Header, _ *flow.QuorumCertificate) { - - // TODO: this should not be blocking: https://github.com/onflow/flow-go/issues/4400 - - // skip if stopControl tells to skip, so that we can avoid fetching collections - // for this block - if !e.stopControl.ShouldExecuteBlock(b.ID(), b.Height) { - return - } - - blockID := b.ID() - newBlock, err := e.blocks.ByID(blockID) - if err != nil { - e.log.Fatal().Err(err).Msgf("could not get incorporated block(%v): %v", blockID, err) - } - - e.log.Info().Hex("block_id", blockID[:]). - Uint64("height", b.Height). - Msg("handling new block") - - err = e.handleBlock(e.unit.Ctx(), newBlock) - if err != nil { - e.log.Error().Err(err).Hex("block_id", blockID[:]).Msg("failed to handle block") - } -} - -// Main handling - -// handle block will process the incoming block. -// the block has passed the consensus validation. -func (e *Engine) handleBlock(ctx context.Context, block *flow.Block) error { - - blockID := block.ID() - log := e.log.With().Hex("block_id", blockID[:]).Logger() - - span, _ := e.tracer.StartBlockSpan(ctx, blockID, trace.EXEHandleBlock) - defer span.End() - - executed, err := e.execState.IsBlockExecuted(block.Header.Height, blockID) - if err != nil { - return fmt.Errorf("could not check whether block is executed: %w", err) - } - - if executed { - log.Debug().Msg("block has been executed already") - return nil - } - - var missingCollections []*flow.CollectionGuarantee - // unexecuted block - // acquiring the lock so that there is only one process modifying the queue - err = e.mempool.Run(func( - blockByCollection *stdmap.BlockByCollectionBackdata, - executionQueues *stdmap.QueuesBackdata, - ) error { - missing, err := e.enqueueBlockAndCheckExecutable(blockByCollection, executionQueues, block, false) - if err != nil { - return err - } - missingCollections = missing - return nil - }) - - if err != nil { - return fmt.Errorf("could not enqueue block %v: %w", blockID, err) - } - - return e.addOrFetch(blockID, block.Header.Height, missingCollections) -} - -func (e *Engine) enqueueBlockAndCheckExecutable( - blockByCollection *stdmap.BlockByCollectionBackdata, - executionQueues *stdmap.QueuesBackdata, - block *flow.Block, - checkStateSync bool, -) ([]*flow.CollectionGuarantee, error) { - executableBlock := &entity.ExecutableBlock{ - Block: block, - CompleteCollections: make(map[flow.Identifier]*entity.CompleteCollection), - } - - blockID := executableBlock.ID() - - lg := e.log.With(). - Hex("block_id", blockID[:]). - Uint64("block_height", executableBlock.Block.Header.Height). 
- Logger() - - // adding the block to the queue, - queue, added, head := enqueue(executableBlock, executionQueues) - - // if it's not added, it means the block is not a new block, it already - // exists in the queue, then bail - if !added { - log.Debug().Hex("block_id", logging.Entity(executableBlock)). - Int("block_height", int(executableBlock.Height())). - Msg("block already exists in the execution queue") - return nil, nil - } - - firstUnexecutedHeight := queue.Head.Item.Height() - - // check if a block is executable. - // a block is executable if the following conditions are all true - // 1) the parent state commitment is ready - // 2) the collections for the block payload are ready - // 3) the child block is ready for querying the randomness - - // check if the block's parent has been executed. (we can't execute the block if the parent has - // not been executed yet) - // check if there is a statecommitment for the parent block - parentCommitment, err := e.execState.StateCommitmentByBlockID(block.Header.ParentID) - - // if we found the statecommitment for the parent block, then add it to the executable block. - if err == nil { - executableBlock.StartState = &parentCommitment - } else if errors.Is(err, storage.ErrNotFound) { - // the parent block is an unexecuted block. - // if the queue only has one block, and its parent doesn't - // exist in the queue, then we need to load the block from the storage. - _, ok := queue.Nodes[blockID] - if !ok { - lg.Error().Msgf("an unexecuted parent block is missing in the queue") - } - } else { - // if there is exception, then crash - lg.Fatal().Err(err).Msg("unexpected error while accessing storage, shutting down") - } - - // check if we have all the collections for the block, and request them if there is missing. - missingCollections, err := e.matchAndFindMissingCollections(executableBlock, blockByCollection) - if err != nil { - return nil, fmt.Errorf("cannot send collection requests: %w", err) - } - - complete := false - - // if newly enqueued block is inside any existing queue, we should skip now and wait - // for parent to finish execution - if head { - // execute the block if the block is ready to be executed - complete = e.executeBlockIfComplete(executableBlock) - } - - lg.Info(). - // if the execution is halt, but the queue keeps growing, we could check which block - // hasn't been executed. - Uint64("first_unexecuted_in_queue", firstUnexecutedHeight). - Bool("complete", complete). - Bool("head_of_queue", head). - Int("cols", len(executableBlock.Block.Payload.Guarantees)). - Int("missing_cols", len(missingCollections)). - Msg("block is enqueued") - - return missingCollections, nil -} - -// executeBlock will execute the block. -// When finish executing, it will check if the children becomes executable and execute them if yes. -func (e *Engine) executeBlock( - ctx context.Context, - executableBlock *entity.ExecutableBlock, -) { - - // don't execute the block if the stop control says no - if !e.stopControl.ShouldExecuteBlock(executableBlock.Block.Header.ID(), executableBlock.Block.Header.Height) { - return - } - - lg := e.log.With(). - Hex("block_id", logging.Entity(executableBlock)). - Uint64("height", executableBlock.Block.Header.Height). - Int("collections", len(executableBlock.CompleteCollections)). 
- Logger() - - lg.Info().Msg("executing block") - - startedAt := time.Now() - - span, ctx := e.tracer.StartSpanFromContext(ctx, trace.EXEExecuteBlock) - defer span.End() - - parentID := executableBlock.Block.Header.ParentID - parentErID, err := e.execState.GetExecutionResultID(ctx, parentID) - if err != nil { - lg.Err(err). - Str("parentID", parentID.String()). - Msg("could not get execution result ID for parent block") - return - } - - snapshot := e.execState.NewStorageSnapshot(*executableBlock.StartState, - executableBlock.Block.Header.ParentID, - executableBlock.Block.Header.Height-1, - ) - - computationResult, err := e.computationManager.ComputeBlock( - ctx, - parentErID, - executableBlock, - snapshot) - if err != nil { - lg.Err(err).Msg("error while computing block") - return - } - - wg := sync.WaitGroup{} - wg.Add(1) - defer wg.Wait() - - go func() { - defer wg.Done() - err := e.uploader.Upload(ctx, computationResult) - if err != nil { - lg.Err(err).Msg("error while uploading block") - // continue processing. uploads should not block execution - } - }() - - err = e.saveExecutionResults(ctx, computationResult) - if errors.Is(err, storage.ErrDataMismatch) { - lg.Fatal().Err(err).Msg("fatal: trying to store different results for the same block") - } - - if err != nil { - lg.Err(err).Msg("error while handing computation results") - return - } - - receipt := computationResult.ExecutionReceipt - broadcasted, err := e.providerEngine.BroadcastExecutionReceipt( - ctx, executableBlock.Block.Header.Height, receipt) - if err != nil { - lg.Err(err).Msg("critical: failed to broadcast the receipt") - } - - finalEndState := computationResult.CurrentEndState() - lg.Info(). - Hex("parent_block", executableBlock.Block.Header.ParentID[:]). - Int("collections", len(executableBlock.Block.Payload.Guarantees)). - Hex("start_state", executableBlock.StartState[:]). - Hex("final_state", finalEndState[:]). - Hex("receipt_id", logging.Entity(receipt)). - Hex("result_id", logging.Entity(receipt.ExecutionResult)). - Hex("execution_data_id", receipt.ExecutionResult.ExecutionDataID[:]). - Bool("state_changed", finalEndState != *executableBlock.StartState). - Uint64("num_txs", nonSystemTransactionCount(receipt.ExecutionResult)). - Bool("broadcasted", broadcasted). - Int64("timeSpentInMS", time.Since(startedAt).Milliseconds()). - Msg("block executed") - - e.stopControl.OnBlockExecuted(executableBlock.Block.Header) - - err = e.onBlockExecuted(executableBlock, finalEndState) - if err != nil { - lg.Err(err).Msg("failed in process block's children") - } - - if e.executionDataPruner != nil { - e.OnBlockProcessed(executableBlock.Height()) - } - - e.unit.Ctx() - -} - -func nonSystemTransactionCount(result flow.ExecutionResult) uint64 { - count := uint64(0) - for _, chunk := range result.Chunks { - count += chunk.NumberOfTransactions - } - return count -} - -// we've executed the block, now we need to check: -// 1. whether the state syncing can be turned off -// 2. 
whether its children can be executed -// the executionQueues stores blocks as a tree: -// -// 10 <- 11 <- 12 -// ^-- 13 -// 14 <- 15 <- 16 -// -// if block 10 is the one just executed, then we will remove it from the queue, and add -// its children back, meaning the tree will become: -// -// 11 <- 12 -// 13 -// 14 <- 15 <- 16 - -func (e *Engine) onBlockExecuted( - executed *entity.ExecutableBlock, - finalState flow.StateCommitment, -) error { - - e.metrics.ExecutionStorageStateCommitment(int64(len(finalState))) - e.metrics.ExecutionLastExecutedBlockHeight(executed.Block.Header.Height) - - missingCollections := make(map[*entity.ExecutableBlock][]*flow.CollectionGuarantee) - err := e.mempool.Run( - func( - blockByCollection *stdmap.BlockByCollectionBackdata, - executionQueues *stdmap.QueuesBackdata, - ) error { - // find the block that was just executed - executionQueue, exists := executionQueues.ByID(executed.ID()) - if !exists { - logQueueState(e.log, executionQueues, executed.ID()) - // when the block no longer exists in the queue, it means there was a race condition that - // two onBlockExecuted was called for the same block, and one process has already removed the - // block from the queue, so we will print an error here - return fmt.Errorf("block has been executed already, no longer exists in the queue") - } - - // dismount the executed block and all its children - _, newQueues := executionQueue.Dismount() - - // go through each children, add them back to the queue, and check - // if the children is executable - for _, queue := range newQueues { - queueID := queue.ID() - added := executionQueues.Add(queueID, queue) - if !added { - // blocks should be unique in execution queues, if we dismount all the children blocks, then - // add it back to the queues, then it should always be able to add. - // If not, then there is a bug that the queues have duplicated blocks - return fmt.Errorf("fatal error - child block already in execution queue") - } - - // the parent block has been executed, update the StartState of - // each child block. - child := queue.Head.Item.(*entity.ExecutableBlock) - child.StartState = &finalState - - missing, err := e.matchAndFindMissingCollections(child, blockByCollection) - if err != nil { - return fmt.Errorf("cannot send collection requests: %w", err) - } - if len(missing) > 0 { - missingCollections[child] = append(missingCollections[child], missing...) - } - - completed := e.executeBlockIfComplete(child) - if !completed { - e.log.Debug(). - Hex("executed_block", logging.Entity(executed)). - Hex("child_block", logging.Entity(child)). - Msg("child block is not ready to be executed yet") - } else { - e.log.Debug(). - Hex("executed_block", logging.Entity(executed)). - Hex("child_block", logging.Entity(child)). - Msg("child block is ready to be executed") - } - } - - // remove the executed block - executionQueues.Remove(executed.ID()) - - return nil - }) - - if err != nil { - e.log.Fatal().Err(err). - Hex("block", logging.Entity(executed)). - Uint64("height", executed.Block.Header.Height). - Msg("error while requeueing blocks after execution") - } - - for child, missing := range missingCollections { - err := e.addOrFetch(child.ID(), child.Block.Header.Height, missing) - if err != nil { - return fmt.Errorf("fail to add missing collections: %w", err) - } - } - - return nil -} - -// executeBlockIfComplete checks whether the block is ready to be executed. 
-// if yes, execute the block -// return a bool indicates whether the block was completed -func (e *Engine) executeBlockIfComplete(eb *entity.ExecutableBlock) bool { - - if eb.Executing { - return false - } - - // if don't have the delta, then check if everything is ready for executing - // the block - if eb.IsComplete() { - - if e.extensiveLogging { - e.logExecutableBlock(eb) - } - - // no external synchronisation is used because this method must be run in a thread-safe context - eb.Executing = true - - e.unit.Launch(func() { - e.executeBlock(e.unit.Ctx(), eb) - }) - return true - } - return false -} - -// OnCollection is a callback for handling the collections requested by the -// collection requester. -func (e *Engine) OnCollection(originID flow.Identifier, entity flow.Entity) { - // convert entity to strongly typed collection - collection, ok := entity.(*flow.Collection) - if !ok { - e.log.Error().Msgf("invalid entity type (%T)", entity) - return - } - - // no need to validate the origin ID, since the collection requester has - // checked the origin must be a collection node. - - err := e.handleCollection(originID, collection) - if err != nil { - e.log.Error().Err(err).Msg("could not handle collection") - } -} - -// a block can't be executed if its collection is missing. -// since a collection can belong to multiple blocks, we need to -// find all the blocks that are needing this collection, and then -// check if any of these block becomes executable and execute it if -// is. -func (e *Engine) handleCollection( - originID flow.Identifier, - collection *flow.Collection, -) error { - collID := collection.ID() - - span, _ := e.tracer.StartCollectionSpan(context.Background(), collID, trace.EXEHandleCollection) - defer span.End() - - lg := e.log.With().Hex("collection_id", collID[:]).Logger() - - lg.Info().Hex("sender", originID[:]).Int("len", collection.Len()).Msg("handle collection") - defer func(startTime time.Time) { - lg.Info().TimeDiff("duration", time.Now(), startTime).Msg("collection handled") - }(time.Now()) - - // TODO: bail if have seen this collection before. - err := e.collections.Store(collection) - if err != nil { - return fmt.Errorf("cannot store collection: %w", err) - } - - return e.mempool.BlockByCollection.Run( - func(backdata *stdmap.BlockByCollectionBackdata) error { - return e.addCollectionToMempool(collection, backdata) - }, - ) -} - -func (e *Engine) addCollectionToMempool( - collection *flow.Collection, - backdata *stdmap.BlockByCollectionBackdata, -) error { - collID := collection.ID() - blockByCollectionID, exists := backdata.ByID(collID) - - // if we don't find any block for this collection, then - // means we don't need this collection any more. - // or it was ejected from the mempool when it was full. 
- // either way, we will return - if !exists { - return nil - } - - for _, executableBlock := range blockByCollectionID.ExecutableBlocks { - blockID := executableBlock.ID() - - completeCollection, ok := executableBlock.CompleteCollections[collID] - if !ok { - return fmt.Errorf("cannot handle collection: internal inconsistency - collection pointing to block %v which does not contain said collection", - blockID) - } - - e.metrics.UpdateCollectionMaxHeight(executableBlock.Block.Header.Height) - - if completeCollection.IsCompleted() { - // already received transactions for this collection - continue - } - - // update the transactions of the collection - // Note: it's guaranteed the transactions are for this collection, because - // the collection id matches with the CollectionID from the collection guarantee - completeCollection.Transactions = collection.Transactions - - // check if the block becomes executable - _ = e.executeBlockIfComplete(executableBlock) - } - - // since we've received this collection, remove it from the index - // this also prevents from executing the same block twice, because the second - // time when the collection arrives, it will not be found in the blockByCollectionID - // index. - backdata.Remove(collID) - - return nil -} - -func newQueue(blockify queue.Blockify, queues *stdmap.QueuesBackdata) ( - *queue.Queue, - bool, -) { - q := queue.NewQueue(blockify) - qID := q.ID() - return q, queues.Add(qID, q) -} - -// enqueue adds a block to the queues, return the queue that includes the block and booleans -// * is block new one (it's not already enqueued, not a duplicate) -// * is head of the queue (new queue has been created) -// -// Queues are chained blocks. Since a block can't be executable until its parent has been -// executed, the chained structure allows us to only check the head of each queue to see if -// any block becomes executable. -// for instance we have one queue whose head is A: -// -// A <- B <- C -// ^- D <- E -// -// If we receive E <- F, then we will add it to the queue: -// -// A <- B <- C -// ^- D <- E <- F -// -// Even through there are 6 blocks, we only need to check if block A becomes executable. -// when the parent block isn't in the queue, we add it as a new queue. for instance, if -// we receive H <- G, then the queues will become: -// -// A <- B <- C -// ^- D <- E -// G -func enqueue(blockify queue.Blockify, queues *stdmap.QueuesBackdata) ( - *queue.Queue, - bool, - bool, -) { - for _, queue := range queues.All() { - if stored, isNew := queue.TryAdd(blockify); stored { - return queue, isNew, false - } - } - queue, isNew := newQueue(blockify, queues) - return queue, isNew, true -} - -// check if the block's collections have been received, -// if yes, add the collection to the executable block -// if no, fetch the collection. -// if a block has 3 collection, it would be 3 reqs to fetch them. -// mark the collection belongs to the block, -// mark the block contains this collection. 
-// It returns the missing collections to be fetched -// TODO: to rename -func (e *Engine) matchAndFindMissingCollections( - executableBlock *entity.ExecutableBlock, - collectionsBackdata *stdmap.BlockByCollectionBackdata, -) ([]*flow.CollectionGuarantee, error) { - missingCollections := make([]*flow.CollectionGuarantee, 0, len(executableBlock.Block.Payload.Guarantees)) - - for _, guarantee := range executableBlock.Block.Payload.Guarantees { - coll := &entity.CompleteCollection{ - Guarantee: guarantee, - } - executableBlock.CompleteCollections[guarantee.ID()] = coll - - // check if we have requested this collection before. - // blocksNeedingCollection stores all the blocks that contain this collection - - if blocksNeedingCollection, exists := collectionsBackdata.ByID(guarantee.ID()); exists { - // if we've requested this collection, it means other block might also contain this collection. - // in this case, add this block to the map so that when the collection is received, - // we could update the executable block - blocksNeedingCollection.ExecutableBlocks[executableBlock.ID()] = executableBlock - - // since the collection is still being requested, we don't have the transactions - // yet, so exit - continue - } - - // the storage doesn't have this collection, meaning this is our first time seeing this - // collection guarantee, create an entry to store in collectionsBackdata in order to - // update the executable blocks when the collection is received. - blocksNeedingCollection := &entity.BlocksByCollection{ - CollectionID: guarantee.ID(), - ExecutableBlocks: map[flow.Identifier]*entity.ExecutableBlock{executableBlock.ID(): executableBlock}, - } - - added := collectionsBackdata.Add(blocksNeedingCollection.ID(), blocksNeedingCollection) - if !added { - // sanity check, should not happen, unless mempool implementation has a bug - return nil, fmt.Errorf("collection already mapped to block") - } - - missingCollections = append(missingCollections, guarantee) - } - - return missingCollections, nil -} - -// save the execution result of a block -func (e *Engine) saveExecutionResults( - ctx context.Context, - result *execution.ComputationResult, -) error { - span, childCtx := e.tracer.StartSpanFromContext(ctx, trace.EXESaveExecutionResults) - defer span.End() - - e.log.Debug(). - Hex("block_id", logging.Entity(result.ExecutableBlock)). - Msg("received computation result") - - for _, event := range result.ExecutionResult.ServiceEvents { - e.log.Info(). - Uint64("block_height", result.ExecutableBlock.Height()). - Hex("block_id", logging.Entity(result.ExecutableBlock)). - Str("event_type", event.Type.String()). - Msg("service event emitted") - } - - err := e.execState.SaveExecutionResults(childCtx, result) - if err != nil { - return fmt.Errorf("cannot persist execution state: %w", err) - } - - finalEndState := result.CurrentEndState() - e.log.Debug(). - Hex("block_id", logging.Entity(result.ExecutableBlock)). - Hex("start_state", result.ExecutableBlock.StartState[:]). - Hex("final_state", finalEndState[:]). - Msg("saved computation results") - - return nil -} - -// logExecutableBlock logs all data about an executable block -// over time we should skip this -func (e *Engine) logExecutableBlock(eb *entity.ExecutableBlock) { - // log block - e.log.Debug(). - Hex("block_id", logging.Entity(eb)). - Hex("prev_block_id", logging.ID(eb.Block.Header.ParentID)). - Uint64("block_height", eb.Block.Header.Height). - Int("number_of_collections", len(eb.Collections())). 
- RawJSON("block_header", logging.AsJSON(eb.Block.Header)). - Msg("extensive log: block header") - - // logs transactions - for i, col := range eb.Collections() { - for j, tx := range col.Transactions { - e.log.Debug(). - Hex("block_id", logging.Entity(eb)). - Int("block_height", int(eb.Block.Header.Height)). - Hex("prev_block_id", logging.ID(eb.Block.Header.ParentID)). - Int("collection_index", i). - Int("tx_index", j). - Hex("collection_id", logging.ID(col.Guarantee.CollectionID)). - Hex("tx_hash", logging.Entity(tx)). - Hex("start_state_commitment", eb.StartState[:]). - RawJSON("transaction", logging.AsJSON(tx)). - Msg("extensive log: executed tx content") - } - } -} - -// addOrFetch checks if there are stored collections for the given guarantees, if there is, -// forward them to mempool to process the collection, otherwise fetch the collections. -// any error returned are exception -func (e *Engine) addOrFetch( - blockID flow.Identifier, - height uint64, - guarantees []*flow.CollectionGuarantee, -) error { - return e.fetchAndHandleCollection(blockID, height, guarantees, func(collection *flow.Collection) error { - err := e.mempool.BlockByCollection.Run( - func(backdata *stdmap.BlockByCollectionBackdata) error { - return e.addCollectionToMempool(collection, backdata) - }) - - if err != nil { - return fmt.Errorf("could not add collection to mempool: %w", err) - } - return nil - }) -} - -// addOrFetch checks if there are stored collections for the given guarantees, if there is, -// forward them to the handler to process the collection, otherwise fetch the collections. -// any error returned are exception -func (e *Engine) fetchAndHandleCollection( - blockID flow.Identifier, - height uint64, - guarantees []*flow.CollectionGuarantee, - handleCollection func(*flow.Collection) error, -) error { - fetched := false - for _, guarantee := range guarantees { - // if we've requested this collection, we will store it in the storage, - // so check the storage to see whether we've seen it. 
- collection, err := e.collections.ByID(guarantee.CollectionID) - - if err == nil { - // we found the collection from storage, forward this collection to handler - err = handleCollection(collection) - if err != nil { - return fmt.Errorf("could not handle collection: %w", err) - } - - continue - } - - // check if there was exception - if !errors.Is(err, storage.ErrNotFound) { - return fmt.Errorf("error while querying for collection: %w", err) - } - - err = e.collectionFetcher.FetchCollection(blockID, height, guarantee) - if err != nil { - return fmt.Errorf("could not fetch collection: %w", err) - } - fetched = true - } - - // make sure that the requests are dispatched immediately by the requester - if fetched { - e.collectionFetcher.Force() - e.metrics.ExecutionCollectionRequestSent() - } - - return nil -} - -func logQueueState(log zerolog.Logger, queues *stdmap.QueuesBackdata, blockID flow.Identifier) { - all := queues.All() - - log.With().Hex("queue_state__executed_block_id", blockID[:]).Int("count", len(all)).Logger() - for i, queue := range all { - log.Error().Msgf("%v-th queue state: %v", i, queue.String()) - } -} diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go deleted file mode 100644 index b7d5a3665d6..00000000000 --- a/engine/execution/ingestion/engine_test.go +++ /dev/null @@ -1,827 +0,0 @@ -package ingestion - -import ( - "context" - "crypto/rand" - "fmt" - "sync" - "testing" - "time" - - "github.com/onflow/crypto" - "github.com/rs/zerolog" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - enginePkg "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/engine/execution" - computation "github.com/onflow/flow-go/engine/execution/computation/mock" - "github.com/onflow/flow-go/engine/execution/ingestion/loader" - "github.com/onflow/flow-go/engine/execution/ingestion/mocks" - "github.com/onflow/flow-go/engine/execution/ingestion/stop" - "github.com/onflow/flow-go/engine/execution/ingestion/uploader" - uploadermock "github.com/onflow/flow-go/engine/execution/ingestion/uploader/mock" - provider "github.com/onflow/flow-go/engine/execution/provider/mock" - stateMock "github.com/onflow/flow-go/engine/execution/state/mock" - "github.com/onflow/flow-go/fvm/storage/snapshot" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/mempool/entity" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/trace" - "github.com/onflow/flow-go/network/mocknetwork" - protocol "github.com/onflow/flow-go/state/protocol/mock" - storageerr "github.com/onflow/flow-go/storage" - storage "github.com/onflow/flow-go/storage/mock" - "github.com/onflow/flow-go/utils/unittest" -) - -type testingContext struct { - t *testing.T - engine *Engine - headers *storage.Headers - blocks *storage.Blocks - collections *mocks.MockCollectionStore - state *protocol.State - computationManager *computation.ComputationManager - providerEngine *provider.ProviderEngine - executionState *stateMock.ExecutionState - stopControl *stop.StopControl - uploadMgr *uploader.Manager - fetcher *mocks.MockFetcher - - mu *sync.Mutex -} - -func runWithEngine(t *testing.T, f func(testingContext)) { - - net := new(mocknetwork.EngineRegistry) - - // generates signing identity including staking key for signing - seed := make([]byte, crypto.KeyGenSeedMinLen) - n, err := rand.Read(seed) - require.Equal(t, n, crypto.KeyGenSeedMinLen) - require.NoError(t, err) - sk, err := crypto.GeneratePrivateKey(crypto.BLSBLS12381, 
seed) - require.NoError(t, err) - myIdentity := unittest.IdentityFixture() - myIdentity.Role = flow.RoleExecution - myIdentity.StakingPubKey = sk.PublicKey() - - headers := storage.NewHeaders(t) - blocks := storage.NewBlocks(t) - collections := mocks.NewMockCollectionStore() - - computationManager := computation.NewComputationManager(t) - providerEngine := provider.NewProviderEngine(t) - protocolState := protocol.NewState(t) - executionState := stateMock.NewExecutionState(t) - - var engine *Engine - - defer func() { - unittest.AssertClosesBefore(t, engine.Done(), 5*time.Second, "expect to stop before timeout") - computationManager.AssertExpectations(t) - protocolState.AssertExpectations(t) - executionState.AssertExpectations(t) - providerEngine.AssertExpectations(t) - }() - - log := unittest.Logger() - metrics := metrics.NewNoopCollector() - - tracer, err := trace.NewTracer(log, "test", "test", trace.SensitivityCaptureAll) - require.NoError(t, err) - - unit := enginePkg.NewUnit() - stopControl := stop.NewStopControl( - unit, - time.Second, - zerolog.Nop(), - executionState, - headers, - nil, - nil, - &flow.Header{Height: 1}, - false, - false, - ) - - uploadMgr := uploader.NewManager(trace.NewNoopTracer()) - - fetcher := mocks.NewMockFetcher() - loader := loader.NewUnexecutedLoader(log, protocolState, headers, executionState) - - engine, err = New( - unit, - log, - net, - fetcher, - headers, - blocks, - collections, - computationManager, - providerEngine, - executionState, - metrics, - tracer, - false, - nil, - uploadMgr, - stopControl, - loader, - ) - require.NoError(t, err) - - f(testingContext{ - t: t, - engine: engine, - headers: headers, - blocks: blocks, - collections: collections, - state: protocolState, - computationManager: computationManager, - providerEngine: providerEngine, - executionState: executionState, - uploadMgr: uploadMgr, - stopControl: stopControl, - fetcher: fetcher, - - mu: &sync.Mutex{}, - }) - - <-engine.Done() -} - -// TestExecuteOneBlock verifies after collection is received, -// block is executed, uploaded, and broadcasted -func TestExecuteOneBlock(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - // create a mocked storage that has similar behavior as the real execution state. - // the mocked storage allows us to prepare results for the prepared blocks, so that - // the mocked methods know what to return, and it also allows us to verify that the - // mocked API is called with correct data. 
- store := mocks.NewMockBlockStore(t) - - col := unittest.CollectionFixture(1) - // Root <- A - blockA := makeBlockWithCollection(store.RootBlock, &col) - result := store.CreateBlockAndMockResult(t, blockA) - - ctx.mockIsBlockExecuted(store) - ctx.mockStateCommitmentByBlockID(store) - ctx.mockGetExecutionResultID(store) - ctx.mockNewStorageSnapshot(result) - - // receive block - err := ctx.engine.handleBlock(context.Background(), blockA.Block) - require.NoError(t, err) - - wg := sync.WaitGroup{} - wg.Add(1) // wait for block A to be executed - - ctx.mockComputeBlock(store) - ctx.mockSaveExecutionResults(store, &wg) - - // verify upload will be called - uploader := uploadermock.NewUploader(ctx.t) - uploader.On("Upload", result).Return(nil).Once() - ctx.uploadMgr.AddUploader(uploader) - - // verify broadcast will be called - ctx.providerEngine.On("BroadcastExecutionReceipt", - mock.Anything, - blockA.Block.Header.Height, - result.ExecutionReceipt).Return(true, nil).Once() - - err = ctx.engine.handleCollection(unittest.IdentifierFixture(), &col) - require.NoError(t, err) - - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - // verify collection is fetched - require.True(t, ctx.fetcher.IsFetched(col.ID())) - - // verify block is executed - store.AssertExecuted(t, "A", blockA.ID()) - }) -} - -// verify block will be executed if collection is received first -func TestExecuteBlocks(t *testing.T) { - - runWithEngine(t, func(ctx testingContext) { - store := mocks.NewMockBlockStore(t) - - col1 := unittest.CollectionFixture(1) - col2 := unittest.CollectionFixture(1) - // Root <- A[C1] <- B[C2] - // prepare two blocks, so that receiving C2 before C1 won't trigger any block to be executed, - // which creates the case where C2 collection is received first, and block B will become - // executable as soon as its parent block A is executed. 
- blockA := makeBlockWithCollection(store.RootBlock, &col1) - blockB := makeBlockWithCollection(blockA.Block.Header, &col2) - resultA := store.CreateBlockAndMockResult(t, blockA) - resultB := store.CreateBlockAndMockResult(t, blockB) - - ctx.mockIsBlockExecuted(store) - ctx.mockStateCommitmentByBlockID(store) - ctx.mockGetExecutionResultID(store) - ctx.mockNewStorageSnapshot(resultA) - ctx.mockNewStorageSnapshot(resultB) - ctx.providerEngine.On("BroadcastExecutionReceipt", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - - // receive block - err := ctx.engine.handleBlock(context.Background(), blockA.Block) - require.NoError(t, err) - - err = ctx.engine.handleBlock(context.Background(), blockB.Block) - require.NoError(t, err) - - ctx.mockComputeBlock(store) - wg := sync.WaitGroup{} - wg.Add(2) // wait for 2 blocks to be executed - ctx.mockSaveExecutionResults(store, &wg) - - require.NoError(t, ctx.engine.handleCollection(unittest.IdentifierFixture(), &col2)) - require.NoError(t, ctx.engine.handleCollection(unittest.IdentifierFixture(), &col1)) - - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - // verify collection is fetched - require.True(t, ctx.fetcher.IsFetched(col1.ID())) - require.True(t, ctx.fetcher.IsFetched(col2.ID())) - - // verify block is executed - store.AssertExecuted(t, "A", blockA.ID()) - store.AssertExecuted(t, "B", blockB.ID()) - }) -} - -// verify block will be executed if collection is already in storage -func TestExecuteNextBlockIfCollectionIsReady(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - store := mocks.NewMockBlockStore(t) - - col1 := unittest.CollectionFixture(1) - col2 := unittest.CollectionFixture(1) - - // Root <- A[C1] <- B[C2] - blockA := makeBlockWithCollection(store.RootBlock, &col1) - blockB := makeBlockWithCollection(blockA.Block.Header, &col2) - resultA := store.CreateBlockAndMockResult(t, blockA) - resultB := store.CreateBlockAndMockResult(t, blockB) - - // C2 is available in storage - require.NoError(t, ctx.collections.Store(&col2)) - - ctx.mockIsBlockExecuted(store) - ctx.mockStateCommitmentByBlockID(store) - ctx.mockGetExecutionResultID(store) - ctx.mockNewStorageSnapshot(resultA) - ctx.mockNewStorageSnapshot(resultB) - - // receiving block A and B will not trigger any execution - // because A is missing collection C1, B is waiting for A to be executed - err := ctx.engine.handleBlock(context.Background(), blockA.Block) - require.NoError(t, err) - - err = ctx.engine.handleBlock(context.Background(), blockB.Block) - require.NoError(t, err) - - ctx.providerEngine.On("BroadcastExecutionReceipt", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - ctx.mockComputeBlock(store) - wg := sync.WaitGroup{} - wg.Add(2) // waiting for A and B to be executed - ctx.mockSaveExecutionResults(store, &wg) - - // receiving collection C1 will execute both A and B - err = ctx.engine.handleCollection(unittest.IdentifierFixture(), &col1) - require.NoError(t, err) - - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - // verify collection is fetched - require.True(t, ctx.fetcher.IsFetched(col1.ID())) - require.False(t, ctx.fetcher.IsFetched(col2.ID())) - - // verify block is executed - store.AssertExecuted(t, "A", blockA.ID()) - store.AssertExecuted(t, "B", blockB.ID()) - }) -} - -// verify block will only be executed once even if block or collection are received multiple times -func TestExecuteBlockOnlyOnce(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - store := 
mocks.NewMockBlockStore(t) - - col := unittest.CollectionFixture(1) - // Root <- A[C] - blockA := makeBlockWithCollection(store.RootBlock, &col) - resultA := store.CreateBlockAndMockResult(t, blockA) - - ctx.mockIsBlockExecuted(store) - ctx.mockStateCommitmentByBlockID(store) - ctx.mockGetExecutionResultID(store) - ctx.mockNewStorageSnapshot(resultA) - - // receive block - err := ctx.engine.handleBlock(context.Background(), blockA.Block) - require.NoError(t, err) - - // receive block again before collection is received - err = ctx.engine.handleBlock(context.Background(), blockA.Block) - require.NoError(t, err) - - ctx.mockComputeBlock(store) - wg := sync.WaitGroup{} - wg.Add(1) // wait for block A to be executed - ctx.mockSaveExecutionResults(store, &wg) - ctx.providerEngine.On("BroadcastExecutionReceipt", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - - err = ctx.engine.handleCollection(unittest.IdentifierFixture(), &col) - require.NoError(t, err) - - // receiving collection again before block is executed - err = ctx.engine.handleCollection(unittest.IdentifierFixture(), &col) - require.NoError(t, err) - - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - // receiving collection again after block is executed - err = ctx.engine.handleCollection(unittest.IdentifierFixture(), &col) - require.NoError(t, err) - - // verify collection is fetched - require.True(t, ctx.fetcher.IsFetched(col.ID())) - - // verify block is executed - store.AssertExecuted(t, "A", blockA.ID()) - }) -} - -// given two blocks depend on the same root block and contain same collections, -// receiving all collections will trigger the execution of both blocks concurrently -func TestExecuteForkConcurrently(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - store := mocks.NewMockBlockStore(t) - - // create A and B that have the same collections and same parent - // Root <- A[C1, C2] - // <- B[C1, C2] - col1 := unittest.CollectionFixture(1) - col2 := unittest.CollectionFixture(1) - - blockA := makeBlockWithCollection(store.RootBlock, &col1, &col2) - blockB := makeBlockWithCollection(store.RootBlock, &col1, &col2) - resultA := store.CreateBlockAndMockResult(t, blockA) - resultB := store.CreateBlockAndMockResult(t, blockB) - - ctx.mockIsBlockExecuted(store) - ctx.mockStateCommitmentByBlockID(store) - ctx.mockGetExecutionResultID(store) - ctx.mockNewStorageSnapshot(resultA) - ctx.mockNewStorageSnapshot(resultB) - - // receive blocks - err := ctx.engine.handleBlock(context.Background(), blockA.Block) - require.NoError(t, err) - - err = ctx.engine.handleBlock(context.Background(), blockB.Block) - require.NoError(t, err) - - err = ctx.engine.handleCollection(unittest.IdentifierFixture(), &col1) - require.NoError(t, err) - - ctx.providerEngine.On("BroadcastExecutionReceipt", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - ctx.mockComputeBlock(store) - wg := sync.WaitGroup{} - wg.Add(2) // wait for A and B to be executed - ctx.mockSaveExecutionResults(store, &wg) - - err = ctx.engine.handleCollection(unittest.IdentifierFixture(), &col2) - require.NoError(t, err) - - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - // verify block is executed - store.AssertExecuted(t, "A", blockA.ID()) - store.AssertExecuted(t, "B", blockB.ID()) - }) -} - -// verify block will be executed in order -func TestExecuteBlockInOrder(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - store := mocks.NewMockBlockStore(t) - // create A and B that have the same collections and 
same parent - // Root <- A[C1, C2] - // <- B[C2] <- C[C3] - // verify receiving C3, C1, then C2 will trigger all blocks to be executed - col1 := unittest.CollectionFixture(1) - col2 := unittest.CollectionFixture(1) - col3 := unittest.CollectionFixture(1) - - blockA := makeBlockWithCollection(store.RootBlock, &col1, &col2) - blockB := makeBlockWithCollection(store.RootBlock, &col2) - blockC := makeBlockWithCollection(store.RootBlock, &col3) - resultA := store.CreateBlockAndMockResult(t, blockA) - resultB := store.CreateBlockAndMockResult(t, blockB) - resultC := store.CreateBlockAndMockResult(t, blockC) - - ctx.mockIsBlockExecuted(store) - ctx.mockStateCommitmentByBlockID(store) - ctx.mockGetExecutionResultID(store) - ctx.mockNewStorageSnapshot(resultA) - ctx.mockNewStorageSnapshot(resultB) - ctx.mockNewStorageSnapshot(resultC) - - // receive blocks - err := ctx.engine.handleBlock(context.Background(), blockA.Block) - require.NoError(t, err) - - err = ctx.engine.handleBlock(context.Background(), blockB.Block) - require.NoError(t, err) - - err = ctx.engine.handleBlock(context.Background(), blockC.Block) - require.NoError(t, err) - - err = ctx.engine.handleCollection(unittest.IdentifierFixture(), &col3) - require.NoError(t, err) - - err = ctx.engine.handleCollection(unittest.IdentifierFixture(), &col1) - require.NoError(t, err) - - ctx.providerEngine.On("BroadcastExecutionReceipt", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - ctx.mockComputeBlock(store) - wg := sync.WaitGroup{} - wg.Add(3) // waiting for A, B, C to be executed - ctx.mockSaveExecutionResults(store, &wg) - - err = ctx.engine.handleCollection(unittest.IdentifierFixture(), &col2) - require.NoError(t, err) - - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - // verify block is executed - store.AssertExecuted(t, "A", blockA.ID()) - store.AssertExecuted(t, "B", blockB.ID()) - store.AssertExecuted(t, "C", blockC.ID()) - }) -} - -func logBlocks(blocks map[string]*entity.ExecutableBlock) { - log := unittest.Logger() - for name, b := range blocks { - log.Debug().Msgf("creating blocks for testing, block %v's ID:%v", name, b.ID()) - } -} - -// verify that when blocks above the stop height are finalized, they won't -// be executed -func TestStopAtHeightWhenFinalizedBeforeExecuted(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - store := mocks.NewMockBlockStore(t) - - // this collection is used as trigger of execution - executionTrigger := unittest.CollectionFixture(1) - blockA := makeBlockWithCollection(store.RootBlock, &executionTrigger) - blockB := makeBlockWithCollection(blockA.Block.Header) - blockC := makeBlockWithCollection(blockB.Block.Header) - blockD := makeBlockWithCollection(blockC.Block.Header) - - resultA := store.CreateBlockAndMockResult(t, blockA) - resultB := store.CreateBlockAndMockResult(t, blockB) - store.CreateBlockAndMockResult(t, blockC) - store.CreateBlockAndMockResult(t, blockD) - - stopHeight := store.RootBlock.Height + 3 - require.Equal(t, stopHeight, blockC.Block.Header.Height) // stop at C (C will not be executed) - err := ctx.stopControl.SetStopParameters(stop.StopParameters{ - StopBeforeHeight: stopHeight, - }) - require.NoError(t, err) - - ctx.mockIsBlockExecuted(store) - ctx.mockStateCommitmentByBlockID(store) - ctx.mockGetExecutionResultID(store) - ctx.mockNewStorageSnapshot(resultA) - ctx.mockNewStorageSnapshot(resultB) - - // receive blocks - err = ctx.engine.handleBlock(context.Background(), blockA.Block) - require.NoError(t, err) - - err = 
ctx.engine.handleBlock(context.Background(), blockB.Block) - require.NoError(t, err) - - err = ctx.engine.handleBlock(context.Background(), blockC.Block) - require.NoError(t, err) - - err = ctx.engine.handleBlock(context.Background(), blockD.Block) - require.NoError(t, err) - - ctx.providerEngine.On("BroadcastExecutionReceipt", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - ctx.mockComputeBlock(store) - wg := sync.WaitGroup{} - wg.Add(2) // only 2 blocks (A, B) will be executed - ctx.mockSaveExecutionResults(store, &wg) - - // all blocks finalized - ctx.stopControl.BlockFinalizedForTesting(blockA.Block.Header) - ctx.stopControl.BlockFinalizedForTesting(blockB.Block.Header) - ctx.stopControl.BlockFinalizedForTesting(blockC.Block.Header) - ctx.stopControl.BlockFinalizedForTesting(blockD.Block.Header) - - // receiving the colleciton to trigger all blocks to be executed - err = ctx.engine.handleCollection(unittest.IdentifierFixture(), &executionTrigger) - require.NoError(t, err) - - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - // since stop height is C, verify that only A and B are executed, C and D are not executed - store.AssertExecuted(t, "A", blockA.ID()) - store.AssertExecuted(t, "B", blockB.ID()) - - store.AssertNotExecuted(t, "C", blockC.ID()) - store.AssertNotExecuted(t, "D", blockD.ID()) - }) -} - -// verify that blocks above the stop height won't be executed, even if they are -// later they got finalized -func TestStopAtHeightWhenExecutedBeforeFinalized(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - store := mocks.NewMockBlockStore(t) - - blockA := makeBlockWithCollection(store.RootBlock) - blockB := makeBlockWithCollection(blockA.Block.Header) - blockC := makeBlockWithCollection(blockB.Block.Header) - blockD := makeBlockWithCollection(blockC.Block.Header) - - resultA := store.CreateBlockAndMockResult(t, blockA) - resultB := store.CreateBlockAndMockResult(t, blockB) - store.CreateBlockAndMockResult(t, blockC) - store.CreateBlockAndMockResult(t, blockD) - - stopHeight := store.RootBlock.Height + 3 - require.Equal(t, stopHeight, blockC.Block.Header.Height) // stop at C (C will not be executed) - err := ctx.stopControl.SetStopParameters(stop.StopParameters{ - StopBeforeHeight: stopHeight, - }) - require.NoError(t, err) - - ctx.mockIsBlockExecuted(store) - ctx.mockStateCommitmentByBlockID(store) - ctx.mockGetExecutionResultID(store) - ctx.mockNewStorageSnapshot(resultA) - ctx.mockNewStorageSnapshot(resultB) - - ctx.providerEngine.On("BroadcastExecutionReceipt", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - ctx.mockComputeBlock(store) - wg := sync.WaitGroup{} - wg.Add(2) // waiting for only A, B to be executed - ctx.mockSaveExecutionResults(store, &wg) - - // receive blocks - err = ctx.engine.handleBlock(context.Background(), blockA.Block) - require.NoError(t, err) - - err = ctx.engine.handleBlock(context.Background(), blockB.Block) - require.NoError(t, err) - - err = ctx.engine.handleBlock(context.Background(), blockC.Block) - require.NoError(t, err) - - err = ctx.engine.handleBlock(context.Background(), blockD.Block) - require.NoError(t, err) - - // all blocks finalized - ctx.stopControl.BlockFinalizedForTesting(blockA.Block.Header) - ctx.stopControl.BlockFinalizedForTesting(blockB.Block.Header) - ctx.stopControl.BlockFinalizedForTesting(blockC.Block.Header) - ctx.stopControl.BlockFinalizedForTesting(blockD.Block.Header) - - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - // since stop height is C, 
verify that only A and B are executed, C and D are not executed - store.AssertExecuted(t, "A", blockA.ID()) - store.AssertExecuted(t, "B", blockB.ID()) - - store.AssertNotExecuted(t, "C", blockC.ID()) - store.AssertNotExecuted(t, "D", blockD.ID()) - }) -} - -// verify that when blocks execution and finalization happen concurrently -func TestStopAtHeightWhenExecutionFinalization(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - store := mocks.NewMockBlockStore(t) - - // Root <- A <- B (stop height, won't execute) <- C - // verify when executing A and finalizing B happens concurrently, - // still won't allow B and C to be executed - blockA := makeBlockWithCollection(store.RootBlock) - blockB := makeBlockWithCollection(blockA.Block.Header) - blockC := makeBlockWithCollection(blockB.Block.Header) - - resultA := store.CreateBlockAndMockResult(t, blockA) - store.CreateBlockAndMockResult(t, blockB) - store.CreateBlockAndMockResult(t, blockC) - - err := ctx.stopControl.SetStopParameters(stop.StopParameters{ - StopBeforeHeight: blockB.Block.Header.Height, - }) - require.NoError(t, err) - - ctx.mockIsBlockExecuted(store) - ctx.mockStateCommitmentByBlockID(store) - ctx.mockGetExecutionResultID(store) - ctx.mockNewStorageSnapshot(resultA) - - ctx.providerEngine.On("BroadcastExecutionReceipt", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - ctx.mockComputeBlock(store) - wg := sync.WaitGroup{} - // waiting for: - // 1. A, B, C to be handled - // 2. A, B, C to be finalized - // 3. only A to be executed - wg.Add(3) - ctx.mockSaveExecutionResults(store, &wg) - - // receive blocks - go func(wg *sync.WaitGroup) { - err = ctx.engine.handleBlock(context.Background(), blockA.Block) - require.NoError(t, err) - - err = ctx.engine.handleBlock(context.Background(), blockB.Block) - require.NoError(t, err) - - err = ctx.engine.handleBlock(context.Background(), blockC.Block) - require.NoError(t, err) - wg.Done() - }(&wg) - - go func(wg *sync.WaitGroup) { - // all blocks finalized - ctx.stopControl.BlockFinalizedForTesting(blockA.Block.Header) - ctx.stopControl.BlockFinalizedForTesting(blockB.Block.Header) - ctx.stopControl.BlockFinalizedForTesting(blockC.Block.Header) - wg.Done() - }(&wg) - - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - // since stop height is C, verify that only A and B are executed, C and D are not executed - store.AssertExecuted(t, "A", blockA.ID()) - store.AssertNotExecuted(t, "B", blockB.ID()) - store.AssertNotExecuted(t, "C", blockC.ID()) - }) -} - -// TestExecutedBlockUploadedFailureDoesntBlock tests that block processing continues even the -// uploader fails with an error -func TestExecutedBlockUploadedFailureDoesntBlock(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - store := mocks.NewMockBlockStore(t) - - col := unittest.CollectionFixture(1) - // Root <- A - blockA := makeBlockWithCollection(store.RootBlock, &col) - result := store.CreateBlockAndMockResult(t, blockA) - - ctx.mockIsBlockExecuted(store) - ctx.mockStateCommitmentByBlockID(store) - ctx.mockGetExecutionResultID(store) - ctx.mockNewStorageSnapshot(result) - - // receive block - err := ctx.engine.handleBlock(context.Background(), blockA.Block) - require.NoError(t, err) - - ctx.mockComputeBlock(store) - wg := sync.WaitGroup{} - wg.Add(1) // wait for block A to be executed - ctx.mockSaveExecutionResults(store, &wg) - - // verify upload will fail - uploader1 := uploadermock.NewUploader(ctx.t) - uploader1.On("Upload", result).Return(fmt.Errorf("error uploading")).Once() - 
ctx.uploadMgr.AddUploader(uploader1) - - // verify broadcast will be called - ctx.providerEngine.On("BroadcastExecutionReceipt", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - - err = ctx.engine.handleCollection(unittest.IdentifierFixture(), &col) - require.NoError(t, err) - - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - // verify collection is fetched - require.True(t, ctx.fetcher.IsFetched(col.ID())) - - // verify block is executed - store.AssertExecuted(t, "A", blockA.ID()) - }) -} - -func makeCollection() (*flow.Collection, *flow.CollectionGuarantee) { - col := unittest.CollectionFixture(1) - gua := col.Guarantee() - return &col, &gua -} - -func makeBlockWithCollection(parent *flow.Header, cols ...*flow.Collection) *entity.ExecutableBlock { - block := unittest.BlockWithParentFixture(parent) - completeCollections := make(map[flow.Identifier]*entity.CompleteCollection, len(block.Payload.Guarantees)) - for _, col := range cols { - g := col.Guarantee() - block.Payload.Guarantees = append(block.Payload.Guarantees, &g) - - cc := &entity.CompleteCollection{ - Guarantee: &g, - Transactions: col.Transactions, - } - completeCollections[col.ID()] = cc - } - block.Header.PayloadHash = block.Payload.Hash() - - executableBlock := &entity.ExecutableBlock{ - Block: block, - CompleteCollections: completeCollections, - StartState: unittest.StateCommitmentPointerFixture(), - } - return executableBlock -} - -func (ctx *testingContext) mockIsBlockExecuted(store *mocks.MockBlockStore) { - ctx.executionState.On("IsBlockExecuted", mock.Anything, mock.Anything). - Return(func(height uint64, blockID flow.Identifier) (bool, error) { - _, err := store.GetExecuted(blockID) - if err != nil { - return false, nil - } - return true, nil - }) -} - -func (ctx *testingContext) mockStateCommitmentByBlockID(store *mocks.MockBlockStore) { - ctx.executionState.On("StateCommitmentByBlockID", mock.Anything). - Return(func(blockID flow.Identifier) (flow.StateCommitment, error) { - result, err := store.GetExecuted(blockID) - if err != nil { - return flow.StateCommitment{}, storageerr.ErrNotFound - } - return result.Result.CurrentEndState(), nil - }) -} - -func (ctx *testingContext) mockGetExecutionResultID(store *mocks.MockBlockStore) { - ctx.executionState.On("GetExecutionResultID", mock.Anything, mock.Anything). - Return(func(ctx context.Context, blockID flow.Identifier) (flow.Identifier, error) { - blockResult, err := store.GetExecuted(blockID) - if err != nil { - return flow.ZeroID, storageerr.ErrNotFound - } - - return blockResult.Result.ExecutionReceipt.ExecutionResult.ID(), nil - }) -} - -func (ctx *testingContext) mockNewStorageSnapshot(result *execution.ComputationResult) { - // the result is the mocked result for the block, in other words, if the ingestion executes this block, - // the mocked computationManager will produce this result. - // so when mocking the StorageSnapshot method, it must be called with the StartState, as well as its - // parent block, which is used for retrieving the storage state at the end of the parent block. - ctx.executionState.On("NewStorageSnapshot", - *result.ExecutableBlock.StartState, - result.ExecutableBlock.Block.Header.ParentID, - result.ExecutableBlock.Block.Header.Height-1).Return(nil) -} - -func (ctx *testingContext) mockComputeBlock(store *mocks.MockBlockStore) { - ctx.computationManager.On("ComputeBlock", mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
- Return(func(ctx context.Context, - parentBlockExecutionResultID flow.Identifier, - block *entity.ExecutableBlock, - snapshot snapshot.StorageSnapshot) ( - *execution.ComputationResult, error) { - blockResult, ok := store.ResultByBlock[block.ID()] - if !ok { - return nil, fmt.Errorf("block %s not found", block.ID()) - } - return blockResult.Result, nil - }) -} - -func (ctx *testingContext) mockSaveExecutionResults(store *mocks.MockBlockStore, wg *sync.WaitGroup) { - ctx.executionState.On("SaveExecutionResults", mock.Anything, mock.Anything). - Return(func(ctx context.Context, result *execution.ComputationResult) error { - defer wg.Done() - err := store.MarkExecuted(result) - if err != nil { - return err - } - return nil - }) -} diff --git a/engine/execution/ingestion/machine.go b/engine/execution/ingestion/machine.go index a4d0a27ee5d..efb9f521b83 100644 --- a/engine/execution/ingestion/machine.go +++ b/engine/execution/ingestion/machine.go @@ -54,7 +54,7 @@ func NewMachine( broadcaster provider.ProviderEngine, uploader *uploader.Manager, stopControl *stop.StopControl, -) (*Machine, module.ReadyDoneAware, error) { +) (*Machine, *Core, error) { e := &Machine{ log: logger.With().Str("engine", "ingestion_machine").Logger(), diff --git a/engine/execution/ingestion/mempool.go b/engine/execution/ingestion/mempool.go deleted file mode 100644 index 58d2b11f923..00000000000 --- a/engine/execution/ingestion/mempool.go +++ /dev/null @@ -1,29 +0,0 @@ -package ingestion - -//revive:disable:unexported-return - -import ( - "github.com/onflow/flow-go/module/mempool/stdmap" -) - -type Mempool struct { - ExecutionQueue *stdmap.Queues - BlockByCollection *stdmap.BlockByCollections -} - -func (m *Mempool) Run(f func(blockByCollection *stdmap.BlockByCollectionBackdata, executionQueue *stdmap.QueuesBackdata) error) error { - return m.ExecutionQueue.Run(func(queueBackdata *stdmap.QueuesBackdata) error { - return m.BlockByCollection.Run(func(blockByCollectionBackdata *stdmap.BlockByCollectionBackdata) error { - return f(blockByCollectionBackdata, queueBackdata) - }) - }) -} - -func newMempool() *Mempool { - m := &Mempool{ - BlockByCollection: stdmap.NewBlockByCollections(), - ExecutionQueue: stdmap.NewQueues(), - } - - return m -} diff --git a/engine/execution/state/bootstrap/bootstrap_test.go b/engine/execution/state/bootstrap/bootstrap_test.go index 5a7a99dc417..5a9d394a5ac 100644 --- a/engine/execution/state/bootstrap/bootstrap_test.go +++ b/engine/execution/state/bootstrap/bootstrap_test.go @@ -53,7 +53,7 @@ func TestBootstrapLedger(t *testing.T) { } func TestBootstrapLedger_ZeroTokenSupply(t *testing.T) { - expectedStateCommitmentBytes, _ := hex.DecodeString("543fa7112081094b66871692c5a7784f40a9e5cdb9cfda10d1d9b81653966409") + expectedStateCommitmentBytes, _ := hex.DecodeString("6e70a1ff40e4312a547d588a4355a538610bc22844a1faa907b4ec333ff1eca9") expectedStateCommitment, err := flow.ToStateCommitment(expectedStateCommitmentBytes) require.NoError(t, err) diff --git a/engine/testutil/mock/nodes.go b/engine/testutil/mock/nodes.go index d12a409cf57..8a2ea4fed1a 100644 --- a/engine/testutil/mock/nodes.go +++ b/engine/testutil/mock/nodes.go @@ -22,7 +22,7 @@ import ( "github.com/onflow/flow-go/engine/consensus/matching" "github.com/onflow/flow-go/engine/consensus/sealing" "github.com/onflow/flow-go/engine/execution/computation" - "github.com/onflow/flow-go/engine/execution/ingestion" + executionIngest "github.com/onflow/flow-go/engine/execution/ingestion" executionprovider 
"github.com/onflow/flow-go/engine/execution/provider" "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/engine/verification/assigner" @@ -192,7 +192,7 @@ func (cn ConsensusNode) Done() { type ExecutionNode struct { GenericNode FollowerState protocol.FollowerState - IngestionEngine *ingestion.Engine + IngestionEngine *executionIngest.Core ExecutionEngine *computation.Manager RequestEngine *requester.Engine ReceiptsEngine *executionprovider.Engine @@ -217,6 +217,7 @@ func (en ExecutionNode) Ready(ctx context.Context) { // new interface. irctx, _ := irrecoverable.WithSignaler(ctx) en.ReceiptsEngine.Start(irctx) + en.IngestionEngine.Start(irctx) en.FollowerCore.Start(irctx) en.FollowerEngine.Start(irctx) en.SyncEngine.Start(irctx) @@ -238,7 +239,6 @@ func (en ExecutionNode) Done(cancelFunc context.CancelFunc) { // to stop all (deprecated) ready-done-aware <-util.AllDone( - en.IngestionEngine, en.IngestionEngine, en.ReceiptsEngine, en.Ledger, diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index ee3b3b10ee7..a602e628565 100644 --- a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -46,7 +46,6 @@ import ( "github.com/onflow/flow-go/engine/execution/computation/query" "github.com/onflow/flow-go/engine/execution/ingestion" exeFetcher "github.com/onflow/flow-go/engine/execution/ingestion/fetcher" - "github.com/onflow/flow-go/engine/execution/ingestion/loader" "github.com/onflow/flow-go/engine/execution/ingestion/stop" "github.com/onflow/flow-go/engine/execution/ingestion/uploader" executionprovider "github.com/onflow/flow-go/engine/execution/provider" @@ -728,31 +727,25 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ide ) fetcher := exeFetcher.NewCollectionFetcher(node.Log, requestEngine, node.State, false) - loader := loader.NewUnexecutedLoader(node.Log, node.State, node.Headers, execState) rootHead, rootQC := getRoot(t, &node) - ingestionEngine, err := ingestion.New( - unit, + _, ingestionCore, err := ingestion.NewMachine( node.Log, - node.Net, + node.ProtocolEvents, + requestEngine, fetcher, node.Headers, node.Blocks, collectionsStorage, - computationEngine, - pusherEngine, execState, + node.State, node.Metrics, - node.Tracer, - false, - nil, + computationEngine, + pusherEngine, uploader, stopControl, - loader, ) require.NoError(t, err) - requestEngine.WithHandle(ingestionEngine.OnCollection) - - node.ProtocolEvents.AddConsumer(ingestionEngine) + node.ProtocolEvents.AddConsumer(stopControl) followerCore, finalizer := createFollowerCore(t, &node, followerState, followerDistributor, rootHead, rootQC) // mock out hotstuff validator @@ -815,7 +808,7 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ide return testmock.ExecutionNode{ GenericNode: node, FollowerState: followerState, - IngestionEngine: ingestionEngine, + IngestionEngine: ingestionCore, FollowerCore: followerCore, FollowerEngine: followerEng, SyncEngine: syncEngine, diff --git a/fvm/context.go b/fvm/context.go index 4007f22286f..fd198633b54 100644 --- a/fvm/context.go +++ b/fvm/context.go @@ -392,3 +392,12 @@ func WithEVMTracer(tracer debug.EVMTracer) Option { return ctx } } + +// WithReadVersionFromNodeVersionBeacon sets whether the version from the node version beacon should be read +// this should only be disabled for testing +func WithReadVersionFromNodeVersionBeacon(enabled bool) Option { + return func(ctx Context) Context { + ctx.ReadVersionFromNodeVersionBeacon = enabled + return ctx + } +} diff --git 
a/fvm/environment/derived_data_invalidator.go b/fvm/environment/derived_data_invalidator.go index 77659ed984d..754a1e37513 100644 --- a/fvm/environment/derived_data_invalidator.go +++ b/fvm/environment/derived_data_invalidator.go @@ -25,7 +25,7 @@ func (u ContractUpdates) Any() bool { type DerivedDataInvalidator struct { ContractUpdates - MeterParamOverridesUpdated bool + ExecutionParametersUpdated bool } var _ derived.TransactionInvalidator = DerivedDataInvalidator{} @@ -37,16 +37,16 @@ func NewDerivedDataInvalidator( ) DerivedDataInvalidator { return DerivedDataInvalidator{ ContractUpdates: contractUpdates, - MeterParamOverridesUpdated: meterParamOverridesUpdated( + ExecutionParametersUpdated: executionParametersUpdated( executionSnapshot, meterStateRead), } } -// meterParamOverridesUpdated returns true if the meter param overrides have been updated +// executionParametersUpdated returns true if the meter param overrides have been updated // this is done by checking if the registers needed to compute the meter param overrides // have been touched in the execution snapshot -func meterParamOverridesUpdated( +func executionParametersUpdated( executionSnapshot *snapshot.ExecutionSnapshot, meterStateRead *snapshot.ExecutionSnapshot, ) bool { @@ -73,8 +73,8 @@ func (invalidator DerivedDataInvalidator) ProgramInvalidator() derived.ProgramIn return ProgramInvalidator{invalidator} } -func (invalidator DerivedDataInvalidator) MeterParamOverridesInvalidator() derived.MeterParamOverridesInvalidator { - return MeterParamOverridesInvalidator{invalidator} +func (invalidator DerivedDataInvalidator) ExecutionParametersInvalidator() derived.ExecutionParametersInvalidator { + return ExecutionParametersInvalidator{invalidator} } type ProgramInvalidator struct { @@ -82,7 +82,7 @@ type ProgramInvalidator struct { } func (invalidator ProgramInvalidator) ShouldInvalidateEntries() bool { - return invalidator.MeterParamOverridesUpdated || + return invalidator.ExecutionParametersUpdated || invalidator.ContractUpdates.Any() } @@ -91,7 +91,7 @@ func (invalidator ProgramInvalidator) ShouldInvalidateEntry( program *derived.Program, _ *snapshot.ExecutionSnapshot, ) bool { - if invalidator.MeterParamOverridesUpdated { + if invalidator.ExecutionParametersUpdated { // if meter parameters changed we need to invalidate all programs return true } @@ -124,18 +124,18 @@ func (invalidator ProgramInvalidator) ShouldInvalidateEntry( return false } -type MeterParamOverridesInvalidator struct { +type ExecutionParametersInvalidator struct { DerivedDataInvalidator } -func (invalidator MeterParamOverridesInvalidator) ShouldInvalidateEntries() bool { - return invalidator.MeterParamOverridesUpdated +func (invalidator ExecutionParametersInvalidator) ShouldInvalidateEntries() bool { + return invalidator.ExecutionParametersUpdated } -func (invalidator MeterParamOverridesInvalidator) ShouldInvalidateEntry( +func (invalidator ExecutionParametersInvalidator) ShouldInvalidateEntry( _ struct{}, - _ derived.MeterParamOverrides, + _ derived.StateExecutionParameters, _ *snapshot.ExecutionSnapshot, ) bool { - return invalidator.MeterParamOverridesUpdated + return invalidator.ExecutionParametersUpdated } diff --git a/fvm/environment/derived_data_invalidator_test.go b/fvm/environment/derived_data_invalidator_test.go index 5db306642d0..147c03fb57a 100644 --- a/fvm/environment/derived_data_invalidator_test.go +++ b/fvm/environment/derived_data_invalidator_test.go @@ -82,7 +82,7 @@ func TestDerivedDataProgramInvalidator(t *testing.T) { }) t.Run("meter 
parameters invalidator invalidates all entries", func(t *testing.T) { invalidator := environment.DerivedDataInvalidator{ - MeterParamOverridesUpdated: true, + ExecutionParametersUpdated: true, }.ProgramInvalidator() require.True(t, invalidator.ShouldInvalidateEntries()) @@ -207,23 +207,23 @@ func TestDerivedDataProgramInvalidator(t *testing.T) { func TestMeterParamOverridesInvalidator(t *testing.T) { invalidator := environment.DerivedDataInvalidator{}. - MeterParamOverridesInvalidator() + ExecutionParametersInvalidator() require.False(t, invalidator.ShouldInvalidateEntries()) require.False(t, invalidator.ShouldInvalidateEntry( struct{}{}, - derived.MeterParamOverrides{}, + derived.StateExecutionParameters{}, nil)) invalidator = environment.DerivedDataInvalidator{ ContractUpdates: environment.ContractUpdates{}, - MeterParamOverridesUpdated: true, - }.MeterParamOverridesInvalidator() + ExecutionParametersUpdated: true, + }.ExecutionParametersInvalidator() require.True(t, invalidator.ShouldInvalidateEntries()) require.True(t, invalidator.ShouldInvalidateEntry( struct{}{}, - derived.MeterParamOverrides{}, + derived.StateExecutionParameters{}, nil)) } @@ -265,7 +265,11 @@ func TestMeterParamOverridesUpdated(t *testing.T) { txnState, err := blockDatabase.NewTransaction(0, state.DefaultParameters()) require.NoError(t, err) - computer := fvm.NewMeterParamOverridesComputer(ctx, txnState) + computer := fvm.NewExecutionParametersComputer( + ctx.Logger, + ctx, + txnState, + ) overrides, err := computer.Compute(txnState, struct{}{}) require.NoError(t, err) @@ -300,7 +304,7 @@ func TestMeterParamOverridesUpdated(t *testing.T) { environment.ContractUpdates{}, snapshot, meterStateRead) - require.Equal(t, expected, invalidator.MeterParamOverridesUpdated) + require.Equal(t, expected, invalidator.ExecutionParametersUpdated) } executionSnapshot, err = txnState.FinalizeMainTransaction() diff --git a/fvm/environment/env.go b/fvm/environment/env.go index 5b01728111c..e23e9c64deb 100644 --- a/fvm/environment/env.go +++ b/fvm/environment/env.go @@ -62,6 +62,12 @@ type Environment interface { error, ) + // GetCurrentVersionBoundary executes the getCurrentVersionBoundary function on the NodeVersionBeacon contract. + // the function will return the version boundary (version, block height) that is currently in effect. + // the version boundary currently in effect is the highest one not above the current block height. + // if there is no existing version boundary lower than the current block height, the function will return version 0 and block height 0. 
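+	//
+	// Illustrative example (not part of the contract API docs): with version boundaries recorded at
+	// heights 100 (v0.37.0) and 200 (v0.38.0), a call at block height 150 returns (v0.37.0, 100),
+	// a call at height 250 returns (v0.38.0, 200), and a call at height 50 returns version 0 and block height 0.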
+ GetCurrentVersionBoundary() (cadence.Value, error) + // AccountInfo GetAccount(address flow.Address) (*flow.Account, error) GetAccountKeys(address flow.Address) ([]flow.AccountPublicKey, error) diff --git a/fvm/environment/facade_env.go b/fvm/environment/facade_env.go index 67e20e02394..57b01d1a853 100644 --- a/fvm/environment/facade_env.go +++ b/fvm/environment/facade_env.go @@ -38,6 +38,7 @@ type facadeEnvironment struct { ValueStore *SystemContracts + MinimumCadenceRequiredVersion UUIDGenerator AccountLocalIDGenerator @@ -107,6 +108,9 @@ func newFacadeEnvironment( ), SystemContracts: systemContracts, + MinimumCadenceRequiredVersion: NewMinimumCadenceRequiredVersion( + txnState, + ), UUIDGenerator: NewUUIDGenerator( tracer, diff --git a/fvm/environment/meter.go b/fvm/environment/meter.go index 00a70df2d30..614cd124a52 100644 --- a/fvm/environment/meter.go +++ b/fvm/environment/meter.go @@ -4,6 +4,7 @@ import ( "context" "github.com/onflow/cadence/common" + "github.com/onflow/cadence/runtime" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/meter" @@ -70,18 +71,13 @@ var MainnetExecutionEffortWeights = meter.ExecutionEffortWeights{ } type Meter interface { - MeterComputation(common.ComputationKind, uint) error - ComputationUsed() (uint64, error) + runtime.MeterInterface + ComputationIntensities() meter.MeteredComputationIntensities ComputationAvailable(common.ComputationKind, uint) bool - MeterMemory(usage common.MemoryUsage) error - MemoryUsed() (uint64, error) - MeterEmittedEvent(byteSize uint64) error TotalEmittedEventBytes() uint64 - - InteractionUsed() (uint64, error) } type meterImpl struct { @@ -112,6 +108,10 @@ func (meter *meterImpl) ComputationAvailable( return meter.txnState.ComputationAvailable(kind, intensity) } +func (meter *meterImpl) ComputationRemaining(kind common.ComputationKind) uint { + return meter.txnState.ComputationRemaining(kind) +} + func (meter *meterImpl) ComputationUsed() (uint64, error) { return meter.txnState.TotalComputationUsed(), nil } diff --git a/fvm/environment/minimum_required_version.go b/fvm/environment/minimum_required_version.go new file mode 100644 index 00000000000..3095c33cda9 --- /dev/null +++ b/fvm/environment/minimum_required_version.go @@ -0,0 +1,99 @@ +package environment + +import ( + "github.com/coreos/go-semver/semver" + + "github.com/onflow/flow-go/fvm/storage/state" +) + +// MinimumCadenceRequiredVersion returns the minimum required cadence version for the current environment +// in semver format. +type MinimumCadenceRequiredVersion interface { + MinimumRequiredVersion() (string, error) +} + +type minimumCadenceRequiredVersion struct { + txnPreparer state.NestedTransactionPreparer +} + +func NewMinimumCadenceRequiredVersion( + txnPreparer state.NestedTransactionPreparer, +) MinimumCadenceRequiredVersion { + return minimumCadenceRequiredVersion{ + txnPreparer: txnPreparer, + } +} + +// MinimumRequiredVersion The returned cadence version can be used by cadence runtime for supporting feature flag. +// The feature flag in cadence allows ENs to produce consistent results even if running with +// different cadence versions at the same height, which is useful for rolling out cadence +// upgrade without all ENs restarting all together. +// For instance, we would like to grade cadence from v1 to v3, where v3 has a new cadence feature. +// We first make a cadence v2 that has feature flag only turned on when the MinimumRequiredVersion() +// method returns v2 or above. 
+// So cadence v2 with the feature flag turned off will produce the same result as v1, which doesn't have the feature,
+// and cadence v2 with the feature flag turned on will produce the same result as v3, which has the feature.
+// The feature flag allows us to roll out cadence v2 to all ENs which were running v1,
+// and we use the MinimumRequiredVersion to control when the feature flag should be switched from off to on.
+// The switching should happen at the same height for all ENs.
+//
+// The height-based switch-over can be done by using the VersionBeacon, however, the VersionBeacon only
+// defines the flow-go version, not the cadence version.
+// So we first read the current minimum required flow-go version from the VersionBeacon contract,
+// and map it to the cadence version to be used by cadence to decide the feature flag status.
+//
+// For instance, let's say all ENs are running flow-go v0.37.0 with cadence v1.
+// We first create a version mapping entry for flow-go v0.37.1 to cadence v2, and roll out v0.37.1 to all ENs.
+// v0.37.1 ENs will produce the same result as v0.37.0 ENs, because the current version beacon still returns v0.37.0,
+// which maps to the zero cadence version, and cadence will keep the feature flag off.
+//
+// After all ENs have upgraded to v0.37.1, we send out a version beacon to switch to v0.37.1 at a future height,
+// let's say height 1000.
+// Then what happens is that:
+// 1. ENs running v0.37.0 will crash after height 999, until upgraded to a higher version
+// 2. ENs running v0.37.1 will execute with cadence v2 with the feature flag off up until height 999, and from height 1000,
+// the feature flag will be on, which means all v0.37.1 ENs will again produce consistent results for blocks above 1000.
+//
+// After height 1000 has been sealed, we can roll out v0.37.2 to all ENs with cadence v3, and it will produce results
+// consistent with v0.37.1.
+func (c minimumCadenceRequiredVersion) MinimumRequiredVersion() (string, error) {
+	executionParameters := c.txnPreparer.ExecutionParameters()
+
+	// map the minimum required flow-go version to a minimum required cadence version
+	cadenceVersion := mapToCadenceVersion(executionParameters.ExecutionVersion, minimumFvmToMinimumCadenceVersionMapping)
+
+	return cadenceVersion.String(), nil
+}
+
+func mapToCadenceVersion(flowGoVersion semver.Version, versionMapping FlowGoToCadenceVersionMapping) semver.Version {
+	if versionGreaterThanOrEqualTo(flowGoVersion, versionMapping.FlowGoVersion) {
+		return versionMapping.CadenceVersion
+	} else {
+		return semver.Version{}
+	}
+}
+
+func versionGreaterThanOrEqualTo(version semver.Version, other semver.Version) bool {
+	return version.Compare(other) >= 0
+}
+
+type FlowGoToCadenceVersionMapping struct {
+	FlowGoVersion  semver.Version
+	CadenceVersion semver.Version
+}
+
+// This could also be a map, but it is not needed because we only expect one entry at a given time;
+// we won't be fixing 2 separate issues at 2 separate versions with one deploy.
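+//
+// Illustrative example (not an actual mapping entry): given FlowGoVersion 0.37.1 -> CadenceVersion 1.0.0,
+// mapToCadenceVersion returns the zero cadence version for any execution version below 0.37.1
+// (feature flag stays off), and returns 1.0.0 for 0.37.1 and above (feature flag turns on).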
+var minimumFvmToMinimumCadenceVersionMapping = FlowGoToCadenceVersionMapping{ + // Leaving this example in, so it's easier to understand + // + // FlowGoVersion: *semver.New("0.37.0"), + // CadenceVersion: *semver.New("1.0.0"), + // +} + +func SetFVMToCadenceVersionMappingForTestingOnly(mapping FlowGoToCadenceVersionMapping) { + minimumFvmToMinimumCadenceVersionMapping = mapping +} + +var _ MinimumCadenceRequiredVersion = (*minimumCadenceRequiredVersion)(nil) diff --git a/fvm/environment/minimum_required_version_test.go b/fvm/environment/minimum_required_version_test.go new file mode 100644 index 00000000000..a72e10567df --- /dev/null +++ b/fvm/environment/minimum_required_version_test.go @@ -0,0 +1,63 @@ +package environment + +import ( + "testing" + + "github.com/coreos/go-semver/semver" + "github.com/stretchr/testify/require" +) + +func Test_MapToCadenceVersion(t *testing.T) { + flowV0 := semver.Version{} + cadenceV0 := semver.Version{} + flowV1 := semver.Version{ + Major: 0, + Minor: 37, + Patch: 0, + } + cadenceV1 := semver.Version{ + Major: 1, + Minor: 0, + Patch: 0, + } + + mapping := FlowGoToCadenceVersionMapping{ + FlowGoVersion: flowV1, + CadenceVersion: cadenceV1, + } + + t.Run("no mapping, v0", func(t *testing.T) { + version := mapToCadenceVersion(flowV0, FlowGoToCadenceVersionMapping{}) + + require.Equal(t, cadenceV0, version) + }) + + t.Run("v0", func(t *testing.T) { + version := mapToCadenceVersion(flowV0, mapping) + + require.Equal(t, semver.Version{}, version) + }) + t.Run("v1 - delta", func(t *testing.T) { + + v := flowV1 + v.Patch -= 1 + + version := mapToCadenceVersion(v, mapping) + + require.Equal(t, cadenceV0, version) + }) + t.Run("v1", func(t *testing.T) { + version := mapToCadenceVersion(flowV1, mapping) + + require.Equal(t, cadenceV1, version) + }) + t.Run("v1 + delta", func(t *testing.T) { + + v := flowV1 + v.BumpPatch() + + version := mapToCadenceVersion(v, mapping) + + require.Equal(t, cadenceV1, version) + }) +} diff --git a/fvm/environment/mock/environment.go b/fvm/environment/mock/environment.go index 1766579d621..445dcbd49d8 100644 --- a/fvm/environment/mock/environment.go +++ b/fvm/environment/mock/environment.go @@ -907,6 +907,36 @@ func (_m *Environment) GetCurrentBlockHeight() (uint64, error) { return r0, r1 } +// GetCurrentVersionBoundary provides a mock function with given fields: +func (_m *Environment) GetCurrentVersionBoundary() (cadence.Value, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetCurrentVersionBoundary") + } + + var r0 cadence.Value + var r1 error + if rf, ok := ret.Get(0).(func() (cadence.Value, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() cadence.Value); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(cadence.Value) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetInterpreterSharedState provides a mock function with given fields: func (_m *Environment) GetInterpreterSharedState() *interpreter.SharedState { ret := _m.Called() diff --git a/fvm/environment/mock/meter.go b/fvm/environment/mock/meter.go index 638d12a85e5..d73a61c5ff7 100644 --- a/fvm/environment/mock/meter.go +++ b/fvm/environment/mock/meter.go @@ -137,9 +137,9 @@ func (_m *Meter) MemoryUsed() (uint64, error) { return r0, r1 } -// MeterComputation provides a mock function with given fields: _a0, _a1 -func (_m *Meter) MeterComputation(_a0 common.ComputationKind, _a1 uint) error { - ret := 
_m.Called(_a0, _a1) +// MeterComputation provides a mock function with given fields: operationType, intensity +func (_m *Meter) MeterComputation(operationType common.ComputationKind, intensity uint) error { + ret := _m.Called(operationType, intensity) if len(ret) == 0 { panic("no return value specified for MeterComputation") @@ -147,7 +147,7 @@ func (_m *Meter) MeterComputation(_a0 common.ComputationKind, _a1 uint) error { var r0 error if rf, ok := ret.Get(0).(func(common.ComputationKind, uint) error); ok { - r0 = rf(_a0, _a1) + r0 = rf(operationType, intensity) } else { r0 = ret.Error(0) } diff --git a/fvm/environment/mock/minimum_cadence_required_version.go b/fvm/environment/mock/minimum_cadence_required_version.go new file mode 100644 index 00000000000..d3bff30e08b --- /dev/null +++ b/fvm/environment/mock/minimum_cadence_required_version.go @@ -0,0 +1,52 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// MinimumCadenceRequiredVersion is an autogenerated mock type for the MinimumCadenceRequiredVersion type +type MinimumCadenceRequiredVersion struct { + mock.Mock +} + +// MinimumRequiredVersion provides a mock function with given fields: +func (_m *MinimumCadenceRequiredVersion) MinimumRequiredVersion() (string, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for MinimumRequiredVersion") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func() (string, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewMinimumCadenceRequiredVersion creates a new instance of MinimumCadenceRequiredVersion. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMinimumCadenceRequiredVersion(t interface { + mock.TestingT + Cleanup(func()) +}) *MinimumCadenceRequiredVersion { + mock := &MinimumCadenceRequiredVersion{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/environment/programs_test.go b/fvm/environment/programs_test.go index e39d224c9d4..b0368db49e0 100644 --- a/fvm/environment/programs_test.go +++ b/fvm/environment/programs_test.go @@ -156,7 +156,9 @@ func Test_Programs(t *testing.T) { fvm.WithAuthorizationChecksEnabled(false), fvm.WithSequenceNumberCheckAndIncrementEnabled(false), fvm.WithCadenceLogging(true), - fvm.WithDerivedBlockData(derivedBlockData)) + fvm.WithDerivedBlockData(derivedBlockData), + // disable reading version from node version beacon otherwise it loads an extra contract + fvm.WithReadVersionFromNodeVersionBeacon(false)) var contractASnapshot *snapshot.ExecutionSnapshot var contractBSnapshot *snapshot.ExecutionSnapshot @@ -613,7 +615,9 @@ func Test_ProgramsDoubleCounting(t *testing.T) { fvm.WithSequenceNumberCheckAndIncrementEnabled(false), fvm.WithCadenceLogging(true), fvm.WithDerivedBlockData(derivedBlockData), - fvm.WithMetricsReporter(metrics)) + fvm.WithMetricsReporter(metrics), + // disable reading version from node version beacon otherwise it loads an extra contract + fvm.WithReadVersionFromNodeVersionBeacon(false)) t.Run("deploy contracts and ensure cache is empty", func(t *testing.T) { // deploy contract A diff --git a/fvm/environment/system_contracts.go b/fvm/environment/system_contracts.go index 22860dd38ea..e8e9598aa3c 100644 --- a/fvm/environment/system_contracts.go +++ b/fvm/environment/system_contracts.go @@ -312,3 +312,21 @@ func (sys *SystemContracts) AccountsStorageCapacity( }, ) } + +var getCurrentVersionBoundarySpec = ContractFunctionSpec{ + AddressFromChain: ServiceAddress, + LocationName: systemcontracts.ContractNameNodeVersionBeacon, + FunctionName: systemcontracts.ContractVersionBeacon_getCurrentVersionBoundary, + ArgumentTypes: []sema.Type{}, +} + +// GetCurrentVersionBoundary executes the getCurrentVersionBoundary function on the NodeVersionBeacon contract. +// the function will return the version boundary (version, block height) that is currently in effect. +// the version boundary currently in effect is the highest one not above the current block height. +// if there is no existing version boundary lower than the current block height, the function will return version 0 and block height 0. 
+func (sys *SystemContracts) GetCurrentVersionBoundary() (cadence.Value, error) { + return sys.Invoke( + getCurrentVersionBoundarySpec, + []cadence.Value{}, + ) +} diff --git a/fvm/evm/offchain/utils/collection.go b/fvm/evm/offchain/utils/collection.go new file mode 100644 index 00000000000..143c2bd3eee --- /dev/null +++ b/fvm/evm/offchain/utils/collection.go @@ -0,0 +1,104 @@ +package utils + +import ( + "bufio" + "context" + "encoding/hex" + "encoding/json" + "fmt" + "math" + "os" + "path/filepath" + "sort" + + "github.com/onflow/flow-go-sdk/access/grpc" +) + +func CollectEventData(conf *Config, path string) error { + + flowClient, err := grpc.NewClient(conf.host) + if err != nil { + return err + } + outputFile := filepath.Join(path, "events.jsonl") + fi, err := os.Create(outputFile) + if err != nil { + return err + } + defer fi.Close() + + writer := bufio.NewWriter(fi) + defer writer.Flush() + + ctx := context.Background() + + txEventType := fmt.Sprintf("A.%s.EVM.TransactionExecuted", conf.evmContractAddress) + blockEventType := fmt.Sprintf("A.%s.EVM.BlockExecuted", conf.evmContractAddress) + + for height := conf.startHeight; height < conf.endHeight; height += conf.batchSize { + events := make([]Event, 0) + result, err := flowClient.GetEventsForHeightRange(ctx, txEventType, height, height+conf.batchSize-1) + if err != nil { + return err + } + if len(result) > 0 { + for _, tEvent := range result { + evs := tEvent.Events + for _, e := range evs { + events = append(events, Event{ + FlowBlockHeight: tEvent.Height, + EventType: e.Type, + EventPayload: hex.EncodeToString(e.Payload), + txIndex: e.TransactionIndex, + eventIndex: e.EventIndex, + }) + } + } + } + result, err = flowClient.GetEventsForHeightRange(ctx, blockEventType, height, height+conf.batchSize-1) + if err != nil { + return err + } + if len(result) > 0 { + for _, bEvent := range result { + evs := bEvent.Events + for _, e := range evs { + events = append(events, Event{ + FlowBlockHeight: bEvent.Height, + EventType: e.Type, + EventPayload: hex.EncodeToString(e.Payload), + // setting to max int to make sure it is order as the last event of the evm block + txIndex: math.MaxInt, + }) + } + } + } + + // sort events by flow height, tx index and then event index + sort.Slice(events, func(i, j int) bool { + if events[i].FlowBlockHeight == events[j].FlowBlockHeight { + if events[i].txIndex == events[j].txIndex { + return events[i].eventIndex < events[j].eventIndex + } + return events[i].txIndex < events[j].txIndex + } + return events[i].FlowBlockHeight < events[j].FlowBlockHeight + }) + + for _, ev := range events { + jsonData, err := json.Marshal(ev) + if err != nil { + return err + } + _, err = writer.WriteString(string(jsonData) + "\n") + if err != nil { + return err + } + err = writer.Flush() + if err != nil { + return err + } + } + } + return writer.Flush() +} diff --git a/fvm/evm/offchain/utils/collection_test.go b/fvm/evm/offchain/utils/collection_test.go new file mode 100644 index 00000000000..a90a8f57bea --- /dev/null +++ b/fvm/evm/offchain/utils/collection_test.go @@ -0,0 +1,99 @@ +package utils_test + +import ( + "bufio" + "encoding/hex" + "encoding/json" + "os" + "strings" + "testing" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm" + "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/offchain/blocks" + 
"github.com/onflow/flow-go/fvm/evm/offchain/sync" + "github.com/onflow/flow-go/fvm/evm/offchain/utils" + . "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +func ReplyingCollectionFromScratch( + t *testing.T, + chainID flow.ChainID, + storage types.BackendStorage, + filePath string, +) { + + rootAddr := evm.StorageAccountAddress(chainID) + + // setup the rootAddress account + as := environment.NewAccountStatus() + err := storage.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes()) + require.NoError(t, err) + + bp, err := blocks.NewBasicProvider(chainID, storage, rootAddr) + require.NoError(t, err) + + file, err := os.Open(filePath) + require.NoError(t, err) + defer file.Close() + + scanner := bufio.NewScanner(file) + + buf := make([]byte, 0, 64*1024) + scanner.Buffer(buf, 1024*1024) + + txEvents := make([]events.TransactionEventPayload, 0) + + for scanner.Scan() { + data := scanner.Bytes() + var e utils.Event + err := json.Unmarshal(data, &e) + require.NoError(t, err) + if strings.Contains(e.EventType, "BlockExecuted") { + temp, err := hex.DecodeString(e.EventPayload) + require.NoError(t, err) + ev, err := ccf.Decode(nil, temp) + require.NoError(t, err) + blockEventPayload, err := events.DecodeBlockEventPayload(ev.(cadence.Event)) + require.NoError(t, err) + + err = bp.OnBlockReceived(blockEventPayload) + require.NoError(t, err) + + sp := NewTestStorageProvider(storage, blockEventPayload.Height) + cr := sync.NewReplayer(chainID, rootAddr, sp, bp, zerolog.Logger{}, nil, true) + res, err := cr.ReplayBlock(txEvents, blockEventPayload) + require.NoError(t, err) + // commit all changes + for k, v := range res.StorageRegisterUpdates() { + err = storage.SetValue([]byte(k.Owner), []byte(k.Key), v) + require.NoError(t, err) + } + + err = bp.OnBlockExecuted(blockEventPayload.Height, res) + require.NoError(t, err) + + txEvents = make([]events.TransactionEventPayload, 0) + continue + } + + temp, err := hex.DecodeString(e.EventPayload) + require.NoError(t, err) + ev, err := ccf.Decode(nil, temp) + require.NoError(t, err) + txEv, err := events.DecodeTransactionEventPayload(ev.(cadence.Event)) + require.NoError(t, err) + txEvents = append(txEvents, *txEv) + } + if err := scanner.Err(); err != nil { + t.Fatal(err) + } +} diff --git a/fvm/evm/offchain/utils/types.go b/fvm/evm/offchain/utils/types.go new file mode 100644 index 00000000000..f40b19e4e91 --- /dev/null +++ b/fvm/evm/offchain/utils/types.go @@ -0,0 +1,33 @@ +package utils + +type Event struct { + FlowBlockHeight uint64 `json:"flow_height"` + EventType string `json:"type"` + EventPayload string `json:"payload"` + txIndex int + eventIndex int +} + +type Config struct { + host string + evmContractAddress string // no prefix + startHeight uint64 + endHeight uint64 + batchSize uint64 +} + +var Devnet51Config = Config{ + host: "access-001.devnet51.nodes.onflow.org:9000", + evmContractAddress: "8c5303eaa26202d6", + startHeight: uint64(211176670), + endHeight: uint64(218215349), + batchSize: uint64(50), +} + +var Mainnet25Config = Config{ + host: "access-001.mainnet25.nodes.onflow.org:9000", + evmContractAddress: "e467b9dd11fa00df", + startHeight: uint64(85981135), + endHeight: uint64(88226266), + batchSize: uint64(50), +} diff --git a/fvm/evm/stdlib/contract.cdc b/fvm/evm/stdlib/contract.cdc index 618f9b0b9fa..a547768362e 100644 --- a/fvm/evm/stdlib/contract.cdc +++ b/fvm/evm/stdlib/contract.cdc @@ -679,7 +679,6 @@ contract EVM { signatures: [[UInt8]], 
evmAddress: [UInt8; 20] ): ValidationResult { - // make signature set first // check number of signatures matches number of key indices if keyIndices.length != signatures.length { @@ -689,39 +688,58 @@ contract EVM { ) } - var signatureSet: [Crypto.KeyListSignature] = [] - for signatureIndex, signature in signatures{ - signatureSet.append(Crypto.KeyListSignature( - keyIndex: Int(keyIndices[signatureIndex]), - signature: signature - )) - } - // fetch account let acc = getAccount(address) - // constructing key list + var signatureSet: [Crypto.KeyListSignature] = [] let keyList = Crypto.KeyList() - for signature in signatureSet { - let keyRef = acc.keys.get(keyIndex: signature.keyIndex) - if keyRef == nil { - return ValidationResult( - isValid: false, - problem: "invalid key index" - ) - } - let key = keyRef! - if key.isRevoked { - return ValidationResult( - isValid: false, - problem: "account key is revoked" - ) + var keyListLength = 0 + let seenAccountKeyIndices: {Int: Int} = {} + for signatureIndex, signature in signatures{ + // index of the key on the account + let accountKeyIndex = Int(keyIndices[signatureIndex]!) + // index of the key in the key list + var keyListIndex = 0 + + if !seenAccountKeyIndices.containsKey(accountKeyIndex) { + // fetch account key with accountKeyIndex + if let key = acc.keys.get(keyIndex: accountKeyIndex) { + if key.isRevoked { + return ValidationResult( + isValid: false, + problem: "account key is revoked" + ) + } + + keyList.add( + key.publicKey, + hashAlgorithm: key.hashAlgorithm, + // normalization factor. We need to divide by 1000 because the + // `Crypto.KeyList.verify()` function expects the weight to be + // in the range [0, 1]. 1000 is the key weight threshold. + weight: key.weight / 1000.0, + ) + + keyListIndex = keyListLength + keyListLength = keyListLength + 1 + seenAccountKeyIndices[accountKeyIndex] = keyListIndex + } else { + return ValidationResult( + isValid: false, + problem: "invalid key index" + ) + } + } else { + // if we have already seen this accountKeyIndex, use the keyListIndex + // that was previously assigned to it + // `Crypto.KeyList.verify()` knows how to handle duplicate keys + keyListIndex = seenAccountKeyIndices[accountKeyIndex]! 
} - keyList.add( - key.publicKey, - hashAlgorithm: key.hashAlgorithm, - weight: key.weight, - ) + + signatureSet.append(Crypto.KeyListSignature( + keyIndex: keyListIndex, + signature: signature + )) } let isValid = keyList.verify( diff --git a/fvm/evm/stdlib/contract_test.go b/fvm/evm/stdlib/contract_test.go index bdffe56846b..9bc3092c9ec 100644 --- a/fvm/evm/stdlib/contract_test.go +++ b/fvm/evm/stdlib/contract_test.go @@ -1,6 +1,7 @@ package stdlib_test import ( + "bytes" "encoding/binary" "encoding/hex" "math/big" @@ -5454,91 +5455,72 @@ func TestEVMAccountCodeHash(t *testing.T) { func TestEVMValidateCOAOwnershipProof(t *testing.T) { t.Parallel() - contractsAddress := flow.BytesToAddress([]byte{0x1}) - proof := &types.COAOwnershipProofInContext{ - COAOwnershipProof: types.COAOwnershipProof{ - Address: types.FlowAddress(contractsAddress), - CapabilityPath: "coa", - Signatures: []types.Signature{[]byte("signature")}, - KeyIndices: []uint64{0}, - }, - SignedData: []byte("signedData"), - EVMAddress: RandomAddress(t), - } - - handler := &testContractHandler{ - deployCOA: func(_ uint64) types.Address { - return proof.EVMAddress - }, - } - transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) - scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) - - rt := runtime.NewInterpreterRuntime(runtime.Config{}) - - accountCodes := map[common.Location][]byte{} - var events []cadence.Event - - runtimeInterface := &TestRuntimeInterface{ - Storage: NewTestLedger(nil, nil), - OnGetSigningAccounts: func() ([]runtime.Address, error) { - return []runtime.Address{runtime.Address(contractsAddress)}, nil - }, - OnResolveLocation: newLocationResolver(contractsAddress), - OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { - accountCodes[location] = code - return nil - }, - OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { - code = accountCodes[location] - return code, nil - }, - OnEmitEvent: func(event cadence.Event) error { - events = append(events, event) - return nil - }, - OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { - return json.Decode(nil, b) - }, - OnGetAccountKey: func(addr runtime.Address, index uint32) (*cadenceStdlib.AccountKey, error) { - require.Equal(t, proof.Address[:], addr[:]) - return &cadenceStdlib.AccountKey{ - PublicKey: &cadenceStdlib.PublicKey{}, - KeyIndex: index, - Weight: 100, - HashAlgo: sema.HashAlgorithmKECCAK_256, - IsRevoked: false, - }, nil - }, - OnVerifySignature: func( + validate := func( + proof *types.COAOwnershipProofInContext, + onGetAccountKey func(addr runtime.Address, index uint32) (*cadenceStdlib.AccountKey, error), + onVerifySignature func( signature []byte, tag string, sd, publicKey []byte, signatureAlgorithm runtime.SignatureAlgorithm, - hashAlgorithm runtime.HashAlgorithm) (bool, error) { - // require.Equal(t, []byte(signedData.ToGoValue()), st) - return true, nil - }, - } + hashAlgorithm runtime.HashAlgorithm) (bool, error), + ) (cadence.Value, error) { + handler := &testContractHandler{ + deployCOA: func(_ uint64) types.Address { + return proof.EVMAddress + }, + } + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) - nextTransactionLocation := NewTransactionLocationGenerator() - nextScriptLocation := NewScriptLocationGenerator() + rt := runtime.NewInterpreterRuntime(runtime.Config{}) - // Deploy contracts + 
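The contract change above builds the key list from deduplicated account keys and divides each weight by 1000, since account key weights live in [0, 1000] while Crypto.KeyList.verify expects normalized weights in [0, 1] with 1.0 as the signing threshold. A rough sketch of the resulting weight arithmetic, as exercised by the "Single key", "Two keys", and insufficient-weight tests below (plain numbers only, not the contract's API):

package main

import "fmt"

// keyWeightThreshold mirrors the 1000 divisor introduced above: account key
// weights are in [0, 1000], normalized weights in [0.0, 1.0], threshold 1.0.
const keyWeightThreshold = 1000.0

// sufficientWeight reports whether a set of deduplicated account key weights
// reaches the threshold after normalization. Purely illustrative arithmetic.
func sufficientWeight(accountKeyWeights []float64) bool {
    total := 0.0
    for _, w := range accountKeyWeights {
        total += w / keyWeightThreshold
    }
    return total >= 1.0
}

func main() {
    fmt.Println(sufficientWeight([]float64{1000}))       // true: the "Single key" case
    fmt.Println(sufficientWeight([]float64{1000, 1000})) // true: the "Two keys" case
    fmt.Println(sufficientWeight([]float64{499, 499}))   // false: insufficient weight, 0.998 < 1.0
}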
accountCodes := map[common.Location][]byte{} + var events []cadence.Event - deployContracts( - t, - rt, - contractsAddress, - runtimeInterface, - transactionEnvironment, - nextTransactionLocation, - ) + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + OnGetAccountKey: onGetAccountKey, + OnVerifySignature: onVerifySignature, + } - setupTx := []byte(` + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + setupTx := []byte(` import EVM from 0x1 transaction { @@ -5556,55 +5538,211 @@ func TestEVMValidateCOAOwnershipProof(t *testing.T) { } }`) - err := rt.ExecuteTransaction( - runtime.Script{ - Source: setupTx, - }, - runtime.Context{ - Interface: runtimeInterface, - Environment: transactionEnvironment, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) + err := rt.ExecuteTransaction( + runtime.Script{ + Source: setupTx, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: transactionEnvironment, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) - script := []byte(` - import EVM from 0x1 + script := []byte(` + import EVM from 0x1 + + access(all) + fun main( + address: Address, + path: PublicPath, + signedData: [UInt8], + keyIndices: [UInt64], + signatures: [[UInt8]], + evmAddress: [UInt8; 20] + ): EVM.ValidationResult { + return EVM.validateCOAOwnershipProof( + address: address, + path: path, + signedData: signedData, + keyIndices: keyIndices, + signatures: signatures, + evmAddress: evmAddress + ) + } + `) - access(all) - fun main( - address: Address, - path: PublicPath, - signedData: [UInt8], - keyIndices: [UInt64], - signatures: [[UInt8]], - evmAddress: [UInt8; 20] - - ) { - EVM.validateCOAOwnershipProof( - address: address, - path: path, - signedData: signedData, - keyIndices: keyIndices, - signatures: signatures, - evmAddress: evmAddress - ) - } - `) + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: EncodeArgs(proof.ToCadenceValues()), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) - // Run script - _, err = rt.ExecuteScript( - runtime.Script{ - Source: script, - Arguments: EncodeArgs(proof.ToCadenceValues()), - }, - runtime.Context{ - Interface: runtimeInterface, - Environment: scriptEnvironment, - Location: nextScriptLocation(), - }, - ) - require.NoError(t, err) + return result, err + } + + t.Run("Single key", func(t *testing.T) { + proof := &types.COAOwnershipProofInContext{ + COAOwnershipProof: types.COAOwnershipProof{ + Address: types.FlowAddress(contractsAddress), 
+ CapabilityPath: "coa", + Signatures: []types.Signature{[]byte("signature")}, + KeyIndices: []uint64{0}, + }, + SignedData: []byte("signedData"), + EVMAddress: RandomAddress(t), + } + + result, err := validate( + proof, + func( + addr runtime.Address, + index uint32, + ) (*cadenceStdlib.AccountKey, error) { + require.Equal(t, proof.Address[:], addr[:]) + return &cadenceStdlib.AccountKey{ + PublicKey: &cadenceStdlib.PublicKey{}, + KeyIndex: index, + Weight: 1000, + HashAlgo: sema.HashAlgorithmKECCAK_256, + IsRevoked: false, + }, nil + }, + func( + signature []byte, + tag string, + sd, + publicKey []byte, + signatureAlgorithm runtime.SignatureAlgorithm, + hashAlgorithm runtime.HashAlgorithm, + ) (bool, error) { + return true, nil + }, + ) + + require.NoError(t, err) + + isValid := result.(cadence.Struct).SearchFieldByName("isValid").(cadence.Bool) + require.True(t, bool(isValid)) + }) + + t.Run("Two keys", func(t *testing.T) { + proof := &types.COAOwnershipProofInContext{ + COAOwnershipProof: types.COAOwnershipProof{ + Address: types.FlowAddress(contractsAddress), + CapabilityPath: "coa", + Signatures: []types.Signature{[]byte("signature2"), []byte("signature0")}, + KeyIndices: []uint64{2, 0}, + }, + SignedData: []byte("signedData"), + EVMAddress: RandomAddress(t), + } + + result, err := validate( + proof, + func(addr runtime.Address, index uint32) (*cadenceStdlib.AccountKey, error) { + require.Equal(t, proof.Address[:], addr[:]) + return &cadenceStdlib.AccountKey{ + PublicKey: &cadenceStdlib.PublicKey{ + // encode the key index into the public key + PublicKey: []byte{byte(index)}, + }, + KeyIndex: index, + Weight: 1000, + HashAlgo: sema.HashAlgorithmKECCAK_256, + IsRevoked: false, + }, nil + }, + func( + signature []byte, + tag string, + sd, + publicKey []byte, + signatureAlgorithm runtime.SignatureAlgorithm, + hashAlgorithm runtime.HashAlgorithm, + ) (bool, error) { + if bytes.Equal(signature, []byte("signature2")) { + require.Equal(t, byte(2), publicKey[0]) + return true, nil + } else if bytes.Equal(signature, []byte("signature0")) { + require.Equal(t, byte(0), publicKey[0]) + return true, nil + } else { + return false, nil + } + }, + ) + + require.NoError(t, err) + + isValid := result.(cadence.Struct).SearchFieldByName("isValid").(cadence.Bool) + require.True(t, bool(isValid)) + }) + + t.Run("Two keys insufficient weight", func(t *testing.T) { + proof := &types.COAOwnershipProofInContext{ + COAOwnershipProof: types.COAOwnershipProof{ + Address: types.FlowAddress(contractsAddress), + CapabilityPath: "coa", + Signatures: []types.Signature{[]byte("signature2"), []byte("signature0")}, + KeyIndices: []uint64{2, 0}, + }, + SignedData: []byte("signedData"), + EVMAddress: RandomAddress(t), + } + + result, err := validate( + proof, + func(addr runtime.Address, index uint32) (*cadenceStdlib.AccountKey, error) { + require.Equal(t, proof.Address[:], addr[:]) + return &cadenceStdlib.AccountKey{ + PublicKey: &cadenceStdlib.PublicKey{ + // encode the key index into the public key + PublicKey: []byte{byte(index)}, + }, + KeyIndex: index, + Weight: 499, + HashAlgo: sema.HashAlgorithmKECCAK_256, + IsRevoked: false, + }, nil + }, + func( + signature []byte, + tag string, + sd, + publicKey []byte, + signatureAlgorithm runtime.SignatureAlgorithm, + hashAlgorithm runtime.HashAlgorithm, + ) (bool, error) { + if bytes.Equal(signature, []byte("signature2")) { + require.Equal(t, byte(2), publicKey[0]) + return true, nil + } else if bytes.Equal(signature, []byte("signature0")) { + require.Equal(t, byte(0), 
publicKey[0]) + return true, nil + } else { + return false, nil + } + }, + ) + + require.NoError(t, err) + + isValid := result.(cadence.Struct).SearchFieldByName("isValid").(cadence.Bool) + require.False(t, bool(isValid)) + message := result.(cadence.Struct). + SearchFieldByName("problem").(cadence.Optional). + Value.(cadence.String).String() + require.Equal(t, "\"the given signatures are not valid or provide enough weight\"", message) + }) } func TestInternalEVMAccess(t *testing.T) { diff --git a/fvm/executionParameters.go b/fvm/executionParameters.go index 34e3f4594cb..db88f42ef5f 100644 --- a/fvm/executionParameters.go +++ b/fvm/executionParameters.go @@ -5,6 +5,11 @@ import ( "fmt" "math" + "github.com/coreos/go-semver/semver" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/convert" + "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/cadence" @@ -52,75 +57,82 @@ func getBasicMeterParameters( return params } -// getBodyMeterParameters returns the set of meter parameters used for -// transaction/script body execution. -func getBodyMeterParameters( +// getExecutionParameters returns the set of meter parameters used for +// transaction/script body execution and the minimum required version as defined by the +// NodeVersionBeacon contract. +func getExecutionParameters( + log zerolog.Logger, ctx Context, proc Procedure, txnState storage.TransactionPreparer, -) ( - meter.MeterParameters, - *snapshot.ExecutionSnapshot, - error, -) { - procParams := getBasicMeterParameters(ctx, proc) +) (state.ExecutionParameters, *snapshot.ExecutionSnapshot, error) { + meterParams := getBasicMeterParameters(ctx, proc) - overrides, meterStateRead, err := txnState.GetMeterParamOverrides( + executionParams, executionParamsStateRead, err := txnState.GetStateExecutionParameters( txnState, - NewMeterParamOverridesComputer(ctx, txnState)) + NewExecutionParametersComputer(log, ctx, txnState)) if err != nil { - return procParams, nil, err + return state.ExecutionParameters{ + MeterParameters: meterParams, + ExecutionVersion: semver.Version{}, + }, nil, err } - if overrides.ComputationWeights != nil { - procParams = procParams.WithComputationWeights( - overrides.ComputationWeights) + if executionParams.ComputationWeights != nil { + meterParams = meterParams.WithComputationWeights( + executionParams.ComputationWeights) } - if overrides.MemoryWeights != nil { - procParams = procParams.WithMemoryWeights(overrides.MemoryWeights) + if executionParams.MemoryWeights != nil { + meterParams = meterParams.WithMemoryWeights(executionParams.MemoryWeights) } - if overrides.MemoryLimit != nil { - procParams = procParams.WithMemoryLimit(*overrides.MemoryLimit) + if executionParams.MemoryLimit != nil { + meterParams = meterParams.WithMemoryLimit(*executionParams.MemoryLimit) } // NOTE: The memory limit (and interaction limit) may be overridden by the // environment. We need to ignore the override in that case. if proc.ShouldDisableMemoryAndInteractionLimits(ctx) { - procParams = procParams.WithMemoryLimit(math.MaxUint64). + meterParams = meterParams.WithMemoryLimit(math.MaxUint64). 
WithStorageInteractionLimit(math.MaxUint64) } - return procParams, meterStateRead, nil + return state.ExecutionParameters{ + MeterParameters: meterParams, + ExecutionVersion: executionParams.ExecutionVersion, + }, executionParamsStateRead, nil } -type MeterParamOverridesComputer struct { +type ExecutionParametersComputer struct { + log zerolog.Logger ctx Context txnState storage.TransactionPreparer } -func NewMeterParamOverridesComputer( +func NewExecutionParametersComputer( + log zerolog.Logger, ctx Context, txnState storage.TransactionPreparer, -) MeterParamOverridesComputer { - return MeterParamOverridesComputer{ +) ExecutionParametersComputer { + return ExecutionParametersComputer{ + log: log, ctx: ctx, txnState: txnState, } } -func (computer MeterParamOverridesComputer) Compute( +func (computer ExecutionParametersComputer) Compute( _ state.NestedTransactionPreparer, _ struct{}, ) ( - derived.MeterParamOverrides, + derived.StateExecutionParameters, error, ) { - var overrides derived.MeterParamOverrides + var overrides derived.StateExecutionParameters var err error computer.txnState.RunWithAllLimitsDisabled(func() { - overrides, err = computer.getMeterParamOverrides() + overrides, err = computer.getExecutionParameters() }) if err != nil { @@ -132,8 +144,8 @@ func (computer MeterParamOverridesComputer) Compute( return overrides, nil } -func (computer MeterParamOverridesComputer) getMeterParamOverrides() ( - derived.MeterParamOverrides, +func (computer ExecutionParametersComputer) getExecutionParameters() ( + derived.StateExecutionParameters, error, ) { // Check that the service account exists because all the settings are @@ -147,7 +159,7 @@ func (computer MeterParamOverridesComputer) getMeterParamOverrides() ( computer.ctx.EnvironmentParams, computer.txnState) - overrides := derived.MeterParamOverrides{} + overrides := derived.StateExecutionParameters{} // set the property if no error, but if the error is a fatal error then // return it @@ -204,6 +216,15 @@ func (computer MeterParamOverridesComputer) getMeterParamOverrides() ( return overrides, err } + executionVersion, err := GetMinimumRequiredExecutionVersion(computer.log, computer.ctx, env) + err = setIfOk( + "execution version", + err, + func() { overrides.ExecutionVersion = executionVersion }) + if err != nil { + return overrides, err + } + return overrides, nil } @@ -336,3 +357,40 @@ func GetExecutionMemoryLimit( return uint64(memoryLimitRaw), nil } + +func GetMinimumRequiredExecutionVersion( + log zerolog.Logger, + ctx Context, + env environment.Environment, +) (semver.Version, error) { + if !ctx.ReadVersionFromNodeVersionBeacon { + return semver.Version{}, nil + } + + // the current version boundary defines a block height and a minimum required version that is required past that block height. + value, err := env.GetCurrentVersionBoundary() + + if err != nil { + return semver.Version{}, fmt.Errorf("could not get current version boundary: %w", err) + } + + boundary, err := convert.VersionBoundary(value) + + if err != nil { + return semver.Version{}, fmt.Errorf("could not parse current version boundary: %w", err) + } + + semVer, err := semver.NewVersion(boundary.Version) + if err != nil { + // This could be problematic, if the version is not a valid semver version. The NodeVersionBeacon should prevent + // this, but it could have bugs. + // Erroring here gives us no way to recover as no transactions would work anymore, + // instead return the version as 0.0.0 and log the error, allowing us to recover. 
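The recovery path described in this comment treats an unparsable version as 0.0.0, so any version-gated branch simply stays off instead of every transaction failing. A hedged sketch of that fallback and of how a caller might gate on the result (minimumVersionOrZero and the 1.0.0 gate are illustrative, not flow-go APIs):

package main

import (
    "fmt"

    "github.com/coreos/go-semver/semver"
)

// minimumVersionOrZero mirrors the fallback above: an unparsable version from
// the beacon is logged by the caller and treated as 0.0.0, so version-gated
// behaviour stays disabled instead of halting all transactions.
func minimumVersionOrZero(raw string) semver.Version {
    v, err := semver.NewVersion(raw)
    if err != nil {
        return semver.Version{} // 0.0.0
    }
    return *v
}

func main() {
    current := minimumVersionOrZero("not-a-version")
    gate := semver.Version{Major: 1} // hypothetical feature gate at 1.0.0

    if !current.LessThan(gate) {
        fmt.Println("new behaviour enabled")
    } else {
        fmt.Printf("new behaviour disabled (version %s)\n", current.String())
    }
}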
+ // this would mean that any if-statements that were relying on a higher version would fail, + // but that is preferable to all transactions halting. + log.Error().Err(err).Msg("could not parse version boundary. Version boundary as defined in the NodeVersionBeacon contract is not a valid semver version!") + return semver.Version{}, nil + } + + return *semVer, nil +} diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index cb0d270f725..fb88f78f287 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -8,6 +8,15 @@ import ( "math" "strings" "testing" + "time" + + "github.com/rs/zerolog" + + "github.com/coreos/go-semver/semver" + "github.com/onflow/flow-core-contracts/lib/go/templates" + + "github.com/onflow/flow-go/fvm/storage" + "github.com/onflow/flow-go/fvm/storage/state" cadenceStdlib "github.com/onflow/cadence/stdlib" @@ -3404,3 +3413,193 @@ func TestCrypto(t *testing.T) { test(t, fmt.Sprintf("import Crypto from %s", cryptoContractAddress)) }) } + +func Test_MinimumRequiredVersion(t *testing.T) { + + chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + log := zerolog.New(zerolog.NewTestWriter(t)) + + getVersion := func(ctx fvm.Context, snapshotTree snapshot.SnapshotTree) string { + blockDatabase := storage.NewBlockDatabase( + snapshotTree, + 0, + nil) + + txnState, err := blockDatabase.NewTransaction(0, state.DefaultParameters()) + require.NoError(t, err) + + executionParams, _, err := txnState.GetStateExecutionParameters( + txnState, + fvm.NewExecutionParametersComputer(log, ctx, txnState)) + require.NoError(t, err) + + // this will set the parameters to the txnState. + // this is done at the beginning of a transaction/script + txnId, err := txnState.BeginNestedTransactionWithMeterParams( + state.ExecutionParameters{ + ExecutionVersion: executionParams.ExecutionVersion, + }) + require.NoError(t, err) + + mrv := environment.NewMinimumCadenceRequiredVersion(txnState) + + v, err := mrv.MinimumRequiredVersion() + + require.NoError(t, err) + _, err = txnState.CommitNestedTransaction(txnId) + require.NoError(t, err) + + return v + } + + insertVersionBoundary := func(newVersion semver.Version, currentHeight, insertHeight uint64, ctx fvm.Context, snapshotTree snapshot.SnapshotTree, vm fvm.VM, txIndex uint32) snapshot.SnapshotTree { + setVersionBoundaryScript := templates.GenerateSetVersionBoundaryScript(sc.AsTemplateEnv()) + tx := flow.NewTransactionBody(). + SetScript(setVersionBoundaryScript). + SetProposalKey(sc.FlowServiceAccount.Address, 0, 0). + AddAuthorizer(sc.FlowServiceAccount.Address). + SetPayer(sc.FlowServiceAccount.Address) + + tx. + AddArgument(jsoncdc.MustEncode(cadence.UInt8(newVersion.Major))). + AddArgument(jsoncdc.MustEncode(cadence.UInt8(newVersion.Minor))). + AddArgument(jsoncdc.MustEncode(cadence.UInt8(newVersion.Patch))). 
+ AddArgument(jsoncdc.MustEncode(cadence.String(newVersion.PreRelease))) + + tx.AddArgument(jsoncdc.MustEncode(cadence.UInt64(insertHeight))) + + startHeader := flow.Header{ + Height: currentHeight, + ChainID: chain.ChainID(), + Timestamp: time.Now().UTC(), + } + + blocks := new(envMock.Blocks) + ctxWithBlock := fvm.NewContextFromParent( + ctx, + fvm.WithBlockHeader(&startHeader), + fvm.WithBlocks(blocks), + ) + + executionSnapshot, output, err := vm.Run( + ctxWithBlock, + fvm.Transaction(tx, txIndex), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + return snapshotTree.Append(executionSnapshot) + } + + runSystemTxToUpdateNodeVersionBeaconContract := func(atHeight uint64, ctx fvm.Context, snapshotTree snapshot.SnapshotTree, vm fvm.VM, txIndex uint32) snapshot.SnapshotTree { + txBody := flow.NewTransactionBody(). + SetScript([]byte(fmt.Sprintf(` + import NodeVersionBeacon from %s + + transaction { + prepare(serviceAccount: auth(BorrowValue) &Account) { + + let versionBeaconHeartbeat = serviceAccount.storage + .borrow<&NodeVersionBeacon.Heartbeat>(from: NodeVersionBeacon.HeartbeatStoragePath) + ?? panic("Couldn't borrow NodeVersionBeacon.Heartbeat Resource") + versionBeaconHeartbeat.heartbeat() + } + } + `, + sc.NodeVersionBeacon.Address.HexWithPrefix()))). + SetProposalKey(sc.FlowServiceAccount.Address, 0, 0). + AddAuthorizer(sc.FlowServiceAccount.Address). + SetPayer(sc.FlowServiceAccount.Address) + + endHeader := flow.Header{ + Height: atHeight, + ChainID: chain.ChainID(), + Timestamp: time.Now().UTC(), + } + + blocks := new(envMock.Blocks) + ctxWithBlock := fvm.NewContextFromParent(ctx, + fvm.WithBlockHeader(&endHeader), + fvm.WithBlocks(blocks), + ) + + executionSnapshot, output, err := vm.Run( + ctxWithBlock, + fvm.Transaction(txBody, txIndex), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + + return snapshotTree.Append(executionSnapshot) + } + + t.Run("minimum required version", newVMTest(). + withContextOptions( + fvm.WithChain(chain), + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + ). 
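The two helpers above capture the timing that the test below asserts: setting a version boundary only records it, and the minimum version changes once the NodeVersionBeacon heartbeat transaction runs at or past the boundary height. A toy reduction of that behaviour (the beacon type here is illustrative, not the contract):

package main

import "fmt"

// beacon reduces the behaviour exercised below: a boundary set via
// setVersionBoundary only becomes current once heartbeat runs at or past the
// boundary height.
type beacon struct {
    currentVersion string
    pendingHeight  uint64
    pendingVersion string
    hasPending     bool
}

func (b *beacon) setBoundary(height uint64, version string) {
    b.pendingHeight, b.pendingVersion, b.hasPending = height, version, true
}

func (b *beacon) heartbeat(height uint64) {
    if b.hasPending && height >= b.pendingHeight {
        b.currentVersion, b.hasPending = b.pendingVersion, false
    }
}

func main() {
    b := &beacon{currentVersion: "0.0.0"}
    b.setBoundary(2000, "1.2.3-rc.1")

    b.heartbeat(1999)
    fmt.Println(b.currentVersion) // still 0.0.0

    b.heartbeat(2000)
    fmt.Println(b.currentVersion) // 1.2.3-rc.1
}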
+ run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + // default version is empty + require.Equal(t, semver.Version{}.String(), getVersion(ctx, snapshotTree)) + + // define mapping for flow go version to cadence version + flowVersion1 := semver.Version{ + Major: 1, + Minor: 2, + Patch: 3, + PreRelease: "rc.1", + } + cadenceVersion1 := semver.Version{ + Major: 2, + Minor: 1, + Patch: 3, + PreRelease: "rc.2", + } + environment.SetFVMToCadenceVersionMappingForTestingOnly( + environment.FlowGoToCadenceVersionMapping{ + FlowGoVersion: flowVersion1, + CadenceVersion: cadenceVersion1, + }) + + h0 := uint64(100) // starting height + hv1 := uint64(2000) // version boundary height + + txIndex := uint32(0) + + // insert version boundary 1 + snapshotTree = insertVersionBoundary(flowVersion1, h0, hv1, ctx, snapshotTree, vm, txIndex) + txIndex += 1 + + // so far no change: + require.Equal(t, semver.Version{}.String(), getVersion(ctx, snapshotTree)) + + // system transaction needs to run to update the flowVersion on chain + snapshotTree = runSystemTxToUpdateNodeVersionBeaconContract(hv1-1, ctx, snapshotTree, vm, txIndex) + txIndex += 1 + + // no change: + require.Equal(t, semver.Version{}.String(), getVersion(ctx, snapshotTree)) + + // system transaction needs to run to update the flowVersion on chain + snapshotTree = runSystemTxToUpdateNodeVersionBeaconContract(hv1, ctx, snapshotTree, vm, txIndex) + txIndex += 1 + + // switch to cadence version 1 + require.Equal(t, cadenceVersion1.String(), getVersion(ctx, snapshotTree)) + + // system transaction needs to run to update the flowVersion on chain + snapshotTree = runSystemTxToUpdateNodeVersionBeaconContract(hv1+1, ctx, snapshotTree, vm, txIndex) + + // still cadence version 1 + require.Equal(t, cadenceVersion1.String(), getVersion(ctx, snapshotTree)) + })) +} diff --git a/fvm/meter/computation_meter.go b/fvm/meter/computation_meter.go index 25e0b63ff54..d6f7fe55331 100644 --- a/fvm/meter/computation_meter.go +++ b/fvm/meter/computation_meter.go @@ -116,10 +116,28 @@ func (m *ComputationMeter) ComputationAvailable( if !ok { return true } + potentialComputationUsage := m.computationUsed + w*uint64(intensity) return potentialComputationUsage <= m.params.computationLimit } +// ComputationRemaining returns the remaining computation (intensity) left in the transaction for the given type +func (m *ComputationMeter) ComputationRemaining(kind common.ComputationKind) uint { + w, ok := m.params.computationWeights[kind] + // if not found return has capacity + // given the behaviour of MeterComputation is ignoring intensities without a set weight + if !ok { + return math.MaxUint + } + + remainingComputationUsage := m.params.computationLimit - m.computationUsed + if remainingComputationUsage <= 0 { + return 0 + } + + return uint(remainingComputationUsage / w) +} + // ComputationIntensities returns all the measured computational intensities func (m *ComputationMeter) ComputationIntensities() MeteredComputationIntensities { return m.computationIntensities diff --git a/fvm/meter/meter_test.go b/fvm/meter/meter_test.go index cba3643e151..f691b03b6d0 100644 --- a/fvm/meter/meter_test.go +++ b/fvm/meter/meter_test.go @@ -139,6 +139,27 @@ func TestWeightedComputationMetering(t *testing.T) { require.True(t, m.ComputationAvailable(1, 10)) }) + t.Run("check computation remaining", func(t *testing.T) { + m := meter.NewMeter( + meter.DefaultParameters(). + WithComputationLimit(10). 
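ComputationRemaining above inverts the metering formula: for a kind with weight w, the remaining intensity is the unused computation budget divided by w, and kinds without a configured weight are treated as unmetered. A simplified sketch of that arithmetic, ignoring the internal fixed-point precision shift:

package main

import (
    "fmt"
    "math"
)

// remaining mirrors ComputationMeter.ComputationRemaining in simplified form:
// kinds without a configured weight are effectively unmetered, otherwise the
// leftover budget is divided by the kind's weight.
func remaining(limit, used uint64, weights map[uint]uint64, kind uint) uint {
    w, ok := weights[kind]
    if !ok {
        return math.MaxUint // unmetered kind: unbounded remaining intensity
    }
    if used >= limit {
        return 0
    }
    return uint((limit - used) / w)
}

func main() {
    weights := map[uint]uint64{0: 1} // kind 0 costs 1 unit per intensity
    fmt.Println(remaining(10, 0, weights, 0)) // 10
    fmt.Println(remaining(10, 1, weights, 0)) // 9
    fmt.Println(remaining(10, 1, weights, 1)) // max uint: kind 1 has no weight
}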
+ WithComputationWeights( + map[common.ComputationKind]uint64{0: 1 << meter.MeterExecutionInternalPrecisionBytes}), + ) + + remaining := m.ComputationRemaining(0) + require.Equal(t, uint(10), remaining) + + err := m.MeterComputation(0, 1) + require.NoError(t, err) + require.Equal(t, uint64(1), m.TotalComputationUsed()) + + require.Equal(t, uint(9), m.ComputationRemaining(0)) + + // test a type without a weight (default zero) + require.Equal(t, uint(math.MaxUint), m.ComputationRemaining(1)) + }) + t.Run("merge meters", func(t *testing.T) { compKind := common.ComputationKind(0) m := meter.NewMeter( diff --git a/fvm/script.go b/fvm/script.go index 9a5a2551f4d..4d8a86323b8 100644 --- a/fvm/script.go +++ b/fvm/script.go @@ -173,7 +173,8 @@ func (executor *scriptExecutor) Execute() error { } func (executor *scriptExecutor) execute() error { - meterParams, _, err := getBodyMeterParameters( + executionParams, _, err := getExecutionParameters( + executor.env.Logger(), executor.ctx, executor.proc, executor.txnState) @@ -182,7 +183,7 @@ func (executor *scriptExecutor) execute() error { } txnId, err := executor.txnState.BeginNestedTransactionWithMeterParams( - meterParams) + executionParams) if err != nil { return err } diff --git a/fvm/storage/derived/derived_block_data.go b/fvm/storage/derived/derived_block_data.go index 96233359333..14deca315b7 100644 --- a/fvm/storage/derived/derived_block_data.go +++ b/fvm/storage/derived/derived_block_data.go @@ -3,12 +3,11 @@ package derived import ( "fmt" - "github.com/onflow/flow-go/fvm/storage/snapshot" - "github.com/onflow/cadence/common" "github.com/onflow/cadence/interpreter" "github.com/onflow/flow-go/fvm/storage/logical" + "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/fvm/storage/state" ) @@ -23,11 +22,12 @@ type DerivedTransactionPreparer interface { ) GetProgram(location common.AddressLocation) (*Program, bool) - GetMeterParamOverrides( + // GetStateExecutionParameters returns parameters needed for execution from the state. + GetStateExecutionParameters( txnState state.NestedTransactionPreparer, - getMeterParamOverrides ValueComputer[struct{}, MeterParamOverrides], + getMeterParamOverrides ValueComputer[struct{}, StateExecutionParameters], ) ( - MeterParamOverrides, + StateExecutionParameters, *snapshot.ExecutionSnapshot, error, ) @@ -46,7 +46,7 @@ type Program struct { type DerivedBlockData struct { programs *DerivedDataTable[common.AddressLocation, *Program] - meterParamOverrides *DerivedDataTable[struct{}, MeterParamOverrides] + meterParamOverrides *DerivedDataTable[struct{}, StateExecutionParameters] } // DerivedTransactionData is the derived data scratch space for a single @@ -59,7 +59,7 @@ type DerivedTransactionData struct { // There's only a single entry in this table. For simplicity, we'll use // struct{} as the entry's key. 
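The single-entry table described in this comment is what lets the execution parameters (and now the execution version) be computed once per block and reused by every transaction until an invalidator clears the entry. A generic memoize-with-invalidation sketch of that idea (not the DerivedDataTable implementation):

package main

import "fmt"

// cachedParams is an illustrative reduction of the single-entry derived-data
// table: the value is computed at most once and reused until invalidated.
type cachedParams[T any] struct {
    value    T
    computed bool
}

func (c *cachedParams[T]) getOrCompute(compute func() T) T {
    if !c.computed {
        c.value = compute()
        c.computed = true
    }
    return c.value
}

func (c *cachedParams[T]) invalidate() { c.computed = false }

func main() {
    calls := 0
    cache := &cachedParams[string]{}
    compute := func() string {
        calls++
        return "execution params (version 1.2.3)"
    }

    cache.getOrCompute(compute)
    cache.getOrCompute(compute)
    fmt.Println(calls) // 1: second call is served from the cache

    // e.g. a transaction wrote to a register that the computation had read
    cache.invalidate()
    cache.getOrCompute(compute)
    fmt.Println(calls) // 2: recomputed after invalidation
}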
- meterParamOverrides *TableTransaction[struct{}, MeterParamOverrides] + executionParameters *TableTransaction[struct{}, StateExecutionParameters] } func NewEmptyDerivedBlockData( @@ -72,7 +72,7 @@ func NewEmptyDerivedBlockData( ](initialSnapshotTime), meterParamOverrides: NewEmptyTable[ struct{}, - MeterParamOverrides, + StateExecutionParameters, ](initialSnapshotTime), } } @@ -87,14 +87,14 @@ func (block *DerivedBlockData) NewChildDerivedBlockData() *DerivedBlockData { func (block *DerivedBlockData) NewSnapshotReadDerivedTransactionData() *DerivedTransactionData { return &DerivedTransactionData{ programs: block.programs.NewSnapshotReadTableTransaction(), - meterParamOverrides: block.meterParamOverrides.NewSnapshotReadTableTransaction(), + executionParameters: block.meterParamOverrides.NewSnapshotReadTableTransaction(), } } func (block *DerivedBlockData) NewCachingSnapshotReadDerivedTransactionData() *DerivedTransactionData { return &DerivedTransactionData{ programs: block.programs.NewCachingSnapshotReadTableTransaction(), - meterParamOverrides: block.meterParamOverrides.NewCachingSnapshotReadTableTransaction(), + executionParameters: block.meterParamOverrides.NewCachingSnapshotReadTableTransaction(), } } @@ -121,7 +121,7 @@ func (block *DerivedBlockData) NewDerivedTransactionData( return &DerivedTransactionData{ programs: txnPrograms, - meterParamOverrides: txnMeterParamOverrides, + executionParameters: txnMeterParamOverrides, }, nil } @@ -178,19 +178,19 @@ func (transaction *DerivedTransactionData) AddInvalidator( } transaction.programs.AddInvalidator(invalidator.ProgramInvalidator()) - transaction.meterParamOverrides.AddInvalidator( - invalidator.MeterParamOverridesInvalidator()) + transaction.executionParameters.AddInvalidator( + invalidator.ExecutionParametersInvalidator()) } -func (transaction *DerivedTransactionData) GetMeterParamOverrides( +func (transaction *DerivedTransactionData) GetStateExecutionParameters( txnState state.NestedTransactionPreparer, - getMeterParamOverrides ValueComputer[struct{}, MeterParamOverrides], + getMeterParamOverrides ValueComputer[struct{}, StateExecutionParameters], ) ( - MeterParamOverrides, + StateExecutionParameters, *snapshot.ExecutionSnapshot, error, ) { - return transaction.meterParamOverrides.GetWithStateOrCompute( + return transaction.executionParameters.GetWithStateOrCompute( txnState, struct{}{}, getMeterParamOverrides) @@ -202,7 +202,7 @@ func (transaction *DerivedTransactionData) Validate() error { return fmt.Errorf("programs validate failed: %w", err) } - err = transaction.meterParamOverrides.Validate() + err = transaction.executionParameters.Validate() if err != nil { return fmt.Errorf("meter param overrides validate failed: %w", err) } @@ -216,7 +216,7 @@ func (transaction *DerivedTransactionData) Commit() error { return fmt.Errorf("programs commit failed: %w", err) } - err = transaction.meterParamOverrides.Commit() + err = transaction.executionParameters.Commit() if err != nil { return fmt.Errorf("meter param overrides commit failed: %w", err) } diff --git a/fvm/storage/derived/invalidator.go b/fvm/storage/derived/invalidator.go index a17bded4c67..9467c169bdc 100644 --- a/fvm/storage/derived/invalidator.go +++ b/fvm/storage/derived/invalidator.go @@ -1,6 +1,7 @@ package derived import ( + "github.com/coreos/go-semver/semver" "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/meter" @@ -12,17 +13,23 @@ type MeterParamOverrides struct { MemoryLimit *uint64 // nil indicates no override } +// StateExecutionParameters are 
parameters needed for execution defined in the execution state. +type StateExecutionParameters struct { + MeterParamOverrides + ExecutionVersion semver.Version +} + type ProgramInvalidator TableInvalidator[ common.AddressLocation, *Program, ] -type MeterParamOverridesInvalidator TableInvalidator[ +type ExecutionParametersInvalidator TableInvalidator[ struct{}, - MeterParamOverrides, + StateExecutionParameters, ] type TransactionInvalidator interface { ProgramInvalidator() ProgramInvalidator - MeterParamOverridesInvalidator() MeterParamOverridesInvalidator + ExecutionParametersInvalidator() ExecutionParametersInvalidator } diff --git a/fvm/storage/state/execution_state.go b/fvm/storage/state/execution_state.go index ca67bb0a0c0..12968e4b0a6 100644 --- a/fvm/storage/state/execution_state.go +++ b/fvm/storage/state/execution_state.go @@ -2,6 +2,9 @@ package state import ( "fmt" + "math" + + "github.com/coreos/go-semver/semver" "github.com/onflow/cadence/common" "github.com/onflow/crypto/hash" @@ -27,7 +30,8 @@ type ExecutionState struct { finalized bool *spockState - meter *meter.Meter + meter *meter.Meter + executionVersion semver.Version // NOTE: parent and child state shares the same limits controller *limitsController @@ -40,6 +44,11 @@ type StateParameters struct { maxValueSizeAllowed uint64 } +type ExecutionParameters struct { + meter.MeterParameters + ExecutionVersion semver.Version +} + func DefaultParameters() StateParameters { return StateParameters{ MeterParameters: meter.DefaultParameters(), @@ -129,19 +138,20 @@ func NewExecutionStateWithSpockStateHasher( // NewChildWithMeterParams generates a new child state using the provide meter // parameters. func (state *ExecutionState) NewChildWithMeterParams( - params meter.MeterParameters, + params ExecutionParameters, ) *ExecutionState { return &ExecutionState{ finalized: false, spockState: state.spockState.NewChild(), - meter: meter.NewMeter(params), + meter: meter.NewMeter(params.MeterParameters), + executionVersion: params.ExecutionVersion, limitsController: state.limitsController, } } // NewChild generates a new child state using the parent's meter parameters. 
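ExecutionParameters above embeds the existing meter parameters and carries the execution version alongside them, so call sites that used to pass meter.MeterParameters now pass a single struct and child states inherit both. A generic sketch of that embedding pattern with stand-in types (not the flow-go definitions):

package main

import (
    "fmt"

    "github.com/coreos/go-semver/semver"
)

// meterParameters stands in for meter.MeterParameters.
type meterParameters struct {
    ComputationLimit uint64
    MemoryLimit      uint64
}

// executionParameters mirrors the shape of state.ExecutionParameters: the
// meter parameters are embedded and the execution version rides along.
type executionParameters struct {
    meterParameters
    ExecutionVersion semver.Version
}

// newChild inherits the parent's parameters wholesale, as NewChild does above.
func newChild(parent executionParameters) executionParameters {
    return parent
}

func main() {
    parent := executionParameters{
        meterParameters:  meterParameters{ComputationLimit: 9999, MemoryLimit: 1 << 20},
        ExecutionVersion: semver.Version{Major: 1, Minor: 2, Patch: 3},
    }
    child := newChild(parent)
    // embedded fields stay accessible directly, so meter-style call sites keep working
    fmt.Println(child.ComputationLimit, child.ExecutionVersion.String())
}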
func (state *ExecutionState) NewChild() *ExecutionState { - return state.NewChildWithMeterParams(state.meter.MeterParameters) + return state.NewChildWithMeterParams(state.ExecutionParameters()) } // InteractionUsed returns the amount of ledger interaction (total ledger byte read + total ledger byte written) @@ -235,6 +245,20 @@ func (state *ExecutionState) ComputationAvailable(kind common.ComputationKind, i return true } +// ComputationRemaining returns the available computation capacity without metering +func (state *ExecutionState) ComputationRemaining(kind common.ComputationKind) uint { + if state.finalized { + // if state is finalized return 0 + return 0 + } + + if state.enforceLimits { + return state.meter.ComputationRemaining(kind) + } + + return math.MaxUint +} + // TotalComputationUsed returns total computation used func (state *ExecutionState) TotalComputationUsed() uint64 { return state.meter.TotalComputationUsed() @@ -337,6 +361,13 @@ func (state *ExecutionState) checkSize( return nil } +func (state *ExecutionState) ExecutionParameters() ExecutionParameters { + return ExecutionParameters{ + MeterParameters: state.meter.MeterParameters, + ExecutionVersion: state.executionVersion, + } +} + func (state *ExecutionState) readSetSize() int { return state.spockState.readSetSize() } diff --git a/fvm/storage/state/transaction_state.go b/fvm/storage/state/transaction_state.go index 4175a9d4941..1b00bf78973 100644 --- a/fvm/storage/state/transaction_state.go +++ b/fvm/storage/state/transaction_state.go @@ -22,6 +22,7 @@ func (id NestedTransactionId) StateForTestingOnly() *ExecutionState { type Meter interface { MeterComputation(kind common.ComputationKind, intensity uint) error ComputationAvailable(kind common.ComputationKind, intensity uint) bool + ComputationRemaining(kind common.ComputationKind) uint ComputationIntensities() meter.MeteredComputationIntensities TotalComputationLimit() uint TotalComputationUsed() uint64 @@ -44,6 +45,9 @@ type Meter interface { type NestedTransactionPreparer interface { Meter + // ExecutionParameters returns the execution parameters + ExecutionParameters() ExecutionParameters + // NumNestedTransactions returns the number of uncommitted nested // transactions. Note that the main transaction is not considered a // nested transaction. @@ -83,7 +87,7 @@ type NestedTransactionPreparer interface { // the provided meter parameters. This returns error if the current nested // transaction is program restricted. 
BeginNestedTransactionWithMeterParams( - params meter.MeterParameters, + params ExecutionParameters, ) ( NestedTransactionId, error, @@ -199,6 +203,10 @@ func (txnState *transactionState) current() nestedTransactionStackFrame { return txnState.nestedTransactions[txnState.NumNestedTransactions()] } +func (txnState *transactionState) ExecutionParameters() ExecutionParameters { + return txnState.current().ExecutionParameters() +} + func (txnState *transactionState) NumNestedTransactions() int { return len(txnState.nestedTransactions) - 1 } @@ -266,7 +274,7 @@ func (txnState *transactionState) BeginNestedTransaction() ( } func (txnState *transactionState) BeginNestedTransactionWithMeterParams( - params meter.MeterParameters, + params ExecutionParameters, ) ( NestedTransactionId, error, @@ -451,6 +459,10 @@ func (txnState *transactionState) ComputationAvailable( return txnState.current().ComputationAvailable(kind, intensity) } +func (txnState *transactionState) ComputationRemaining(kind common.ComputationKind) uint { + return txnState.current().ComputationRemaining(kind) +} + func (txnState *transactionState) MeterMemory( kind common.MemoryKind, intensity uint, diff --git a/fvm/storage/state/transaction_state_test.go b/fvm/storage/state/transaction_state_test.go index e41ff7951f9..f8b18b5d186 100644 --- a/fvm/storage/state/transaction_state_test.go +++ b/fvm/storage/state/transaction_state_test.go @@ -104,7 +104,9 @@ func TestUnrestrictedNestedTransactionDifferentMeterParams(t *testing.T) { require.Equal(t, uint(math.MaxUint), mainState.TotalMemoryLimit()) id1, err := txn.BeginNestedTransactionWithMeterParams( - meter.DefaultParameters().WithMemoryLimit(1)) + state.ExecutionParameters{ + MeterParameters: meter.DefaultParameters().WithMemoryLimit(1), + }) require.NoError(t, err) nestedState1 := id1.StateForTestingOnly() @@ -112,7 +114,9 @@ func TestUnrestrictedNestedTransactionDifferentMeterParams(t *testing.T) { require.Equal(t, uint(1), nestedState1.TotalMemoryLimit()) id2, err := txn.BeginNestedTransactionWithMeterParams( - meter.DefaultParameters().WithMemoryLimit(2)) + state.ExecutionParameters{ + MeterParameters: meter.DefaultParameters().WithMemoryLimit(2), + }) require.NoError(t, err) nestedState2 := id2.StateForTestingOnly() diff --git a/fvm/systemcontracts/system_contracts.go b/fvm/systemcontracts/system_contracts.go index 3760044698e..ad9f66c4a65 100644 --- a/fvm/systemcontracts/system_contracts.go +++ b/fvm/systemcontracts/system_contracts.go @@ -65,6 +65,7 @@ const ( ContractStorageFeesFunction_calculateAccountCapacity = "calculateAccountCapacity" ContractStorageFeesFunction_getAccountsCapacityForTransactionStorageCheck = "getAccountsCapacityForTransactionStorageCheck" ContractStorageFeesFunction_defaultTokenAvailableBalance = "defaultTokenAvailableBalance" + ContractVersionBeacon_getCurrentVersionBoundary = "getCurrentVersionBoundary" // These are the account indexes of system contracts as deployed by the default bootstrapping. 
// On long-running networks some of these contracts might have been deployed after bootstrapping, diff --git a/fvm/transactionInvoker.go b/fvm/transactionInvoker.go index 3b97964db74..6edf2ec5405 100644 --- a/fvm/transactionInvoker.go +++ b/fvm/transactionInvoker.go @@ -34,6 +34,8 @@ type TransactionExecutorParams struct { // Note: This is disabled only by tests TransactionBodyExecutionEnabled bool + + ReadVersionFromNodeVersionBeacon bool } func DefaultTransactionExecutorParams() TransactionExecutorParams { @@ -42,6 +44,7 @@ func DefaultTransactionExecutorParams() TransactionExecutorParams { SequenceNumberCheckAndIncrementEnabled: true, AccountKeyWeightThreshold: AccountKeyWeightThreshold, TransactionBodyExecutionEnabled: true, + ReadVersionFromNodeVersionBeacon: true, } } @@ -68,7 +71,7 @@ type transactionExecutor struct { // the state reads needed to compute the metering parameters // this is used to invalidate the metering parameters if a transaction // writes to any of those registers - meterStateRead *snapshot.ExecutionSnapshot + executionStateRead *snapshot.ExecutionSnapshot cadenceRuntime *reusableRuntime.ReusableCadenceRuntime txnBodyExecutor runtime.Executor @@ -199,27 +202,27 @@ func (executor *transactionExecutor) preprocessTransactionBody() error { return err } } - // get meter parameters - meterParams, meterStateRead, err := getBodyMeterParameters( + executionParameters, executionStateRead, err := getExecutionParameters( + executor.env.Logger(), executor.ctx, executor.proc, executor.txnState) if err != nil { - return fmt.Errorf("error getting meter parameters: %w", err) + return fmt.Errorf("error getting execution parameters: %w", err) } - if len(meterStateRead.WriteSet) != 0 { + if len(executionStateRead.WriteSet) != 0 { // this should never happen // and indicates an implementation error - panic("getting metering parameters should not write to registers") + panic("getting execution parameters should not write to registers") } - // we need to save the meter state read for invalidation purposes - executor.meterStateRead = meterStateRead + // we need to save the execution state read for invalidation purposes + executor.executionStateRead = executionStateRead txnId, err := executor.txnState.BeginNestedTransactionWithMeterParams( - meterParams) + executionParameters) if err != nil { return err } @@ -403,7 +406,8 @@ func (executor *transactionExecutor) normalExecution() ( invalidator = environment.NewDerivedDataInvalidator( contractUpdates, bodySnapshot, - executor.meterStateRead) + executor.executionStateRead, + ) // Check if all account storage limits are ok // diff --git a/integration/benchmark/load/load_type_test.go b/integration/benchmark/load/load_type_test.go index b63a9c18903..a856d4540e9 100644 --- a/integration/benchmark/load/load_type_test.go +++ b/integration/benchmark/load/load_type_test.go @@ -8,7 +8,6 @@ import ( "github.com/onflow/cadence" "github.com/onflow/cadence/encoding/ccf" - convert2 "github.com/onflow/flow-emulator/convert" "github.com/rs/zerolog" "github.com/stretchr/testify/require" @@ -28,6 +27,7 @@ import ( "github.com/onflow/flow-go/integration/benchmark/common" "github.com/onflow/flow-go/integration/benchmark/load" "github.com/onflow/flow-go/integration/convert" + "github.com/onflow/flow-go/integration/internal/emulator" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) @@ -254,7 +254,7 @@ func (t *testTransactionSender) Send(tx *sdk.Transaction) (sdk.TransactionResult Error: result.Err, BlockID: sdk.EmptyID, 
BlockHeight: 0, - TransactionID: convert2.FlowIdentifierToSDK(txBody.ID()), + TransactionID: emulator.FlowIdentifierToSDK(txBody.ID()), CollectionID: sdk.EmptyID, } diff --git a/integration/dkg/dkg_client_test.go b/integration/dkg/dkg_client_test.go index 2399c1401e5..728915004de 100644 --- a/integration/dkg/dkg_client_test.go +++ b/integration/dkg/dkg_client_test.go @@ -10,10 +10,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/onflow/cadence" - jsoncdc "github.com/onflow/cadence/encoding/json" - emulator "github.com/onflow/flow-emulator/emulator" - "github.com/onflow/crypto" "github.com/onflow/flow-core-contracts/lib/go/contracts" "github.com/onflow/flow-core-contracts/lib/go/templates" @@ -23,6 +20,7 @@ import ( sdktemplates "github.com/onflow/flow-go-sdk/templates" "github.com/onflow/flow-go-sdk/test" + emulator "github.com/onflow/flow-go/integration/internal/emulator" "github.com/onflow/flow-go/integration/utils" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/dkg" @@ -34,13 +32,13 @@ type ClientSuite struct { contractClient *dkg.Client - env templates.Environment - blockchain emulator.Emulator - emulatorClient *utils.EmulatorClient - - dkgAddress sdk.Address - dkgAccountKey *sdk.AccountKey - dkgSigner sdkcrypto.Signer + env templates.Environment + blockchain emulator.Emulator + emulatorClient *utils.EmulatorClient + serviceAccountAddress sdk.Address + dkgAddress sdk.Address + dkgAccountKey *sdk.AccountKey + dkgSigner sdkcrypto.Signer } func TestDKGClient(t *testing.T) { @@ -57,7 +55,7 @@ func (s *ClientSuite) SetupTest() { s.blockchain = blockchain s.emulatorClient = utils.NewEmulatorClient(blockchain) - + s.serviceAccountAddress = sdk.Address(s.blockchain.ServiceKey().Address) // deploy contract s.deployDKGContract() @@ -234,16 +232,16 @@ func (s *ClientSuite) setUpAdmin() { setUpAdminTx := sdk.NewTransaction(). SetScript(templates.GeneratePublishDKGParticipantScript(s.env)). SetComputeLimit(9999). - SetProposalKey(s.blockchain.ServiceKey().Address, s.blockchain.ServiceKey().Index, + SetProposalKey(s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). AddAuthorizer(s.dkgAddress) signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) s.signAndSubmit(setUpAdminTx, - []sdk.Address{s.blockchain.ServiceKey().Address, s.dkgAddress}, + []sdk.Address{s.serviceAccountAddress, s.dkgAddress}, []sdkcrypto.Signer{signer, s.dkgSigner}, ) } @@ -262,9 +260,9 @@ func (s *ClientSuite) startDKGWithParticipants(nodeIDs []flow.Identifier) { startDKGTx := sdk.NewTransaction(). SetScript(templates.GenerateStartDKGScript(s.env)). SetComputeLimit(9999). - SetProposalKey(s.blockchain.ServiceKey().Address, s.blockchain.ServiceKey().Index, + SetProposalKey(s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). 
AddAuthorizer(s.dkgAddress) err := startDKGTx.AddArgument(cadence.NewArray(valueNodeIDs)) @@ -274,7 +272,7 @@ func (s *ClientSuite) startDKGWithParticipants(nodeIDs []flow.Identifier) { require.NoError(s.T(), err) s.signAndSubmit(startDKGTx, - []sdk.Address{s.blockchain.ServiceKey().Address, s.dkgAddress}, + []sdk.Address{s.serviceAccountAddress, s.dkgAddress}, []sdkcrypto.Signer{signer, s.dkgSigner}, ) @@ -293,9 +291,9 @@ func (s *ClientSuite) createParticipant(nodeID flow.Identifier, authoriser sdk.A createParticipantTx := sdk.NewTransaction(). SetScript(templates.GenerateCreateDKGParticipantScript(s.env)). SetComputeLimit(9999). - SetProposalKey(s.blockchain.ServiceKey().Address, s.blockchain.ServiceKey().Index, + SetProposalKey(s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). AddAuthorizer(authoriser) err := createParticipantTx.AddArgument(cadence.NewAddress(s.dkgAddress)) @@ -310,7 +308,7 @@ func (s *ClientSuite) createParticipant(nodeID flow.Identifier, authoriser sdk.A require.NoError(s.T(), err) s.signAndSubmit(createParticipantTx, - []sdk.Address{s.blockchain.ServiceKey().Address, authoriser}, + []sdk.Address{s.serviceAccountAddress, authoriser}, []sdkcrypto.Signer{s2, signer}, ) diff --git a/integration/dkg/dkg_emulator_suite.go b/integration/dkg/dkg_emulator_suite.go index ee66e7e594c..d95d0baa99c 100644 --- a/integration/dkg/dkg_emulator_suite.go +++ b/integration/dkg/dkg_emulator_suite.go @@ -14,24 +14,23 @@ import ( jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/onflow/flow-core-contracts/lib/go/contracts" "github.com/onflow/flow-core-contracts/lib/go/templates" - emulator "github.com/onflow/flow-emulator/emulator" sdk "github.com/onflow/flow-go-sdk" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" sdktemplates "github.com/onflow/flow-go-sdk/templates" "github.com/onflow/flow-go-sdk/test" - "github.com/onflow/flow-go/module/metrics" - dkgeng "github.com/onflow/flow-go/engine/consensus/dkg" "github.com/onflow/flow-go/engine/testutil" "github.com/onflow/flow-go/fvm/systemcontracts" + emulator "github.com/onflow/flow-go/integration/internal/emulator" "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/integration/utils" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/dkg" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/state/protocol/events/gadgets" "github.com/onflow/flow-go/storage/badger" @@ -54,10 +53,10 @@ type EmulatorSuite struct { dkgAccountKey *sdk.AccountKey dkgSigner sdkcrypto.Signer checkDKGUnhappy bool // activate log hook for DKGBroker to check if the DKG core is flagging misbehaviours - - netIDs flow.IdentityList - nodeAccounts []*nodeAccount - nodes []*node + serviceAccountAddress sdk.Address + netIDs flow.IdentityList + nodeAccounts []*nodeAccount + nodes []*node } func (s *EmulatorSuite) SetupTest() { @@ -119,7 +118,7 @@ func (s *EmulatorSuite) initEmulator() { s.Require().NoError(err) s.blockchain = blockchain - + s.serviceAccountAddress = sdk.Address(s.blockchain.ServiceKey().Address) s.adminEmulatorClient = utils.NewEmulatorClient(blockchain) s.hub = stub.NewNetworkHub() @@ -163,15 +162,15 @@ func (s *EmulatorSuite) setupDKGAdmin() { SetScript(templates.GeneratePublishDKGParticipantScript(s.env)). SetComputeLimit(9999). 
SetProposalKey( - s.blockchain.ServiceKey().Address, + s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). AddAuthorizer(s.dkgAddress) signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) _, err = s.prepareAndSubmit(setUpAdminTx, - []sdk.Address{s.blockchain.ServiceKey().Address, s.dkgAddress}, + []sdk.Address{s.serviceAccountAddress, s.dkgAddress}, []sdkcrypto.Signer{signer, s.dkgSigner}, ) require.NoError(s.T(), err) @@ -229,13 +228,13 @@ func (s *EmulatorSuite) createAndFundAccount(netID bootstrap.NodeInfo) *nodeAcco sc.FungibleToken.Address.Hex(), sc.FlowToken.Address.Hex(), ))). - AddAuthorizer(s.blockchain.ServiceKey().Address). + AddAuthorizer(s.serviceAccountAddress). SetProposalKey( - s.blockchain.ServiceKey().Address, + s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber, ). - SetPayer(s.blockchain.ServiceKey().Address) + SetPayer(s.serviceAccountAddress) err = fundAccountTx.AddArgument(cadence.UFix64(1_000_000)) require.NoError(s.T(), err) @@ -244,7 +243,7 @@ func (s *EmulatorSuite) createAndFundAccount(netID bootstrap.NodeInfo) *nodeAcco signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) _, err = s.prepareAndSubmit(fundAccountTx, - []sdk.Address{s.blockchain.ServiceKey().Address}, + []sdk.Address{s.serviceAccountAddress}, []sdkcrypto.Signer{signer}, ) require.NoError(s.T(), err) @@ -307,10 +306,10 @@ func (s *EmulatorSuite) startDKGWithParticipants(accounts []*nodeAccount) { SetScript(templates.GenerateStartDKGScript(s.env)). SetComputeLimit(9999). SetProposalKey( - s.blockchain.ServiceKey().Address, + s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). AddAuthorizer(s.dkgAddress) err := startDKGTx.AddArgument(cadence.NewArray(valueNodeIDs)) @@ -318,7 +317,7 @@ func (s *EmulatorSuite) startDKGWithParticipants(accounts []*nodeAccount) { signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) _, err = s.prepareAndSubmit(startDKGTx, - []sdk.Address{s.blockchain.ServiceKey().Address, s.dkgAddress}, + []sdk.Address{s.serviceAccountAddress, s.dkgAddress}, []sdkcrypto.Signer{signer, s.dkgSigner}, ) require.NoError(s.T(), err) @@ -334,7 +333,7 @@ func (s *EmulatorSuite) claimDKGParticipant(node *node) { SetScript(templates.GenerateCreateDKGParticipantScript(s.env)). SetComputeLimit(9999). SetProposalKey( - s.blockchain.ServiceKey().Address, + s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber, ). 
@@ -350,7 +349,7 @@ func (s *EmulatorSuite) claimDKGParticipant(node *node) { signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) _, err = s.prepareAndSubmit(createParticipantTx, - []sdk.Address{node.account.accountAddress, s.blockchain.ServiceKey().Address, s.dkgAddress}, + []sdk.Address{node.account.accountAddress, s.serviceAccountAddress, s.dkgAddress}, []sdkcrypto.Signer{node.account.accountSigner, signer, s.dkgSigner}, ) require.NoError(s.T(), err) @@ -371,21 +370,21 @@ func (s *EmulatorSuite) sendDummyTx() (*flow.Block, error) { createAccountTx, err := sdktemplates.CreateAccount( []*sdk.AccountKey{test.AccountKeyGenerator().New()}, []sdktemplates.Contract{}, - s.blockchain.ServiceKey().Address) + s.serviceAccountAddress) if err != nil { return nil, err } createAccountTx. SetProposalKey( - s.blockchain.ServiceKey().Address, + s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address) + SetPayer(s.serviceAccountAddress) signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) block, err := s.prepareAndSubmit(createAccountTx, - []sdk.Address{s.blockchain.ServiceKey().Address}, + []sdk.Address{s.serviceAccountAddress}, []sdkcrypto.Signer{signer}, ) return block, err diff --git a/integration/epochs/cluster_epoch_test.go b/integration/epochs/cluster_epoch_test.go index 953a684e3e7..fbee6b5af4b 100644 --- a/integration/epochs/cluster_epoch_test.go +++ b/integration/epochs/cluster_epoch_test.go @@ -13,13 +13,12 @@ import ( "github.com/onflow/flow-core-contracts/lib/go/contracts" "github.com/onflow/flow-core-contracts/lib/go/templates" - emulator "github.com/onflow/flow-emulator/emulator" - sdk "github.com/onflow/flow-go-sdk" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" sdktemplates "github.com/onflow/flow-go-sdk/templates" "github.com/onflow/flow-go-sdk/test" + emulator "github.com/onflow/flow-go/integration/internal/emulator" "github.com/onflow/flow-go/integration/utils" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/factory" @@ -31,10 +30,10 @@ import ( type Suite struct { suite.Suite - env templates.Environment - blockchain *emulator.Blockchain - emulatorClient *utils.EmulatorClient - + env templates.Environment + blockchain *emulator.Blockchain + emulatorClient *utils.EmulatorClient + serviceAccountAddress sdk.Address // Quorum Certificate deployed account and address qcAddress sdk.Address qcAccountKey *sdk.AccountKey @@ -51,7 +50,7 @@ func (s *Suite) SetupTest() { ) s.Require().NoError(err) s.emulatorClient = utils.NewEmulatorClient(s.blockchain) - + s.serviceAccountAddress = sdk.Address(s.blockchain.ServiceKey().Address) // deploy epoch qc contract s.deployEpochQCContract() } @@ -104,16 +103,16 @@ func (s *Suite) PublishVoter() { publishVoterTx := sdk.NewTransaction(). SetScript(templates.GeneratePublishVoterScript(s.env)). SetComputeLimit(9999). - SetProposalKey(s.blockchain.ServiceKey().Address, + SetProposalKey(s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). 
AddAuthorizer(s.qcAddress) signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) s.SignAndSubmit(publishVoterTx, - []sdk.Address{s.blockchain.ServiceKey().Address, s.qcAddress}, + []sdk.Address{s.serviceAccountAddress, s.qcAddress}, []sdkcrypto.Signer{signer, s.qcSigner}) } @@ -124,9 +123,9 @@ func (s *Suite) StartVoting(clustering flow.ClusterList, clusterCount, nodesPerC startVotingTx := sdk.NewTransaction(). SetScript(templates.GenerateStartVotingScript(s.env)). SetComputeLimit(9999). - SetProposalKey(s.blockchain.ServiceKey().Address, + SetProposalKey(s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). AddAuthorizer(s.qcAddress) clusterIndices := make([]cadence.Value, 0, clusterCount) @@ -170,7 +169,7 @@ func (s *Suite) StartVoting(clustering flow.ClusterList, clusterCount, nodesPerC require.NoError(s.T(), err) s.SignAndSubmit(startVotingTx, - []sdk.Address{s.blockchain.ServiceKey().Address, s.qcAddress}, + []sdk.Address{s.serviceAccountAddress, s.qcAddress}, []sdkcrypto.Signer{signer, s.qcSigner}) } @@ -180,9 +179,9 @@ func (s *Suite) CreateVoterResource(address sdk.Address, nodeID flow.Identifier, registerVoterTx := sdk.NewTransaction(). SetScript(templates.GenerateCreateVoterScript(s.env)). SetComputeLimit(9999). - SetProposalKey(s.blockchain.ServiceKey().Address, + SetProposalKey(s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). AddAuthorizer(address) err := registerVoterTx.AddArgument(cadence.NewAddress(s.qcAddress)) @@ -202,7 +201,7 @@ func (s *Suite) CreateVoterResource(address sdk.Address, nodeID flow.Identifier, require.NoError(s.T(), err) s.SignAndSubmit(registerVoterTx, - []sdk.Address{s.blockchain.ServiceKey().Address, address}, + []sdk.Address{s.serviceAccountAddress, address}, []sdkcrypto.Signer{signer, nodeSigner}) } @@ -210,16 +209,16 @@ func (s *Suite) StopVoting() { tx := sdk.NewTransaction(). SetScript(templates.GenerateStopVotingScript(s.env)). SetComputeLimit(9999). - SetProposalKey(s.blockchain.ServiceKey().Address, + SetProposalKey(s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). 
AddAuthorizer(s.qcAddress) signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) s.SignAndSubmit(tx, - []sdk.Address{s.blockchain.ServiceKey().Address, s.qcAddress}, + []sdk.Address{s.serviceAccountAddress, s.qcAddress}, []sdkcrypto.Signer{signer, s.qcSigner}) } diff --git a/integration/go.mod b/integration/go.mod index 92956116a7e..949e50576be 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -24,7 +24,6 @@ require ( github.com/onflow/crypto v0.25.2 github.com/onflow/flow-core-contracts/lib/go/contracts v1.4.0 github.com/onflow/flow-core-contracts/lib/go/templates v1.4.0 - github.com/onflow/flow-emulator v1.0.2-0.20241021223526-a545558d37a2 github.com/onflow/flow-go v0.38.0-preview.0.0.20241021221952-af9cd6e99de1 github.com/onflow/flow-go-sdk v1.2.2 github.com/onflow/flow-go/insecure v0.0.0-00010101000000-000000000000 @@ -33,10 +32,12 @@ require ( github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.46.0 + github.com/psiemens/graceland v1.0.0 github.com/rs/zerolog v1.29.0 github.com/stretchr/testify v1.9.0 go.einride.tech/pid v0.1.0 go.uber.org/atomic v1.11.0 + go.uber.org/mock v0.4.0 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a golang.org/x/sync v0.8.0 google.golang.org/grpc v1.63.2 @@ -104,7 +105,6 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.5.0 // indirect github.com/docker/cli v24.0.6+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect @@ -129,7 +129,6 @@ require ( github.com/gammazero/workerpool v1.1.2 // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect github.com/getsentry/sentry-go v0.27.0 // indirect - github.com/glebarez/go-sqlite v1.22.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-kit/kit v0.12.0 // indirect @@ -141,7 +140,6 @@ require ( github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.14.1 // indirect - github.com/go-redis/redis/v8 v8.11.5 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect @@ -215,7 +213,6 @@ require ( github.com/libp2p/go-netroute v0.2.1 // indirect github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.1 // indirect - github.com/logrusorgru/aurora v2.0.3+incompatible // indirect github.com/logrusorgru/aurora/v4 v4.0.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -249,7 +246,6 @@ require ( github.com/onflow/flow-ft/lib/go/templates v1.0.1 // indirect github.com/onflow/flow-nft/lib/go/contracts v1.2.2 // indirect github.com/onflow/flow-nft/lib/go/templates v1.2.1 // indirect - github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 // indirect github.com/onflow/sdks v0.6.0-preview.1 // indirect github.com/onflow/wal v1.0.2 // indirect github.com/onsi/ginkgo/v2 v2.13.2 // indirect @@ -267,14 +263,12 @@ require ( github.com/polydawn/refmt v0.89.0 // 
indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/psiemens/graceland v1.0.0 // indirect github.com/psiemens/sconfig v0.1.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/qtls-go1-20 v0.4.1 // indirect github.com/quic-go/quic-go v0.40.1 // indirect github.com/quic-go/webtransport-go v0.6.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/rootless-containers/rootlesskit v1.1.1 // indirect @@ -327,7 +321,6 @@ require ( go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/dig v1.17.1 // indirect go.uber.org/fx v1.20.1 // indirect - go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect golang.org/x/crypto v0.26.0 // indirect @@ -350,10 +343,6 @@ require ( gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect lukechampine.com/blake3 v1.3.0 // indirect - modernc.org/libc v1.37.6 // indirect - modernc.org/mathutil v1.6.0 // indirect - modernc.org/memory v1.7.2 // indirect - modernc.org/sqlite v1.28.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/integration/go.sum b/integration/go.sum index 7e114711f03..c52c541a3ed 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -216,8 +216,6 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= @@ -289,8 +287,6 @@ github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ= -github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= @@ -331,8 +327,6 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= 
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k= github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= -github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= -github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= @@ -626,8 +620,6 @@ github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQsc github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= -github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= -github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/logrusorgru/aurora/v4 v4.0.0 h1:sRjfPpun/63iADiSvGGjgA1cAYegEWMPCJdUpJYn9JA= github.com/logrusorgru/aurora/v4 v4.0.0/go.mod h1:lP0iIa2nrnT/qoFXcOZSrZQpJ1o6n2CUf/hyHi2Q4ZQ= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= @@ -750,8 +742,6 @@ github.com/onflow/flow-core-contracts/lib/go/contracts v1.4.0 h1:R86HaOuk6vpuECZ github.com/onflow/flow-core-contracts/lib/go/contracts v1.4.0/go.mod h1:9asTBnB6Tw2UlVVtQKyS/egYv3xr4zVlJnJ75z1dfac= github.com/onflow/flow-core-contracts/lib/go/templates v1.4.0 h1:u2DAG8pk0xFH7TwS70t1gSZ/FtIIZWMSNyiu4SeXBYg= github.com/onflow/flow-core-contracts/lib/go/templates v1.4.0/go.mod h1:pN768Al/wLRlf3bwugv9TyxniqJxMu4sxnX9eQJam64= -github.com/onflow/flow-emulator v1.0.2-0.20241021223526-a545558d37a2 h1:zs3/ctgI1KNFTcA1HpkDmyNuUybY/oFXDE1S+cYR9lU= -github.com/onflow/flow-emulator v1.0.2-0.20241021223526-a545558d37a2/go.mod h1:HgcZT9TZVOqmNGqN3fX+QqoiVovIHfLwqbXwuaEJiXE= github.com/onflow/flow-ft/lib/go/contracts v1.0.1 h1:Ts5ob+CoCY2EjEd0W6vdLJ7hLL3SsEftzXG2JlmSe24= github.com/onflow/flow-ft/lib/go/contracts v1.0.1/go.mod h1:PwsL8fC81cjnUnTfmyL/HOIyHnyaw/JA474Wfj2tl6A= github.com/onflow/flow-ft/lib/go/templates v1.0.1 h1:FDYKAiGowABtoMNusLuRCILIZDtVqJ/5tYI4VkF5zfM= @@ -766,8 +756,6 @@ github.com/onflow/flow/protobuf/go/flow v0.4.7 h1:iP6DFx4wZ3ETORsyeqzHu7neFT3d1C github.com/onflow/flow/protobuf/go/flow v0.4.7/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/onflow/go-ethereum v1.14.7 h1:gg3awYqI02e3AypRdpJKEvNTJ6kz/OhAqRti0h54Wlc= github.com/onflow/go-ethereum v1.14.7/go.mod h1:zV14QLrXyYu5ucvcwHUA0r6UaqveqbXaehAVQJlSW+I= -github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 h1:sxyWLqGm/p4EKT6DUlQESDG1ZNMN9GjPCm1gTq7NGfc= -github.com/onflow/nft-storefront/lib/go/contracts v1.0.0/go.mod h1:kMeq9zUwCrgrSojEbTUTTJpZ4WwacVm2pA7LVFr+glk= github.com/onflow/sdks v0.6.0-preview.1 h1:mb/cUezuqWEP1gFZNAgUI4boBltudv4nlfxke1KBp9k= github.com/onflow/sdks v0.6.0-preview.1/go.mod h1:F0dj0EyHC55kknLkeD10js4mo14yTdMotnWMslPirrU= github.com/onflow/wal v1.0.2 h1:5bgsJVf2O3cfMNK12fiiTyYZ8cOrUiELt3heBJfHOhc= @@ -861,8 +849,6 @@ github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFD github.com/quic-go/webtransport-go 
v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -1415,14 +1401,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= -modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw= -modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE= -modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= -modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= -modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= -modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= -modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ= -modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= diff --git a/integration/internal/emulator/blockTicker.go b/integration/internal/emulator/blockTicker.go new file mode 100644 index 00000000000..40ef1ce7e34 --- /dev/null +++ b/integration/internal/emulator/blockTicker.go @@ -0,0 +1,56 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package emulator + +import ( + "time" +) + +type BlocksTicker struct { + emulator Emulator + ticker *time.Ticker + done chan bool +} + +func NewBlocksTicker( + emulator Emulator, + blockTime time.Duration, +) *BlocksTicker { + return &BlocksTicker{ + emulator: emulator, + ticker: time.NewTicker(blockTime), + done: make(chan bool, 1), + } +} + +func (t *BlocksTicker) Start() error { + for { + select { + case <-t.ticker.C: + _, _ = t.emulator.ExecuteBlock() + _, _ = t.emulator.CommitBlock() + case <-t.done: + return nil + } + } +} + +func (t *BlocksTicker) Stop() { + t.done <- true +} diff --git a/integration/internal/emulator/blockchain.go b/integration/internal/emulator/blockchain.go new file mode 100644 index 00000000000..b2198588ed3 --- /dev/null +++ b/integration/internal/emulator/blockchain.go @@ -0,0 +1,1077 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package emulator provides an emulated version of the Flow emulator that can be used +// for development purposes. +// +// This package can be used as a library or as a standalone application. +// +// When used as a library, this package provides tools to write programmatic tests for +// Flow applications. +// +// When used as a standalone application, this package implements the Flow Access API +// and is fully-compatible with Flow gRPC client libraries. +package emulator + +import ( + "context" + _ "embed" + "encoding/hex" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/rs/zerolog" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/runtime" + + "github.com/onflow/flow-core-contracts/lib/go/templates" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/environment" + fvmerrors "github.com/onflow/flow-go/fvm/errors" + reusableRuntime "github.com/onflow/flow-go/fvm/runtime" + flowgo "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" +) + +// systemChunkTransactionTemplate looks for the RandomBeaconHistory +// heartbeat resource on the service account and calls it. +// +//go:embed templates/systemChunkTransactionTemplate.cdc +var systemChunkTransactionTemplate string + +var _ Emulator = &Blockchain{} + +// New instantiates a new emulated emulator with the provided options. +func New(opts ...Option) (*Blockchain, error) { + + // apply options to the default config + conf := defaultConfig + for _, opt := range opts { + opt(&conf) + } + b := &Blockchain{ + storage: conf.GetStore(), + broadcaster: engine.NewBroadcaster(), + serviceKey: conf.GetServiceKey(), + conf: conf, + entropyProvider: &blockHashEntropyProvider{}, + } + return b.ReloadBlockchain() +} + +func (b *Blockchain) Now() time.Time { + if b.clockOverride != nil { + return b.clockOverride() + } + return time.Now().UTC() +} + +// Blockchain emulates the functionality of the Flow emulator. 
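The blockTicker.go addition above is small but worth a usage sketch: Start loops until Stop is called, executing and committing whatever sits in the pending block on every tick. A hedged example of wiring it into a long-running test; the helper name runWithTicker is illustrative:

```go
package example

import (
	"time"

	emulator "github.com/onflow/flow-go/integration/internal/emulator"
)

// runWithTicker drives the emulated chain so pending transactions are executed
// and committed once per second. Start blocks until Stop is called, so it runs
// on its own goroutine; the returned function stops the ticker on teardown.
func runWithTicker(chain *emulator.Blockchain) (stop func()) {
	ticker := emulator.NewBlocksTicker(chain, time.Second)
	go func() { _ = ticker.Start() }()
	return ticker.Stop
}
```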
+type Blockchain struct { + // committed chain state: blocks, transactions, registers, events + storage EmulatorStorage + broadcaster *engine.Broadcaster + + // mutex protecting pending block + mu sync.RWMutex + + // pending block containing block info, register state, pending transactions + pendingBlock *pendingBlock + clockOverride func() time.Time + // used to execute transactions and scripts + vm *fvm.VirtualMachine + vmCtx fvm.Context + transactionValidator *access.TransactionValidator + serviceKey ServiceKey + conf config + entropyProvider *blockHashEntropyProvider +} + +func (b *Blockchain) Broadcaster() *engine.Broadcaster { + return b.broadcaster +} + +func (b *Blockchain) ReloadBlockchain() (*Blockchain, error) { + + b.vm = fvm.NewVirtualMachine() + b.vmCtx = fvm.NewContext( + fvm.WithLogger(b.conf.Logger), + fvm.WithCadenceLogging(true), + fvm.WithChain(b.conf.GetChainID().Chain()), + fvm.WithBlocks(b.storage), + fvm.WithContractDeploymentRestricted(false), + fvm.WithContractRemovalRestricted(!b.conf.ContractRemovalEnabled), + fvm.WithComputationLimit(b.conf.ScriptGasLimit), + fvm.WithAccountStorageLimit(b.conf.StorageLimitEnabled), + fvm.WithTransactionFeesEnabled(b.conf.TransactionFeesEnabled), + fvm.WithReusableCadenceRuntimePool( + reusableRuntime.NewReusableCadenceRuntimePool( + 0, + runtime.Config{ + AttachmentsEnabled: true, + }), + ), + fvm.WithEntropyProvider(b.entropyProvider), + fvm.WithEVMEnabled(true), + fvm.WithAuthorizationChecksEnabled(b.conf.TransactionValidationEnabled), + fvm.WithSequenceNumberCheckAndIncrementEnabled(b.conf.TransactionValidationEnabled), + ) + + latestBlock, latestLedger, err := configureLedger( + b.conf, + b.storage, + b.vm, + b.vmCtx) + if err != nil { + return nil, err + } + + b.pendingBlock = newPendingBlock(latestBlock, latestLedger, b.Now()) + err = b.configureTransactionValidator() + if err != nil { + return nil, err + } + + return b, nil +} + +func (b *Blockchain) EnableAutoMine() { + b.conf.AutoMine = true +} + +func (b *Blockchain) DisableAutoMine() { + b.conf.AutoMine = false +} + +func (b *Blockchain) Ping() error { + return nil +} + +func (b *Blockchain) GetChain() flowgo.Chain { + return b.vmCtx.Chain +} + +func (b *Blockchain) GetNetworkParameters() access.NetworkParameters { + return access.NetworkParameters{ + ChainID: b.GetChain().ChainID(), + } +} + +// `blockHashEntropyProvider implements `environment.EntropyProvider` +// which provides a source of entropy to fvm context (required for Cadence's randomness), +// by using the latest block hash. 
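Construction uses the functional options defined in config.go further down in this diff. A hedged sketch of typical use, combining a few of those options with auto-mining so each SendTransaction call executes and commits a block on its own:

```go
package example

import (
	"github.com/rs/zerolog"

	emulator "github.com/onflow/flow-go/integration/internal/emulator"
)

// newAutoMiningChain builds an emulated chain for tests. The options shown are
// among those defined in config.go; EnableAutoMine makes every submitted
// transaction execute and commit a block immediately.
func newAutoMiningChain() (*emulator.Blockchain, error) {
	chain, err := emulator.New(
		emulator.WithLogger(zerolog.Nop()),
		emulator.WithStorageLimitEnabled(false),
		emulator.WithTransactionValidationEnabled(true),
	)
	if err != nil {
		return nil, err
	}
	chain.EnableAutoMine()
	return chain, nil
}
```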
+type blockHashEntropyProvider struct { + LatestBlock flowgo.Identifier +} + +func (gen *blockHashEntropyProvider) RandomSource() ([]byte, error) { + return gen.LatestBlock[:], nil +} + +// make sure `blockHashEntropyProvider implements `environment.EntropyProvider` +var _ environment.EntropyProvider = &blockHashEntropyProvider{} + +func (b *Blockchain) configureTransactionValidator() error { + validator, err := access.NewTransactionValidator( + b.storage, + b.conf.GetChainID().Chain(), + metrics.NewNoopCollector(), + access.TransactionValidationOptions{ + Expiry: b.conf.TransactionExpiry, + ExpiryBuffer: 0, + AllowEmptyReferenceBlockID: b.conf.TransactionExpiry == 0, + AllowUnknownReferenceBlockID: false, + MaxGasLimit: b.conf.TransactionMaxGasLimit, + CheckScriptsParse: true, + MaxTransactionByteSize: flowgo.DefaultMaxTransactionByteSize, + MaxCollectionByteSize: flowgo.DefaultMaxCollectionByteSize, + CheckPayerBalanceMode: access.Disabled, + }, + nil, + ) + if err != nil { + return err + } + b.transactionValidator = validator + return nil +} + +func (b *Blockchain) setFVMContextFromHeader(header *flowgo.Header) fvm.Context { + b.vmCtx = fvm.NewContextFromParent( + b.vmCtx, + fvm.WithBlockHeader(header), + ) + return b.vmCtx +} + +// ServiceKey returns the service private key for this emulator. +func (b *Blockchain) ServiceKey() ServiceKey { + serviceAccount, err := b.getAccount(b.serviceKey.Address) + if err != nil { + return b.serviceKey + } + + if len(serviceAccount.Keys) > 0 { + b.serviceKey.Index = 0 + b.serviceKey.SequenceNumber = serviceAccount.Keys[0].SeqNumber + b.serviceKey.Weight = serviceAccount.Keys[0].Weight + } + + return b.serviceKey +} + +// PendingBlockID returns the ID of the pending block. +func (b *Blockchain) PendingBlockID() flowgo.Identifier { + return b.pendingBlock.ID() +} + +// PendingBlockView returns the view of the pending block. +func (b *Blockchain) PendingBlockView() uint64 { + return b.pendingBlock.view +} + +// PendingBlockTimestamp returns the Timestamp of the pending block. +func (b *Blockchain) PendingBlockTimestamp() time.Time { + return b.pendingBlock.Block().Header.Timestamp +} + +// GetLatestBlock gets the latest sealed block. +func (b *Blockchain) GetLatestBlock() (*flowgo.Block, error) { + b.mu.RLock() + defer b.mu.RUnlock() + return b.getLatestBlock() +} + +func (b *Blockchain) getLatestBlock() (*flowgo.Block, error) { + block, err := b.storage.LatestBlock(context.Background()) + if err != nil { + return nil, err + } + + return &block, nil +} + +// GetBlockByID gets a block by ID. +func (b *Blockchain) GetBlockByID(id flowgo.Identifier) (*flowgo.Block, error) { + b.mu.RLock() + defer b.mu.RUnlock() + return b.getBlockByID(id) +} + +func (b *Blockchain) getBlockByID(id flowgo.Identifier) (*flowgo.Block, error) { + block, err := b.storage.BlockByID(context.Background(), id) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil, &BlockNotFoundByIDError{ID: id} + } + + return nil, err + } + + return block, nil +} + +// GetBlockByHeight gets a block by height. 
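ServiceKey() above is the reason the test-suite changes cache only the address: the key metadata is re-read from the service account on each call, so the sequence number stays current as transactions execute. A small sketch of that call pattern (proposeAsService is an illustrative name):

```go
package example

import (
	sdk "github.com/onflow/flow-go-sdk"

	emulator "github.com/onflow/flow-go/integration/internal/emulator"
)

// proposeAsService sets the service account as proposer and payer. The address
// is stable and can be cached, while Index and SequenceNumber are fetched
// fresh from ServiceKey() for every transaction.
func proposeAsService(chain *emulator.Blockchain, tx *sdk.Transaction) *sdk.Transaction {
	key := chain.ServiceKey()
	addr := sdk.Address(key.Address)
	return tx.
		SetProposalKey(addr, key.Index, key.SequenceNumber).
		SetPayer(addr)
}
```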
+func (b *Blockchain) GetBlockByHeight(height uint64) (*flowgo.Block, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + block, err := b.getBlockByHeight(height) + if err != nil { + return nil, err + } + + return block, nil +} + +func (b *Blockchain) getBlockByHeight(height uint64) (*flowgo.Block, error) { + block, err := b.storage.BlockByHeight(context.Background(), height) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil, &BlockNotFoundByHeightError{Height: height} + } + return nil, err + } + + return block, nil +} + +func (b *Blockchain) GetCollectionByID(colID flowgo.Identifier) (*flowgo.LightCollection, error) { + b.mu.RLock() + defer b.mu.RUnlock() + return b.getCollectionByID(colID) +} + +func (b *Blockchain) getCollectionByID(colID flowgo.Identifier) (*flowgo.LightCollection, error) { + col, err := b.storage.CollectionByID(context.Background(), colID) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil, &CollectionNotFoundError{ID: colID} + } + return nil, err + } + + return &col, nil +} + +func (b *Blockchain) GetFullCollectionByID(colID flowgo.Identifier) (*flowgo.Collection, error) { + b.mu.RLock() + defer b.mu.RUnlock() + return b.getFullCollectionByID(colID) +} + +func (b *Blockchain) getFullCollectionByID(colID flowgo.Identifier) (*flowgo.Collection, error) { + col, err := b.storage.FullCollectionByID(context.Background(), colID) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil, &CollectionNotFoundError{ID: colID} + } + return nil, err + } + + return &col, nil +} + +// GetTransaction gets an existing transaction by ID. +// +// The function first looks in the pending block, then the current emulator state. +func (b *Blockchain) GetTransaction(txID flowgo.Identifier) (*flowgo.TransactionBody, error) { + b.mu.RLock() + defer b.mu.RUnlock() + return b.getTransaction(txID) +} + +func (b *Blockchain) getTransaction(txID flowgo.Identifier) (*flowgo.TransactionBody, error) { + pendingTx := b.pendingBlock.GetTransaction(txID) + if pendingTx != nil { + return pendingTx, nil + } + + tx, err := b.storage.TransactionByID(context.Background(), txID) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil, &TransactionNotFoundError{ID: txID} + } + return nil, err + } + + return &tx, nil +} + +func (b *Blockchain) GetTransactionResult(txID flowgo.Identifier) (*access.TransactionResult, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + return b.getTransactionResult(txID) +} + +func (b *Blockchain) getTransactionResult(txID flowgo.Identifier) (*access.TransactionResult, error) { + if b.pendingBlock.ContainsTransaction(txID) { + return &access.TransactionResult{ + Status: flowgo.TransactionStatusPending, + }, nil + } + + storedResult, err := b.storage.TransactionResultByID(context.Background(), txID) + if err != nil { + if errors.Is(err, ErrNotFound) { + return &access.TransactionResult{ + Status: flowgo.TransactionStatusUnknown, + }, nil + } + return nil, err + } + + statusCode := 0 + if storedResult.ErrorCode > 0 { + statusCode = 1 + } + result := access.TransactionResult{ + Status: flowgo.TransactionStatusSealed, + StatusCode: uint(statusCode), + ErrorMessage: storedResult.ErrorMessage, + Events: storedResult.Events, + TransactionID: txID, + BlockHeight: storedResult.BlockHeight, + BlockID: storedResult.BlockID, + } + + return &result, nil +} + +// GetAccountByIndex returns the account for the given address. 
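The transaction-result lookup above maps storage state onto the standard Flow status values: transactions still in the pending block report StatusPending, unknown IDs report StatusUnknown (not an error), and anything persisted reports StatusSealed. A hedged sketch of consuming that:

```go
package example

import (
	"fmt"

	emulator "github.com/onflow/flow-go/integration/internal/emulator"
	flowgo "github.com/onflow/flow-go/model/flow"
)

// describeTxStatus reports how a transaction looks from the lookup above:
// unknown IDs come back as StatusUnknown rather than an error, pending-block
// transactions as StatusPending, and persisted results as StatusSealed.
func describeTxStatus(chain *emulator.Blockchain, txID flowgo.Identifier) (string, error) {
	result, err := chain.GetTransactionResult(txID)
	if err != nil {
		return "", err
	}
	switch result.Status {
	case flowgo.TransactionStatusSealed:
		return fmt.Sprintf("sealed in block %d", result.BlockHeight), nil
	case flowgo.TransactionStatusPending:
		return "in the pending block; execute and commit to seal it", nil
	default:
		return "unknown to this emulator", nil
	}
}
```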
+func (b *Blockchain) GetAccountByIndex(index uint) (*flowgo.Account, error) { + + address, err := b.vmCtx.Chain.ChainID().Chain().AddressAtIndex(uint64(index)) + if err != nil { + return nil, err + } + + latestBlock, err := b.getLatestBlock() + if err != nil { + return nil, err + } + return b.getAccountAtBlock(address, latestBlock.Header.Height) + +} + +// GetAccount returns the account for the given address. +func (b *Blockchain) GetAccount(address flowgo.Address) (*flowgo.Account, error) { + b.mu.RLock() + defer b.mu.RUnlock() + return b.getAccount(address) +} + +// getAccount returns the account for the given address. +func (b *Blockchain) getAccount(address flowgo.Address) (*flowgo.Account, error) { + latestBlock, err := b.getLatestBlock() + if err != nil { + return nil, err + } + return b.getAccountAtBlock(address, latestBlock.Header.Height) +} + +// GetAccountAtBlockHeight returns the account for the given address at specified block height. +func (b *Blockchain) GetAccountAtBlockHeight(address flowgo.Address, blockHeight uint64) (*flowgo.Account, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + account, err := b.getAccountAtBlock(address, blockHeight) + if err != nil { + return nil, err + } + + return account, nil +} + +// GetAccountAtBlock returns the account for the given address at specified block height. +func (b *Blockchain) getAccountAtBlock(address flowgo.Address, blockHeight uint64) (*flowgo.Account, error) { + ledger, err := b.storage.LedgerByHeight(context.Background(), blockHeight) + if err != nil { + return nil, err + } + + account, err := fvm.GetAccount(b.vmCtx, address, ledger) + if fvmerrors.IsAccountNotFoundError(err) { + return nil, &AccountNotFoundError{Address: address} + } + + return account, nil +} + +func (b *Blockchain) GetEventsForBlockIDs(eventType string, blockIDs []flowgo.Identifier) (result []flowgo.BlockEvents, err error) { + b.mu.RLock() + defer b.mu.RUnlock() + + for _, blockID := range blockIDs { + block, err := b.storage.BlockByID(context.Background(), blockID) + if err != nil { + break + } + events, err := b.storage.EventsByHeight(context.Background(), block.Header.Height, eventType) + if err != nil { + break + } + result = append(result, flowgo.BlockEvents{ + BlockID: block.ID(), + BlockHeight: block.Header.Height, + BlockTimestamp: block.Header.Timestamp, + Events: events, + }) + } + + return result, err +} + +func (b *Blockchain) GetEventsForHeightRange(eventType string, startHeight, endHeight uint64) (result []flowgo.BlockEvents, err error) { + b.mu.RLock() + defer b.mu.RUnlock() + + for blockHeight := startHeight; blockHeight <= endHeight; blockHeight++ { + block, err := b.storage.BlockByHeight(context.Background(), blockHeight) + if err != nil { + break + } + + events, err := b.storage.EventsByHeight(context.Background(), blockHeight, eventType) + if err != nil { + break + } + + result = append(result, flowgo.BlockEvents{ + BlockID: block.ID(), + BlockHeight: block.Header.Height, + BlockTimestamp: block.Header.Timestamp, + Events: events, + }) + } + + return result, err +} + +// GetEventsByHeight returns the events in the block at the given height, optionally filtered by type. +func (b *Blockchain) GetEventsByHeight(blockHeight uint64, eventType string) ([]flowgo.Event, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + return b.storage.EventsByHeight(context.Background(), blockHeight, eventType) +} + +// SendTransaction submits a transaction to the network. 
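The event getters above read per-height event lists back out of the emulator's storage, optionally filtered by a fully qualified event type. A brief sketch of collecting events across a committed height range (collectEvents is an illustrative name):

```go
package example

import (
	emulator "github.com/onflow/flow-go/integration/internal/emulator"
	flowgo "github.com/onflow/flow-go/model/flow"
)

// collectEvents flattens the per-block event lists returned by
// GetEventsForHeightRange; eventType may be a fully qualified type such as
// "flow.AccountCreated", or empty to return all events.
func collectEvents(chain *emulator.Blockchain, eventType string, start, end uint64) ([]flowgo.Event, error) {
	blocks, err := chain.GetEventsForHeightRange(eventType, start, end)
	if err != nil {
		return nil, err
	}
	var events []flowgo.Event
	for _, blockEvents := range blocks {
		events = append(events, blockEvents.Events...)
	}
	return events, nil
}
```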
+func (b *Blockchain) SendTransaction(flowTx *flowgo.TransactionBody) error { + b.mu.Lock() + defer b.mu.Unlock() + + err := b.addTransaction(*flowTx) + if err != nil { + return err + } + + if b.conf.AutoMine { + _, _, err := b.executeAndCommitBlock() + if err != nil { + return err + } + } + + return nil +} + +// AddTransaction validates a transaction and adds it to the current pending block. +func (b *Blockchain) AddTransaction(tx flowgo.TransactionBody) error { + b.mu.Lock() + defer b.mu.Unlock() + + return b.addTransaction(tx) +} + +func (b *Blockchain) addTransaction(tx flowgo.TransactionBody) error { + + // If index > 0, pending block has begun execution (cannot add more transactions) + if b.pendingBlock.ExecutionStarted() { + return &PendingBlockMidExecutionError{BlockID: b.pendingBlock.ID()} + } + + if b.pendingBlock.ContainsTransaction(tx.ID()) { + return &DuplicateTransactionError{TxID: tx.ID()} + } + + _, err := b.storage.TransactionByID(context.Background(), tx.ID()) + if err == nil { + // Found the transaction, this is a duplicate + return &DuplicateTransactionError{TxID: tx.ID()} + } else if !errors.Is(err, ErrNotFound) { + // Error in the storage provider + return fmt.Errorf("failed to check storage for transaction %w", err) + } + + err = b.transactionValidator.Validate(context.Background(), &tx) + if err != nil { + return ConvertAccessError(err) + } + + // add transaction to pending block + b.pendingBlock.AddTransaction(tx) + + return nil +} + +// ExecuteBlock executes the remaining transactions in pending block. +func (b *Blockchain) ExecuteBlock() ([]*TransactionResult, error) { + b.mu.Lock() + defer b.mu.Unlock() + + return b.executeBlock() +} + +func (b *Blockchain) executeBlock() ([]*TransactionResult, error) { + results := make([]*TransactionResult, 0) + + // empty blocks do not require execution, treat as a no-op + if b.pendingBlock.Empty() { + return results, nil + } + + header := b.pendingBlock.Block().Header + blockContext := b.setFVMContextFromHeader(header) + + // cannot execute a block that has already executed + if b.pendingBlock.ExecutionComplete() { + return results, &PendingBlockTransactionsExhaustedError{ + BlockID: b.pendingBlock.ID(), + } + } + + // continue executing transactions until execution is complete + for !b.pendingBlock.ExecutionComplete() { + result, err := b.executeNextTransaction(blockContext) + if err != nil { + return results, err + } + + results = append(results, result) + } + + return results, nil +} + +// ExecuteNextTransaction executes the next indexed transaction in pending block. +func (b *Blockchain) ExecuteNextTransaction() (*TransactionResult, error) { + b.mu.Lock() + defer b.mu.Unlock() + + header := b.pendingBlock.Block().Header + blockContext := b.setFVMContextFromHeader(header) + return b.executeNextTransaction(blockContext) +} + +// executeNextTransaction is a helper function for ExecuteBlock and ExecuteNextTransaction that +// executes the next transaction in the pending block. 
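SendTransaction, ExecuteBlock, and CommitBlock form the manual block lifecycle used when auto-mining is off: transactions are validated into the pending block, executed in order, then committed to storage. A hedged sketch of that sequence, assuming AutoMine is disabled:

```go
package example

import (
	emulator "github.com/onflow/flow-go/integration/internal/emulator"
	flowgo "github.com/onflow/flow-go/model/flow"
)

// manualBlockLifecycle queues transactions into the pending block, executes
// every remaining transaction, then commits the block to storage and resets
// the pending state (with AutoMine disabled, SendTransaction only enqueues).
func manualBlockLifecycle(chain *emulator.Blockchain, txs []*flowgo.TransactionBody) error {
	for _, tx := range txs {
		if err := chain.SendTransaction(tx); err != nil {
			return err
		}
	}
	if _, err := chain.ExecuteBlock(); err != nil {
		return err
	}
	_, err := chain.CommitBlock()
	return err
}
```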
+func (b *Blockchain) executeNextTransaction(ctx fvm.Context) (*TransactionResult, error) { + // check if there are remaining txs to be executed + if b.pendingBlock.ExecutionComplete() { + return nil, &PendingBlockTransactionsExhaustedError{ + BlockID: b.pendingBlock.ID(), + } + } + + txnBody := b.pendingBlock.NextTransaction() + txnId := txnBody.ID() + + // use the computer to execute the next transaction + output, err := b.pendingBlock.ExecuteNextTransaction(b.vm, ctx) + if err != nil { + // fail fast if fatal error occurs + return nil, err + } + + tr, err := VMTransactionResultToEmulator(txnId, output) + if err != nil { + // fail fast if fatal error occurs + return nil, err + } + + return tr, nil +} + +// CommitBlock seals the current pending block and saves it to storage. +// +// This function clears the pending transaction pool and resets the pending block. +func (b *Blockchain) CommitBlock() (*flowgo.Block, error) { + b.mu.Lock() + defer b.mu.Unlock() + + block, err := b.commitBlock() + if err != nil { + return nil, err + } + + return block, nil +} + +func (b *Blockchain) commitBlock() (*flowgo.Block, error) { + // pending block cannot be committed before execution starts (unless empty) + if !b.pendingBlock.ExecutionStarted() && !b.pendingBlock.Empty() { + return nil, &PendingBlockCommitBeforeExecutionError{BlockID: b.pendingBlock.ID()} + } + + // pending block cannot be committed before execution completes + if b.pendingBlock.ExecutionStarted() && !b.pendingBlock.ExecutionComplete() { + return nil, &PendingBlockMidExecutionError{BlockID: b.pendingBlock.ID()} + } + + block := b.pendingBlock.Block() + collections := b.pendingBlock.Collections() + transactions := b.pendingBlock.Transactions() + transactionResults, err := convertToSealedResults(b.pendingBlock.TransactionResults(), b.pendingBlock.ID(), b.pendingBlock.height) + if err != nil { + return nil, err + } + + // lastly we execute the system chunk transaction + err = b.executeSystemChunkTransaction() + if err != nil { + return nil, err + } + + executionSnapshot := b.pendingBlock.Finalize() + events := b.pendingBlock.Events() + + // commit the pending block to storage + err = b.storage.CommitBlock( + context.Background(), + *block, + collections, + transactions, + transactionResults, + executionSnapshot, + events) + if err != nil { + return nil, err + } + + ledger, err := b.storage.LedgerByHeight( + context.Background(), + block.Header.Height, + ) + if err != nil { + return nil, err + } + + // notify listeners on new block + b.broadcaster.Publish() + + // reset pending block using current block and ledger state + b.pendingBlock = newPendingBlock(block, ledger, b.Now()) + b.entropyProvider.LatestBlock = block.ID() + + return block, nil +} + +// ExecuteAndCommitBlock is a utility that combines ExecuteBlock with CommitBlock. +func (b *Blockchain) ExecuteAndCommitBlock() (*flowgo.Block, []*TransactionResult, error) { + b.mu.Lock() + defer b.mu.Unlock() + + return b.executeAndCommitBlock() +} + +// ExecuteAndCommitBlock is a utility that combines ExecuteBlock with CommitBlock. 
+func (b *Blockchain) executeAndCommitBlock() (*flowgo.Block, []*TransactionResult, error) { + + results, err := b.executeBlock() + if err != nil { + return nil, nil, err + } + + block, err := b.commitBlock() + if err != nil { + return nil, results, err + } + + blockID := block.ID() + b.conf.ServerLogger.Debug().Fields(map[string]any{ + "blockHeight": block.Header.Height, + "blockID": hex.EncodeToString(blockID[:]), + }).Msgf("📦 Block #%d committed", block.Header.Height) + + return block, results, nil +} + +// ResetPendingBlock clears the transactions in pending block. +func (b *Blockchain) ResetPendingBlock() error { + b.mu.Lock() + defer b.mu.Unlock() + + latestBlock, err := b.storage.LatestBlock(context.Background()) + if err != nil { + return err + } + + latestLedger, err := b.storage.LedgerByHeight( + context.Background(), + latestBlock.Header.Height, + ) + if err != nil { + return err + } + + // reset pending block using latest committed block and ledger state + b.pendingBlock = newPendingBlock(&latestBlock, latestLedger, b.Now()) + + return nil +} + +// ExecuteScript executes a read-only script against the world state and returns the result. +func (b *Blockchain) ExecuteScript( + script []byte, + arguments [][]byte, +) (*ScriptResult, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + latestBlock, err := b.getLatestBlock() + if err != nil { + return nil, err + } + + return b.executeScriptAtBlockID(script, arguments, latestBlock.Header.ID()) +} + +func (b *Blockchain) ExecuteScriptAtBlockID(script []byte, arguments [][]byte, id flowgo.Identifier) (*ScriptResult, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + return b.executeScriptAtBlockID(script, arguments, id) +} + +func (b *Blockchain) executeScriptAtBlockID(script []byte, arguments [][]byte, id flowgo.Identifier) (*ScriptResult, error) { + requestedBlock, err := b.storage.BlockByID(context.Background(), id) + if err != nil { + return nil, err + } + + requestedLedgerSnapshot, err := b.storage.LedgerByHeight( + context.Background(), + requestedBlock.Header.Height, + ) + if err != nil { + return nil, err + } + + blockContext := fvm.NewContextFromParent( + b.vmCtx, + fvm.WithBlockHeader(requestedBlock.Header), + ) + + scriptProc := fvm.Script(script).WithArguments(arguments...) 
+ + _, output, err := b.vm.Run( + blockContext, + scriptProc, + requestedLedgerSnapshot) + if err != nil { + return nil, err + } + + scriptID := flowgo.MakeIDFromFingerPrint(script) + + var scriptError error = nil + var convertedValue cadence.Value = nil + + if output.Err == nil { + convertedValue = output.Value + } else { + scriptError = VMErrorToEmulator(output.Err) + } + + scriptResult := &ScriptResult{ + ScriptID: scriptID, + Value: convertedValue, + Error: scriptError, + Logs: output.Logs, + Events: output.Events, + ComputationUsed: output.ComputationUsed, + MemoryEstimate: output.MemoryEstimate, + } + + return scriptResult, nil +} + +func (b *Blockchain) ExecuteScriptAtBlockHeight( + script []byte, + arguments [][]byte, + blockHeight uint64, +) (*ScriptResult, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + requestedBlock, err := b.getBlockByHeight(blockHeight) + if err != nil { + return nil, err + } + + return b.executeScriptAtBlockID(script, arguments, requestedBlock.Header.ID()) +} + +func convertToSealedResults( + results map[flowgo.Identifier]IndexedTransactionResult, + blockID flowgo.Identifier, + blockHeight uint64, +) (map[flowgo.Identifier]*StorableTransactionResult, error) { + + output := make(map[flowgo.Identifier]*StorableTransactionResult) + + for id, result := range results { + temp, err := ToStorableResult(result.ProcedureOutput, blockID, blockHeight) + if err != nil { + return nil, err + } + output[id] = &temp + } + + return output, nil +} + +func (b *Blockchain) GetTransactionsByBlockID(blockID flowgo.Identifier) ([]*flowgo.TransactionBody, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + block, err := b.getBlockByID(blockID) + if err != nil { + return nil, fmt.Errorf("failed to get block %s: %w", blockID, err) + } + + var transactions []*flowgo.TransactionBody + for i, guarantee := range block.Payload.Guarantees { + c, err := b.getCollectionByID(guarantee.CollectionID) + if err != nil { + return nil, fmt.Errorf("failed to get collection [%d] %s: %w", i, guarantee.CollectionID, err) + } + + for j, txID := range c.Transactions { + tx, err := b.getTransaction(txID) + if err != nil { + return nil, fmt.Errorf("failed to get transaction [%d] %s: %w", j, txID, err) + } + transactions = append(transactions, tx) + } + } + return transactions, nil +} + +func (b *Blockchain) GetTransactionResultsByBlockID(blockID flowgo.Identifier) ([]*access.TransactionResult, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + block, err := b.getBlockByID(blockID) + if err != nil { + return nil, fmt.Errorf("failed to get block %s: %w", blockID, err) + } + + var results []*access.TransactionResult + for i, guarantee := range block.Payload.Guarantees { + c, err := b.getCollectionByID(guarantee.CollectionID) + if err != nil { + return nil, fmt.Errorf("failed to get collection [%d] %s: %w", i, guarantee.CollectionID, err) + } + + for j, txID := range c.Transactions { + result, err := b.getTransactionResult(txID) + if err != nil { + return nil, fmt.Errorf("failed to get transaction result [%d] %s: %w", j, txID, err) + } + results = append(results, result) + } + } + return results, nil +} + +func (b *Blockchain) GetLogs(identifier flowgo.Identifier) ([]string, error) { + txResult, err := b.storage.TransactionResultByID(context.Background(), identifier) + if err != nil { + return nil, err + + } + return txResult.Logs, nil +} + +// SetClock sets the given clock on blockchain's pending block. 
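Script execution above runs read-only Cadence against the ledger snapshot of a chosen block and surfaces Cadence-level failures through ScriptResult.Error rather than the returned Go error, which is reserved for emulator and storage failures. A hedged sketch, assuming JSON-CDC encoded arguments as elsewhere in Flow:

```go
package example

import (
	"github.com/onflow/cadence"
	jsoncdc "github.com/onflow/cadence/encoding/json"

	emulator "github.com/onflow/flow-go/integration/internal/emulator"
)

// runScript executes a read-only script against the latest committed block.
// Arguments are JSON-CDC encoded; a failing script is reported through
// ScriptResult.Error, while the second return value covers emulator failures.
func runScript(chain *emulator.Blockchain, script []byte, arg cadence.Value) (cadence.Value, error) {
	encoded, err := jsoncdc.Encode(arg)
	if err != nil {
		return nil, err
	}
	result, err := chain.ExecuteScript(script, [][]byte{encoded})
	if err != nil {
		return nil, err
	}
	if result.Error != nil {
		return nil, result.Error
	}
	return result.Value, nil
}
```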
+func (b *Blockchain) SetClock(clock func() time.Time) { + b.clockOverride = clock + b.pendingBlock.SetTimestamp(clock()) +} + +// NewScriptEnvironment returns an environment.Environment by +// using as a storage snapshot the blockchain's ledger state. +// Useful for tools that use the emulator's blockchain as a library. +func (b *Blockchain) NewScriptEnvironment() environment.Environment { + return environment.NewScriptEnvironmentFromStorageSnapshot( + b.vmCtx.EnvironmentParams, + b.pendingBlock.ledgerState.NewChild(), + ) +} + +func (b *Blockchain) systemChunkTransaction() (*flowgo.TransactionBody, error) { + serviceAddress := b.GetChain().ServiceAddress() + + script := templates.ReplaceAddresses( + systemChunkTransactionTemplate, + templates.Environment{ + RandomBeaconHistoryAddress: serviceAddress.Hex(), + }, + ) + + // TODO: move this to `templates.Environment` struct + script = strings.ReplaceAll( + script, + `import EVM from "EVM"`, + fmt.Sprintf( + "import EVM from %s", + serviceAddress.HexWithPrefix(), + ), + ) + + tx := flowgo.NewTransactionBody(). + SetScript([]byte(script)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + AddAuthorizer(serviceAddress). + SetPayer(serviceAddress). + SetReferenceBlockID(b.pendingBlock.parentID) + + return tx, nil +} + +func (b *Blockchain) executeSystemChunkTransaction() error { + txn, err := b.systemChunkTransaction() + if err != nil { + return err + } + ctx := fvm.NewContextFromParent( + b.vmCtx, + fvm.WithLogger(zerolog.Nop()), + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + fvm.WithRandomSourceHistoryCallAllowed(true), + fvm.WithBlockHeader(b.pendingBlock.Block().Header), + ) + + executionSnapshot, output, err := b.vm.Run( + ctx, + fvm.Transaction(txn, uint32(len(b.pendingBlock.Transactions()))), + b.pendingBlock.ledgerState, + ) + if err != nil { + return err + } + + if output.Err != nil { + return output.Err + } + + b.pendingBlock.events = append(b.pendingBlock.events, output.Events...) + + err = b.pendingBlock.ledgerState.Merge(executionSnapshot) + if err != nil { + return err + } + + return nil +} + +func (b *Blockchain) GetRegisterValues(registerIDs flowgo.RegisterIDs, height uint64) (values []flowgo.RegisterValue, err error) { + ledger, err := b.storage.LedgerByHeight(context.Background(), height) + if err != nil { + return nil, err + } + for _, registerID := range registerIDs { + value, err := ledger.Get(registerID) + if err != nil { + return nil, err + } + values = append(values, value) + } + return values, nil +} diff --git a/integration/internal/emulator/config.go b/integration/internal/emulator/config.go new file mode 100644 index 00000000000..d87be0a162c --- /dev/null +++ b/integration/internal/emulator/config.go @@ -0,0 +1,274 @@ +package emulator + +import ( + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/cadence" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/meter" + flowgo "github.com/onflow/flow-go/model/flow" +) + +// config is a set of configuration options for an emulated emulator. 
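SetClock above is consulted by Blockchain.Now(), so both the current pending block and every block committed afterwards take their timestamps from the injected clock. A one-line sketch for deterministic timestamps in tests (freezeTime is an illustrative name):

```go
package example

import (
	"time"

	emulator "github.com/onflow/flow-go/integration/internal/emulator"
)

// freezeTime pins the emulator's clock, so the pending block and all
// subsequently committed blocks carry the given timestamp.
func freezeTime(chain *emulator.Blockchain, at time.Time) {
	chain.SetClock(func() time.Time { return at })
}
```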
+type config struct { + ServiceKey *ServiceKey + Store EmulatorStorage + SimpleAddresses bool + GenesisTokenSupply cadence.UFix64 + TransactionMaxGasLimit uint64 + ScriptGasLimit uint64 + TransactionExpiry uint + StorageLimitEnabled bool + TransactionFeesEnabled bool + ExecutionEffortWeights meter.ExecutionEffortWeights + ContractRemovalEnabled bool + MinimumStorageReservation cadence.UFix64 + StorageMBPerFLOW cadence.UFix64 + Logger zerolog.Logger + ServerLogger zerolog.Logger + TransactionValidationEnabled bool + ChainID flowgo.ChainID + AutoMine bool +} + +const defaultGenesisTokenSupply = "1000000000.0" +const defaultScriptGasLimit = 100000 +const defaultTransactionMaxGasLimit = flowgo.DefaultMaxTransactionGasLimit + +// defaultConfig is the default configuration for an emulated emulator. +var defaultConfig = func() config { + genesisTokenSupply, err := cadence.NewUFix64(defaultGenesisTokenSupply) + if err != nil { + panic(fmt.Sprintf("Failed to parse default genesis token supply: %s", err.Error())) + } + + return config{ + ServiceKey: DefaultServiceKey(), + Store: nil, + SimpleAddresses: false, + GenesisTokenSupply: genesisTokenSupply, + ScriptGasLimit: defaultScriptGasLimit, + TransactionMaxGasLimit: defaultTransactionMaxGasLimit, + MinimumStorageReservation: fvm.DefaultMinimumStorageReservation, + StorageMBPerFLOW: fvm.DefaultStorageMBPerFLOW, + TransactionExpiry: 0, // TODO: replace with sensible default + StorageLimitEnabled: true, + Logger: zerolog.Nop(), + ServerLogger: zerolog.Nop(), + TransactionValidationEnabled: true, + ChainID: flowgo.Emulator, + AutoMine: false, + } +}() + +func (conf config) GetStore() EmulatorStorage { + if conf.Store == nil { + conf.Store = NewMemoryStore() + } + return conf.Store +} + +func (conf config) GetChainID() flowgo.ChainID { + if conf.SimpleAddresses { + return flowgo.MonotonicEmulator + } + return conf.ChainID +} + +func (conf config) GetServiceKey() ServiceKey { + // set up service key + serviceKey := conf.ServiceKey + if serviceKey == nil { + serviceKey = DefaultServiceKey() + } + serviceKey.Address = conf.GetChainID().Chain().ServiceAddress() + serviceKey.Weight = fvm.AccountKeyWeightThreshold + return *serviceKey +} + +// Option is a function applying a change to the emulator config. +type Option func(*config) + +// WithLogger sets the fvm logger +func WithLogger( + logger zerolog.Logger, +) Option { + return func(c *config) { + c.Logger = logger + } +} + +// WithServerLogger sets the logger +func WithServerLogger( + logger zerolog.Logger, +) Option { + return func(c *config) { + c.ServerLogger = logger + } +} + +// WithServicePublicKey sets the service key from a public key. +func WithServicePublicKey( + servicePublicKey crypto.PublicKey, + sigAlgo crypto.SigningAlgorithm, + hashAlgo hash.HashingAlgorithm, +) Option { + return func(c *config) { + c.ServiceKey = &ServiceKey{ + PublicKey: servicePublicKey, + SigAlgo: sigAlgo, + HashAlgo: hashAlgo, + } + } +} + +// WithServicePrivateKey sets the service key from private key. +func WithServicePrivateKey( + privateKey crypto.PrivateKey, + sigAlgo crypto.SigningAlgorithm, + hashAlgo hash.HashingAlgorithm, +) Option { + return func(c *config) { + c.ServiceKey = &ServiceKey{ + PrivateKey: privateKey, + PublicKey: privateKey.PublicKey(), + HashAlgo: hashAlgo, + SigAlgo: sigAlgo, + } + } +} + +// WithStore sets the persistent storage provider. 
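The service-key options work together with GetServiceKey above, which fills in the service address for the configured chain and the default key weight, so callers supply only the key material and algorithms. A hedged sketch (the algorithm choices here are illustrative):

```go
package example

import (
	"github.com/onflow/crypto"
	"github.com/onflow/crypto/hash"

	emulator "github.com/onflow/flow-go/integration/internal/emulator"
)

// newChainWithServiceKey configures the emulator's service account from an
// existing private key; the address and weight are derived by GetServiceKey,
// so only key material and algorithms are passed in.
func newChainWithServiceKey(priv crypto.PrivateKey) (*emulator.Blockchain, error) {
	return emulator.New(
		emulator.WithServicePrivateKey(priv, crypto.ECDSAP256, hash.SHA3_256),
	)
}
```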
+func WithStore(store EmulatorStorage) Option { + return func(c *config) { + c.Store = store + } +} + +// WithSimpleAddresses enables simple addresses, which are sequential starting with 0x01. +func WithSimpleAddresses() Option { + return func(c *config) { + c.SimpleAddresses = true + } +} + +// WithGenesisTokenSupply sets the genesis token supply. +func WithGenesisTokenSupply(supply cadence.UFix64) Option { + return func(c *config) { + c.GenesisTokenSupply = supply + } +} + +// WithTransactionMaxGasLimit sets the maximum gas limit for transactions. +// +// Individual transactions will still be bounded by the limit they declare. +// This function sets the maximum limit that any transaction can declare. +// +// This limit does not affect script executions. Use WithScriptGasLimit +// to set the gas limit for script executions. +func WithTransactionMaxGasLimit(maxLimit uint64) Option { + return func(c *config) { + c.TransactionMaxGasLimit = maxLimit + } +} + +// WithScriptGasLimit sets the gas limit for scripts. +// +// This limit does not affect transactions, which declare their own limit. +// Use WithTransactionMaxGasLimit to set the maximum gas limit for transactions. +func WithScriptGasLimit(limit uint64) Option { + return func(c *config) { + c.ScriptGasLimit = limit + } +} + +// WithTransactionExpiry sets the transaction expiry measured in blocks. +// +// If set to zero, transaction expiry is disabled and the reference block ID field +// is not required. +func WithTransactionExpiry(expiry uint) Option { + return func(c *config) { + c.TransactionExpiry = expiry + } +} + +// WithStorageLimitEnabled enables/disables limiting account storage used to their storage capacity. +// +// If set to false, accounts can store any amount of data, +// otherwise they can only store as much as their storage capacity. +// The default is true. +func WithStorageLimitEnabled(enabled bool) Option { + return func(c *config) { + c.StorageLimitEnabled = enabled + } +} + +// WithMinimumStorageReservation sets the minimum account balance. +// +// The cost of creating new accounts is also set to this value. +// The default is taken from fvm.DefaultMinimumStorageReservation +func WithMinimumStorageReservation(minimumStorageReservation cadence.UFix64) Option { + return func(c *config) { + c.MinimumStorageReservation = minimumStorageReservation + } +} + +// WithStorageMBPerFLOW sets the cost of a megabyte of storage in FLOW +// +// the default is taken from fvm.DefaultStorageMBPerFLOW +func WithStorageMBPerFLOW(storageMBPerFLOW cadence.UFix64) Option { + return func(c *config) { + c.StorageMBPerFLOW = storageMBPerFLOW + } +} + +// WithTransactionFeesEnabled enables/disables transaction fees. +// +// If set to false transactions don't cost any flow. +// The default is false. +func WithTransactionFeesEnabled(enabled bool) Option { + return func(c *config) { + c.TransactionFeesEnabled = enabled + } +} + +// WithExecutionEffortWeights sets the execution effort weights. +// default is the Mainnet values. +func WithExecutionEffortWeights(weights meter.ExecutionEffortWeights) Option { + return func(c *config) { + c.ExecutionEffortWeights = weights + } +} + +// WithContractRemovalEnabled restricts/allows removal of already deployed contracts. +// +// The default is provided by on-chain value. +func WithContractRemovalEnabled(enabled bool) Option { + return func(c *config) { + c.ContractRemovalEnabled = enabled + } +} + +// WithTransactionValidationEnabled enables/disables transaction validation. 
+// +// If set to false, the emulator will not verify transaction signatures or validate sequence numbers. +// +// The default is true. +func WithTransactionValidationEnabled(enabled bool) Option { + return func(c *config) { + c.TransactionValidationEnabled = enabled + } +} + +// WithChainID sets chain type for address generation +// The default is emulator. +func WithChainID(chainID flowgo.ChainID) Option { + return func(c *config) { + c.ChainID = chainID + } +} diff --git a/integration/internal/emulator/convert.go b/integration/internal/emulator/convert.go new file mode 100644 index 00000000000..f9f8f1dfb6a --- /dev/null +++ b/integration/internal/emulator/convert.go @@ -0,0 +1,377 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package emulator + +import ( + "fmt" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" + + sdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/fvm" + fvmerrors "github.com/onflow/flow-go/fvm/errors" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func SDKIdentifierToFlow(sdkIdentifier sdk.Identifier) flowgo.Identifier { + return flowgo.Identifier(sdkIdentifier) +} + +func SDKIdentifiersToFlow(sdkIdentifiers []sdk.Identifier) []flowgo.Identifier { + ret := make([]flowgo.Identifier, len(sdkIdentifiers)) + for i, sdkIdentifier := range sdkIdentifiers { + ret[i] = SDKIdentifierToFlow(sdkIdentifier) + } + return ret +} + +func FlowIdentifierToSDK(flowIdentifier flowgo.Identifier) sdk.Identifier { + return sdk.Identifier(flowIdentifier) +} + +func FlowIdentifiersToSDK(flowIdentifiers []flowgo.Identifier) []sdk.Identifier { + ret := make([]sdk.Identifier, len(flowIdentifiers)) + for i, flowIdentifier := range flowIdentifiers { + ret[i] = FlowIdentifierToSDK(flowIdentifier) + } + return ret +} + +func SDKProposalKeyToFlow(sdkProposalKey sdk.ProposalKey) flowgo.ProposalKey { + return flowgo.ProposalKey{ + Address: SDKAddressToFlow(sdkProposalKey.Address), + KeyIndex: sdkProposalKey.KeyIndex, + SequenceNumber: sdkProposalKey.SequenceNumber, + } +} + +func FlowProposalKeyToSDK(flowProposalKey flowgo.ProposalKey) sdk.ProposalKey { + return sdk.ProposalKey{ + Address: FlowAddressToSDK(flowProposalKey.Address), + KeyIndex: flowProposalKey.KeyIndex, + SequenceNumber: flowProposalKey.SequenceNumber, + } +} + +func SDKAddressToFlow(sdkAddress sdk.Address) flowgo.Address { + return flowgo.Address(sdkAddress) +} + +func FlowAddressToSDK(flowAddress flowgo.Address) sdk.Address { + return sdk.Address(flowAddress) +} + +func SDKAddressesToFlow(sdkAddresses []sdk.Address) []flowgo.Address { + ret := make([]flowgo.Address, len(sdkAddresses)) + for i, sdkAddress := range sdkAddresses { + ret[i] = SDKAddressToFlow(sdkAddress) + } + return ret +} + +func FlowAddressesToSDK(flowAddresses []flowgo.Address) []sdk.Address { + ret := make([]sdk.Address, len(flowAddresses)) + for i, flowAddress := range flowAddresses { + ret[i] = 
FlowAddressToSDK(flowAddress) + } + return ret +} + +func SDKTransactionSignatureToFlow(sdkTransactionSignature sdk.TransactionSignature) flowgo.TransactionSignature { + return flowgo.TransactionSignature{ + Address: SDKAddressToFlow(sdkTransactionSignature.Address), + SignerIndex: sdkTransactionSignature.SignerIndex, + KeyIndex: sdkTransactionSignature.KeyIndex, + Signature: sdkTransactionSignature.Signature, + } +} + +func FlowTransactionSignatureToSDK(flowTransactionSignature flowgo.TransactionSignature) sdk.TransactionSignature { + return sdk.TransactionSignature{ + Address: FlowAddressToSDK(flowTransactionSignature.Address), + SignerIndex: flowTransactionSignature.SignerIndex, + KeyIndex: flowTransactionSignature.KeyIndex, + Signature: flowTransactionSignature.Signature, + } +} + +func SDKTransactionSignaturesToFlow(sdkTransactionSignatures []sdk.TransactionSignature) []flowgo.TransactionSignature { + ret := make([]flowgo.TransactionSignature, len(sdkTransactionSignatures)) + for i, sdkTransactionSignature := range sdkTransactionSignatures { + ret[i] = SDKTransactionSignatureToFlow(sdkTransactionSignature) + } + return ret +} + +func FlowTransactionSignaturesToSDK(flowTransactionSignatures []flowgo.TransactionSignature) []sdk.TransactionSignature { + ret := make([]sdk.TransactionSignature, len(flowTransactionSignatures)) + for i, flowTransactionSignature := range flowTransactionSignatures { + ret[i] = FlowTransactionSignatureToSDK(flowTransactionSignature) + } + return ret +} + +func SDKTransactionToFlow(sdkTx sdk.Transaction) *flowgo.TransactionBody { + return &flowgo.TransactionBody{ + ReferenceBlockID: SDKIdentifierToFlow(sdkTx.ReferenceBlockID), + Script: sdkTx.Script, + Arguments: sdkTx.Arguments, + GasLimit: sdkTx.GasLimit, + ProposalKey: SDKProposalKeyToFlow(sdkTx.ProposalKey), + Payer: SDKAddressToFlow(sdkTx.Payer), + Authorizers: SDKAddressesToFlow(sdkTx.Authorizers), + PayloadSignatures: SDKTransactionSignaturesToFlow(sdkTx.PayloadSignatures), + EnvelopeSignatures: SDKTransactionSignaturesToFlow(sdkTx.EnvelopeSignatures), + } +} + +func FlowTransactionToSDK(flowTx flowgo.TransactionBody) sdk.Transaction { + transaction := sdk.Transaction{ + ReferenceBlockID: FlowIdentifierToSDK(flowTx.ReferenceBlockID), + Script: flowTx.Script, + Arguments: flowTx.Arguments, + GasLimit: flowTx.GasLimit, + ProposalKey: FlowProposalKeyToSDK(flowTx.ProposalKey), + Payer: FlowAddressToSDK(flowTx.Payer), + Authorizers: FlowAddressesToSDK(flowTx.Authorizers), + PayloadSignatures: FlowTransactionSignaturesToSDK(flowTx.PayloadSignatures), + EnvelopeSignatures: FlowTransactionSignaturesToSDK(flowTx.EnvelopeSignatures), + } + return transaction +} + +func FlowTransactionResultToSDK(result *access.TransactionResult) (*sdk.TransactionResult, error) { + + events, err := FlowEventsToSDK(result.Events) + if err != nil { + return nil, err + } + + if result.ErrorMessage != "" { + err = &ExecutionError{Code: int(result.StatusCode), Message: result.ErrorMessage} + } + + sdkResult := &sdk.TransactionResult{ + Status: sdk.TransactionStatus(result.Status), + Error: err, + Events: events, + TransactionID: sdk.Identifier(result.TransactionID), + BlockHeight: result.BlockHeight, + BlockID: sdk.Identifier(result.BlockID), + } + + return sdkResult, nil +} + +func SDKEventToFlow(event sdk.Event) (flowgo.Event, error) { + payload, err := ccf.EventsEncMode.Encode(event.Value) + if err != nil { + return flowgo.Event{}, err + } + + return flowgo.Event{ + Type: flowgo.EventType(event.Type), + TransactionID: 
SDKIdentifierToFlow(event.TransactionID), + TransactionIndex: uint32(event.TransactionIndex), + EventIndex: uint32(event.EventIndex), + Payload: payload, + }, nil +} + +func FlowEventToSDK(flowEvent flowgo.Event) (sdk.Event, error) { + cadenceValue, err := ccf.EventsDecMode.Decode(nil, flowEvent.Payload) + if err != nil { + return sdk.Event{}, err + } + + cadenceEvent, ok := cadenceValue.(cadence.Event) + if !ok { + return sdk.Event{}, fmt.Errorf("cadence value not of type event: %s", cadenceValue) + } + + return sdk.Event{ + Type: string(flowEvent.Type), + TransactionID: FlowIdentifierToSDK(flowEvent.TransactionID), + TransactionIndex: int(flowEvent.TransactionIndex), + EventIndex: int(flowEvent.EventIndex), + Value: cadenceEvent, + }, nil +} + +func FlowEventsToSDK(flowEvents []flowgo.Event) ([]sdk.Event, error) { + ret := make([]sdk.Event, len(flowEvents)) + var err error + for i, flowEvent := range flowEvents { + ret[i], err = FlowEventToSDK(flowEvent) + if err != nil { + return nil, err + } + } + return ret, nil +} + +func FlowAccountPublicKeyToSDK(flowPublicKey flowgo.AccountPublicKey, index uint32) (sdk.AccountKey, error) { + + return sdk.AccountKey{ + Index: index, + PublicKey: flowPublicKey.PublicKey, + SigAlgo: flowPublicKey.SignAlgo, + HashAlgo: flowPublicKey.HashAlgo, + Weight: flowPublicKey.Weight, + SequenceNumber: flowPublicKey.SeqNumber, + Revoked: flowPublicKey.Revoked, + }, nil +} + +func SDKAccountKeyToFlow(key *sdk.AccountKey) (flowgo.AccountPublicKey, error) { + + return flowgo.AccountPublicKey{ + Index: key.Index, + PublicKey: key.PublicKey, + SignAlgo: key.SigAlgo, + HashAlgo: key.HashAlgo, + Weight: key.Weight, + SeqNumber: key.SequenceNumber, + Revoked: key.Revoked, + }, nil +} + +func SDKAccountKeysToFlow(keys []*sdk.AccountKey) ([]flowgo.AccountPublicKey, error) { + accountKeys := make([]flowgo.AccountPublicKey, len(keys)) + + for i, key := range keys { + accountKey, err := SDKAccountKeyToFlow(key) + if err != nil { + return nil, err + } + + accountKeys[i] = accountKey + } + + return accountKeys, nil +} + +func FlowAccountPublicKeysToSDK(flowPublicKeys []flowgo.AccountPublicKey) ([]*sdk.AccountKey, error) { + ret := make([]*sdk.AccountKey, len(flowPublicKeys)) + for i, flowPublicKey := range flowPublicKeys { + v, err := FlowAccountPublicKeyToSDK(flowPublicKey, uint32(i)) + if err != nil { + return nil, err + } + + ret[i] = &v + } + return ret, nil +} + +func FlowAccountToSDK(flowAccount flowgo.Account) (*sdk.Account, error) { + sdkPublicKeys, err := FlowAccountPublicKeysToSDK(flowAccount.Keys) + if err != nil { + return &sdk.Account{}, err + } + + return &sdk.Account{ + Address: FlowAddressToSDK(flowAccount.Address), + Balance: flowAccount.Balance, + Code: nil, + Keys: sdkPublicKeys, + Contracts: flowAccount.Contracts, + }, nil +} + +func SDKAccountToFlow(account *sdk.Account) (*flowgo.Account, error) { + keys, err := SDKAccountKeysToFlow(account.Keys) + if err != nil { + return nil, err + } + + return &flowgo.Account{ + Address: SDKAddressToFlow(account.Address), + Balance: account.Balance, + Keys: keys, + Contracts: account.Contracts, + }, nil +} + +func FlowLightCollectionToSDK(flowCollection flowgo.LightCollection) sdk.Collection { + return sdk.Collection{ + TransactionIDs: FlowIdentifiersToSDK(flowCollection.Transactions), + } +} + +func VMTransactionResultToEmulator( + txnId flowgo.Identifier, + output fvm.ProcedureOutput, +) ( + *TransactionResult, + error, +) { + txID := FlowIdentifierToSDK(txnId) + + sdkEvents, err := FlowEventsToSDK(output.Events) + if 
err != nil { + return nil, err + } + + return &TransactionResult{ + TransactionID: txID, + ComputationUsed: output.ComputationUsed, + MemoryEstimate: output.MemoryEstimate, + Error: VMErrorToEmulator(output.Err), + Logs: output.Logs, + Events: sdkEvents, + }, nil +} + +func VMErrorToEmulator(vmError fvmerrors.CodedError) error { + if vmError == nil { + return nil + } + + return &FVMError{FlowError: vmError} +} + +func ToStorableResult( + output fvm.ProcedureOutput, + blockID flowgo.Identifier, + blockHeight uint64, +) ( + StorableTransactionResult, + error, +) { + var errorCode int + var errorMessage string + + if output.Err != nil { + errorCode = int(output.Err.Code()) + errorMessage = output.Err.Error() + } + + return StorableTransactionResult{ + BlockID: blockID, + BlockHeight: blockHeight, + ErrorCode: errorCode, + ErrorMessage: errorMessage, + Logs: output.Logs, + Events: output.Events, + }, nil +} diff --git a/integration/internal/emulator/doc.go b/integration/internal/emulator/doc.go new file mode 100644 index 00000000000..416e7d84b3c --- /dev/null +++ b/integration/internal/emulator/doc.go @@ -0,0 +1,11 @@ +// Package emulator is a minimal version of the Flow Emulator (https://github.com/onflow/flow-emulator) +// for use within some integration tests for flow-go. +// Using an Emulator is desirable for test cases where: +// - we don't want to, or can't, run the test case against a local Docker network (package integration/testnet) +// - we want the test to include execution of smart contract code in a realistic environment +// +// Before using this package, flow-go's integration tests used the Flow Emulator directly. +// This created a repository-wise circular dependency and complicated version upgrades (see https://github.com/onflow/flow-go/issues/2863). +// The main purpose for this package is to replace that dependency with minimal ongoing +// maintenance overhead. +package emulator diff --git a/integration/internal/emulator/emulator.go b/integration/internal/emulator/emulator.go new file mode 100644 index 00000000000..91d59be517b --- /dev/null +++ b/integration/internal/emulator/emulator.go @@ -0,0 +1,173 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package emulator + +import ( + "fmt" + + sdkcrypto "github.com/onflow/flow-go-sdk/crypto" + + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + + "github.com/onflow/flow-go/access" + flowgo "github.com/onflow/flow-go/model/flow" +) + +// SignatureAlgorithm is an identifier for a signature algorithm (and parameters if applicable). 
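Before continuing with the signature-algorithm helpers in emulator.go, a usage sketch for the SDK/flow-go converters defined in convert.go above. This is a hedged, standalone test-style example; the test name and the field values are arbitrary:

```go
package emulator_test

import (
	"testing"

	sdk "github.com/onflow/flow-go-sdk"

	"github.com/onflow/flow-go/integration/internal/emulator"
)

// TestTransactionRoundTrip sketches the expectation that SDKTransactionToFlow
// and FlowTransactionToSDK are inverses for the fields they copy.
func TestTransactionRoundTrip(t *testing.T) {
	sdkTx := sdk.Transaction{
		Script:   []byte("transaction { execute {} }"),
		GasLimit: 100,
	}

	flowTx := emulator.SDKTransactionToFlow(sdkTx) // sdk.Transaction -> *flow.TransactionBody
	back := emulator.FlowTransactionToSDK(*flowTx) // flow.TransactionBody -> sdk.Transaction

	if back.GasLimit != sdkTx.GasLimit || string(back.Script) != string(sdkTx.Script) {
		t.Fatalf("transaction changed in round trip: %+v != %+v", back, sdkTx)
	}
}
```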
+type SignatureAlgorithm = crypto.SigningAlgorithm + +const ( + UnknownSignatureAlgorithm SignatureAlgorithm = crypto.UnknownSigningAlgorithm + // ECDSA_P256 is ECDSA on NIST P-256 curve + ECDSA_P256 = crypto.ECDSAP256 + // ECDSA_secp256k1 is ECDSA on secp256k1 curve + ECDSA_secp256k1 = crypto.ECDSASecp256k1 + // BLS_BLS12_381 is BLS on BLS12-381 curve + BLS_BLS12_381 = crypto.BLSBLS12381 +) + +// StringToSignatureAlgorithm converts a string to a SignatureAlgorithm. +func StringToSignatureAlgorithm(s string) SignatureAlgorithm { + switch s { + case ECDSA_P256.String(): + return ECDSA_P256 + case ECDSA_secp256k1.String(): + return ECDSA_secp256k1 + case BLS_BLS12_381.String(): + return BLS_BLS12_381 + default: + return UnknownSignatureAlgorithm + } +} + +type ServiceKey struct { + Index uint32 + Address flowgo.Address + SequenceNumber uint64 + PrivateKey crypto.PrivateKey + PublicKey crypto.PublicKey + HashAlgo hash.HashingAlgorithm + SigAlgo SignatureAlgorithm + Weight int +} + +const defaultServiceKeyPrivateKeySeed = "elephant ears space cowboy octopus rodeo potato cannon pineapple" +const DefaultServiceKeySigAlgo = sdkcrypto.ECDSA_P256 +const DefaultServiceKeyHashAlgo = sdkcrypto.SHA3_256 + +func DefaultServiceKey() *ServiceKey { + return GenerateDefaultServiceKey(DefaultServiceKeySigAlgo, DefaultServiceKeyHashAlgo) +} + +func GenerateDefaultServiceKey( + sigAlgo crypto.SigningAlgorithm, + hashAlgo hash.HashingAlgorithm, +) *ServiceKey { + privateKey, err := crypto.GeneratePrivateKey( + sigAlgo, + []byte(defaultServiceKeyPrivateKeySeed), + ) + if err != nil { + panic(fmt.Sprintf("Failed to generate default service key: %s", err.Error())) + } + + return &ServiceKey{ + PrivateKey: privateKey, + PublicKey: privateKey.PublicKey(), + SigAlgo: sigAlgo, + HashAlgo: hashAlgo, + } +} + +func (s ServiceKey) Signer() (sdkcrypto.Signer, error) { + return sdkcrypto.NewInMemorySigner(s.PrivateKey, s.HashAlgo) +} + +func (s ServiceKey) AccountKey() (crypto.PublicKey, crypto.PrivateKey) { + + var publicKey crypto.PublicKey + if s.PublicKey != nil { + publicKey = s.PublicKey + } + + if s.PrivateKey != nil { + publicKey = s.PrivateKey.PublicKey() + } + + return publicKey, s.PrivateKey + +} + +type AccessProvider interface { + Ping() error + GetNetworkParameters() access.NetworkParameters + + GetLatestBlock() (*flowgo.Block, error) + GetBlockByID(id flowgo.Identifier) (*flowgo.Block, error) + GetBlockByHeight(height uint64) (*flowgo.Block, error) + + GetCollectionByID(colID flowgo.Identifier) (*flowgo.LightCollection, error) + GetFullCollectionByID(colID flowgo.Identifier) (*flowgo.Collection, error) + + GetTransaction(txID flowgo.Identifier) (*flowgo.TransactionBody, error) + GetTransactionResult(txID flowgo.Identifier) (*access.TransactionResult, error) + GetTransactionsByBlockID(blockID flowgo.Identifier) ([]*flowgo.TransactionBody, error) + GetTransactionResultsByBlockID(blockID flowgo.Identifier) ([]*access.TransactionResult, error) + + GetAccount(address flowgo.Address) (*flowgo.Account, error) + GetAccountAtBlockHeight(address flowgo.Address, blockHeight uint64) (*flowgo.Account, error) + GetAccountByIndex(uint) (*flowgo.Account, error) + + GetEventsByHeight(blockHeight uint64, eventType string) ([]flowgo.Event, error) + GetEventsForBlockIDs(eventType string, blockIDs []flowgo.Identifier) ([]flowgo.BlockEvents, error) + GetEventsForHeightRange(eventType string, startHeight, endHeight uint64) ([]flowgo.BlockEvents, error) + + ExecuteScript(script []byte, arguments [][]byte) (*ScriptResult, error) + 
ExecuteScriptAtBlockHeight(script []byte, arguments [][]byte, blockHeight uint64) (*ScriptResult, error) + ExecuteScriptAtBlockID(script []byte, arguments [][]byte, id flowgo.Identifier) (*ScriptResult, error) + + SendTransaction(tx *flowgo.TransactionBody) error + AddTransaction(tx flowgo.TransactionBody) error +} + +type AutoMineCapable interface { + EnableAutoMine() + DisableAutoMine() +} + +type ExecutionCapable interface { + ExecuteAndCommitBlock() (*flowgo.Block, []*TransactionResult, error) + ExecuteNextTransaction() (*TransactionResult, error) + ExecuteBlock() ([]*TransactionResult, error) + CommitBlock() (*flowgo.Block, error) +} + +type Contract struct { + Name string + Source string +} + +// Emulator defines the method set of an emulated emulator. +type Emulator interface { + ServiceKey() ServiceKey + AccessProvider + AutoMineCapable + ExecutionCapable +} diff --git a/integration/internal/emulator/errors.go b/integration/internal/emulator/errors.go new file mode 100644 index 00000000000..e23b6822ce6 --- /dev/null +++ b/integration/internal/emulator/errors.go @@ -0,0 +1,274 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package emulator + +import ( + "errors" + "fmt" + + "github.com/onflow/flow-go-sdk/crypto" + + "github.com/onflow/flow-go/access" + fvmerrors "github.com/onflow/flow-go/fvm/errors" + flowgo "github.com/onflow/flow-go/model/flow" +) + +var ErrNotFound = errors.New("could not find entity") + +type InvalidArgumentError struct { + msg string +} + +func (e InvalidArgumentError) Error() string { + return fmt.Sprintf("Invalid argument error: %s", e.msg) +} + +func NewInvalidArgumentError(msg string) *InvalidArgumentError { + return &InvalidArgumentError{msg: msg} +} + +type InternalError struct { + msg string +} + +func (e InternalError) Error() string { + return fmt.Sprintf("Internal error: %s", e.msg) +} + +func NewInternalError(msg string) *InternalError { + return &InternalError{msg: msg} +} + +// A NotFoundError indicates that an entity could not be found. +type NotFoundError interface { + isNotFoundError() +} + +// A BlockNotFoundError indicates that a block could not be found. +type BlockNotFoundError interface { + isBlockNotFoundError() +} + +// A BlockNotFoundByHeightError indicates that a block could not be found at the specified height. +type BlockNotFoundByHeightError struct { + Height uint64 +} + +func (e *BlockNotFoundByHeightError) isNotFoundError() {} +func (e *BlockNotFoundByHeightError) isBlockNotFoundError() {} + +func (e *BlockNotFoundByHeightError) Error() string { + return fmt.Sprintf("could not find block at height %d", e.Height) +} + +// A BlockNotFoundByIDError indicates that a block with the specified ID could not be found. 
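These error values are designed for matching with the standard errors package: ErrNotFound is a sentinel, while the typed not-found errors (the by-ID variant is defined next) carry the missing height or identifier. A hedged sketch of a caller branching on them, using the in-memory store that memstore.go adds later in this diff; the test name and the height value are arbitrary:

```go
package emulator_test

import (
	"context"
	"errors"
	"testing"

	"github.com/onflow/flow-go/integration/internal/emulator"
)

func TestNotFoundHandling(t *testing.T) {
	store := emulator.NewMemoryStore()

	// Height 42 was never committed, so the in-memory store reports ErrNotFound.
	_, err := store.BlockByHeight(context.Background(), 42)
	if !errors.Is(err, emulator.ErrNotFound) {
		t.Fatalf("expected ErrNotFound, got %v", err)
	}

	// Providers that return the typed variant can be matched with errors.As;
	// the plain sentinel above simply will not match, and that is fine.
	var byHeight *emulator.BlockNotFoundByHeightError
	if errors.As(err, &byHeight) {
		t.Logf("missing height: %d", byHeight.Height)
	}
}
```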
+type BlockNotFoundByIDError struct { + ID flowgo.Identifier +} + +func (e *BlockNotFoundByIDError) isNotFoundError() {} +func (e *BlockNotFoundByIDError) isBlockNotFoundError() {} + +func (e *BlockNotFoundByIDError) Error() string { + return fmt.Sprintf("could not find block with ID %s", e.ID) +} + +// A CollectionNotFoundError indicates that a collection could not be found. +type CollectionNotFoundError struct { + ID flowgo.Identifier +} + +func (e *CollectionNotFoundError) isNotFoundError() {} + +func (e *CollectionNotFoundError) Error() string { + return fmt.Sprintf("could not find collection with ID %s", e.ID) +} + +// A TransactionNotFoundError indicates that a transaction could not be found. +type TransactionNotFoundError struct { + ID flowgo.Identifier +} + +func (e *TransactionNotFoundError) isNotFoundError() {} + +func (e *TransactionNotFoundError) Error() string { + return fmt.Sprintf("could not find transaction with ID %s", e.ID) +} + +// An AccountNotFoundError indicates that an account could not be found. +type AccountNotFoundError struct { + Address flowgo.Address +} + +func (e *AccountNotFoundError) isNotFoundError() {} + +func (e *AccountNotFoundError) Error() string { + return fmt.Sprintf("could not find account with address %s", e.Address) +} + +// A TransactionValidationError indicates that a submitted transaction is invalid. +type TransactionValidationError interface { + isTransactionValidationError() +} + +// A DuplicateTransactionError indicates that a transaction has already been submitted. +type DuplicateTransactionError struct { + TxID flowgo.Identifier +} + +func (e *DuplicateTransactionError) isTransactionValidationError() {} + +func (e *DuplicateTransactionError) Error() string { + return fmt.Sprintf("transaction with ID %s has already been submitted", e.TxID) +} + +// IncompleteTransactionError indicates that a transaction is missing one or more required fields. +type IncompleteTransactionError struct { + MissingFields []string +} + +func (e *IncompleteTransactionError) isTransactionValidationError() {} + +func (e *IncompleteTransactionError) Error() string { + return fmt.Sprintf("transaction is missing required fields: %s", e.MissingFields) +} + +// ExpiredTransactionError indicates that a transaction has expired. +type ExpiredTransactionError struct { + RefHeight, FinalHeight uint64 +} + +func (e *ExpiredTransactionError) isTransactionValidationError() {} + +func (e *ExpiredTransactionError) Error() string { + return fmt.Sprintf("transaction is expired: ref_height=%d final_height=%d", e.RefHeight, e.FinalHeight) +} + +// InvalidTransactionScriptError indicates that a transaction contains an invalid Cadence script. +type InvalidTransactionScriptError struct { + ParserErr error +} + +func (e *InvalidTransactionScriptError) isTransactionValidationError() {} + +func (e *InvalidTransactionScriptError) Error() string { + return fmt.Sprintf("failed to parse transaction Cadence script: %s", e.ParserErr) +} + +func (e *InvalidTransactionScriptError) Unwrap() error { + return e.ParserErr +} + +// InvalidTransactionGasLimitError indicates that a transaction specifies a gas limit that exceeds the maximum. 
+type InvalidTransactionGasLimitError struct { + Maximum uint64 + Actual uint64 +} + +func (e *InvalidTransactionGasLimitError) isTransactionValidationError() {} + +func (e *InvalidTransactionGasLimitError) Error() string { + return fmt.Sprintf("transaction gas limit (%d) exceeds the maximum gas limit (%d)", e.Actual, e.Maximum) +} + +// An InvalidStateVersionError indicates that a state version hash provided is invalid. +type InvalidStateVersionError struct { + Version crypto.Hash +} + +func (e *InvalidStateVersionError) Error() string { + return fmt.Sprintf("execution state with version hash %x is invalid", e.Version) +} + +// A PendingBlockCommitBeforeExecutionError indicates that the current pending block has not been executed (cannot commit). +type PendingBlockCommitBeforeExecutionError struct { + BlockID flowgo.Identifier +} + +func (e *PendingBlockCommitBeforeExecutionError) Error() string { + return fmt.Sprintf("pending block with ID %s cannot be committed before execution", e.BlockID) +} + +// A PendingBlockMidExecutionError indicates that the current pending block is mid-execution. +type PendingBlockMidExecutionError struct { + BlockID flowgo.Identifier +} + +func (e *PendingBlockMidExecutionError) Error() string { + return fmt.Sprintf("pending block with ID %s is currently being executed", e.BlockID) +} + +// A PendingBlockTransactionsExhaustedError indicates that the current pending block has finished executing (no more transactions to execute). +type PendingBlockTransactionsExhaustedError struct { + BlockID flowgo.Identifier +} + +func (e *PendingBlockTransactionsExhaustedError) Error() string { + return fmt.Sprintf("pending block with ID %s contains no more transactions to execute", e.BlockID) +} + +// A StorageError indicates that an error occurred in the storage provider. +type StorageError struct { + inner error +} + +func (e *StorageError) Error() string { + return fmt.Sprintf("storage failure: %v", e.inner) +} + +func (e *StorageError) Unwrap() error { + return e.inner +} + +// An ExecutionError occurs when a transaction fails to execute. 
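The pending-block errors above correspond to misuses of the block lifecycle that ExecutionCapable (defined in emulator.go) exposes: the transactions in the pending block must be executed before that block can be committed. A hedged sketch of the intended ordering, written as in-package code; the helper name is illustrative and error handling is kept minimal:

```go
// mineOnce drives one pending block through the lifecycle that the errors
// above guard: execute every pending transaction, then commit the block.
// Calling CommitBlock first is the misuse a conforming implementation would
// report as PendingBlockCommitBeforeExecutionError. The combined
// ExecuteAndCommitBlock method performs the same two steps in a single call.
func mineOnce(e Emulator) (*flowgo.Block, []*TransactionResult, error) {
	results, err := e.ExecuteBlock()
	if err != nil {
		return nil, nil, err
	}
	block, err := e.CommitBlock()
	if err != nil {
		return nil, nil, err
	}
	return block, results, nil
}
```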
+type ExecutionError struct { + Code int + Message string +} + +func (e *ExecutionError) Error() string { + return fmt.Sprintf("execution error code %d: %s", e.Code, e.Message) +} + +type FVMError struct { + FlowError fvmerrors.CodedError +} + +func (f *FVMError) Error() string { + return f.FlowError.Error() +} + +func (f *FVMError) Unwrap() error { + return f.FlowError +} + +func ConvertAccessError(err error) error { + switch typedErr := err.(type) { + case access.IncompleteTransactionError: + return &IncompleteTransactionError{MissingFields: typedErr.MissingFields} + case access.ExpiredTransactionError: + return &ExpiredTransactionError{RefHeight: typedErr.RefHeight, FinalHeight: typedErr.FinalHeight} + case access.InvalidGasLimitError: + return &InvalidTransactionGasLimitError{Maximum: typedErr.Maximum, Actual: typedErr.Actual} + case access.InvalidScriptError: + return &InvalidTransactionScriptError{ParserErr: typedErr.ParserErr} + } + + return err +} diff --git a/integration/internal/emulator/ledger.go b/integration/internal/emulator/ledger.go new file mode 100644 index 00000000000..f1bcdac7d79 --- /dev/null +++ b/integration/internal/emulator/ledger.go @@ -0,0 +1,143 @@ +package emulator + +import ( + "context" + "errors" + "fmt" + "math" + + "github.com/onflow/cadence" + + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/meter" + "github.com/onflow/flow-go/fvm/storage/snapshot" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func configureLedger( + conf config, + store EmulatorStorage, + vm *fvm.VirtualMachine, + ctx fvm.Context, +) ( + *flowgo.Block, + snapshot.StorageSnapshot, + error, +) { + + latestBlock, err := store.LatestBlock(context.Background()) + if err != nil && !errors.Is(err, ErrNotFound) { + return nil, nil, err + } + + if errors.Is(err, ErrNotFound) { + // bootstrap the ledger with the genesis block + ledger, err := store.LedgerByHeight(context.Background(), 0) + if err != nil { + return nil, nil, err + } + + genesisExecutionSnapshot, err := bootstrapLedger(vm, ctx, ledger, conf) + if err != nil { + return nil, nil, fmt.Errorf("failed to bootstrap execution state: %w", err) + } + + // commit the genesis block to storage + genesis := flowgo.Genesis(conf.GetChainID()) + latestBlock = *genesis + + err = store.CommitBlock( + context.Background(), + *genesis, + nil, + nil, + nil, + genesisExecutionSnapshot, + nil, + ) + if err != nil { + return nil, nil, err + } + } + + latestLedger, err := store.LedgerByHeight( + context.Background(), + latestBlock.Header.Height, + ) + + if err != nil { + return nil, nil, err + } + + return &latestBlock, latestLedger, nil +} + +func bootstrapLedger( + vm *fvm.VirtualMachine, + ctx fvm.Context, + ledger snapshot.StorageSnapshot, + conf config, +) ( + *snapshot.ExecutionSnapshot, + error, +) { + serviceKey := conf.GetServiceKey() + + ctx = fvm.NewContextFromParent( + ctx, + fvm.WithAccountStorageLimit(false), + ) + + flowAccountKey := flowgo.AccountPublicKey{ + PublicKey: serviceKey.PublicKey, + SignAlgo: serviceKey.SigAlgo, + HashAlgo: serviceKey.HashAlgo, + Weight: fvm.AccountKeyWeightThreshold, + } + + bootstrap := configureBootstrapProcedure(conf, flowAccountKey, conf.GenesisTokenSupply) + + executionSnapshot, output, err := vm.Run(ctx, bootstrap, ledger) + if err != nil { + return nil, err + } + + if output.Err != nil { + return nil, output.Err + } + + return executionSnapshot, nil +} + +func configureBootstrapProcedure(conf config, flowAccountKey 
flowgo.AccountPublicKey, supply cadence.UFix64) *fvm.BootstrapProcedure { + options := make([]fvm.BootstrapProcedureOption, 0) + options = append(options, + fvm.WithInitialTokenSupply(supply), + fvm.WithRestrictedAccountCreationEnabled(false), + // This enables variable transaction fees AND execution effort metering + // as described in Variable Transaction Fees: + // Execution Effort FLIP: https://github.com/onflow/flow/pull/753 + fvm.WithTransactionFee(fvm.DefaultTransactionFees), + fvm.WithExecutionMemoryLimit(math.MaxUint32), + fvm.WithExecutionMemoryWeights(meter.DefaultMemoryWeights), + fvm.WithExecutionEffortWeights(environment.MainnetExecutionEffortWeights), + ) + + if conf.ExecutionEffortWeights != nil { + options = append(options, + fvm.WithExecutionEffortWeights(conf.ExecutionEffortWeights), + ) + } + if conf.StorageLimitEnabled { + options = append(options, + fvm.WithAccountCreationFee(conf.MinimumStorageReservation), + fvm.WithMinimumStorageReservation(conf.MinimumStorageReservation), + fvm.WithStorageMBPerFLOW(conf.StorageMBPerFLOW), + ) + } + return fvm.Bootstrap( + flowAccountKey, + options..., + ) +} diff --git a/integration/internal/emulator/memstore.go b/integration/internal/emulator/memstore.go new file mode 100644 index 00000000000..191cd21f4d8 --- /dev/null +++ b/integration/internal/emulator/memstore.go @@ -0,0 +1,395 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package emulator + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/storage/snapshot" + flowgo "github.com/onflow/flow-go/model/flow" +) + +// Store implements the Store interface with an in-memory store. 
+type Store struct { + mu sync.RWMutex + // block ID to block height + blockIDToHeight map[flowgo.Identifier]uint64 + // blocks by height + blocks map[uint64]flowgo.Block + // collections by ID + collections map[flowgo.Identifier]flowgo.LightCollection + // transactions by ID + transactions map[flowgo.Identifier]flowgo.TransactionBody + // Transaction results by ID + transactionResults map[flowgo.Identifier]StorableTransactionResult + // Ledger states by block height + ledger map[uint64]snapshot.SnapshotTree + // events by block height + eventsByBlockHeight map[uint64][]flowgo.Event + // highest block height + blockHeight uint64 +} + +var _ environment.Blocks = &Store{} +var _ access.Blocks = &Store{} +var _ EmulatorStorage = &Store{} + +func (b *Store) HeaderByID(id flowgo.Identifier) (*flowgo.Header, error) { + block, err := b.BlockByID(context.Background(), id) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil, nil + } + return nil, err + } + return block.Header, nil +} + +func (b *Store) FinalizedHeader() (*flowgo.Header, error) { + block, err := b.LatestBlock(context.Background()) + if err != nil { + return nil, err + } + + return block.Header, nil +} + +func (b *Store) SealedHeader() (*flowgo.Header, error) { + block, err := b.LatestBlock(context.Background()) + if err != nil { + return nil, err + } + + return block.Header, nil +} + +func (b *Store) IndexedHeight() (uint64, error) { + block, err := b.LatestBlock(context.Background()) + if err != nil { + return 0, err + } + + return block.Header.Height, nil +} + +// ByHeightFrom We don't have to do anything complex here, as emulator does not fork the chain +func (b *Store) ByHeightFrom(height uint64, header *flowgo.Header) (*flowgo.Header, error) { + if height > header.Height { + return nil, ErrNotFound + } + block, err := b.BlockByHeight(context.Background(), height) + if err != nil { + return nil, err + } + + return block.Header, nil +} + +// New returns a new in-memory Store implementation. 
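NewMemoryStore, defined next, is also what config.GetStore falls back to when no store was configured. Tests that want to inspect committed data directly can construct the store themselves and hand the same instance to the configuration through the WithStore option from config.go. A hedged sketch, reusing the illustrative applyOptions helper from the options section; none of this wiring is part of the diff itself:

```go
// newTestConfig wires an explicit in-memory store into the config so a test
// can later read blocks and events back from the same Store instance.
func newTestConfig() (config, *Store) {
	store := NewMemoryStore()
	conf := applyOptions(
		WithStore(store),
		WithTransactionValidationEnabled(false), // skip signature checks in tests
	)
	return conf, store
}
```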
+func NewMemoryStore() *Store { + return &Store{ + mu: sync.RWMutex{}, + blockIDToHeight: make(map[flowgo.Identifier]uint64), + blocks: make(map[uint64]flowgo.Block), + collections: make(map[flowgo.Identifier]flowgo.LightCollection), + transactions: make(map[flowgo.Identifier]flowgo.TransactionBody), + transactionResults: make(map[flowgo.Identifier]StorableTransactionResult), + ledger: make(map[uint64]snapshot.SnapshotTree), + eventsByBlockHeight: make(map[uint64][]flowgo.Event), + } +} + +func (b *Store) Start() error { + return nil +} + +func (b *Store) Stop() { +} + +func (b *Store) LatestBlockHeight(ctx context.Context) (uint64, error) { + block, err := b.LatestBlock(ctx) + if err != nil { + return 0, err + } + + return block.Header.Height, nil +} + +func (b *Store) LatestBlock(_ context.Context) (flowgo.Block, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + latestBlock, ok := b.blocks[b.blockHeight] + if !ok { + return flowgo.Block{}, ErrNotFound + } + return latestBlock, nil +} + +func (b *Store) StoreBlock(_ context.Context, block *flowgo.Block) error { + b.mu.Lock() + defer b.mu.Unlock() + + return b.storeBlock(block) +} + +func (b *Store) storeBlock(block *flowgo.Block) error { + b.blocks[block.Header.Height] = *block + b.blockIDToHeight[block.ID()] = block.Header.Height + + if block.Header.Height > b.blockHeight { + b.blockHeight = block.Header.Height + } + + return nil +} + +func (b *Store) BlockByID(_ context.Context, blockID flowgo.Identifier) (*flowgo.Block, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + blockHeight, ok := b.blockIDToHeight[blockID] + if !ok { + return nil, ErrNotFound + } + + block, ok := b.blocks[blockHeight] + if !ok { + return nil, ErrNotFound + } + + return &block, nil + +} + +func (b *Store) BlockByHeight(_ context.Context, height uint64) (*flowgo.Block, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + block, ok := b.blocks[height] + if !ok { + return nil, ErrNotFound + } + + return &block, nil +} + +func (b *Store) CommitBlock( + _ context.Context, + block flowgo.Block, + collections []*flowgo.LightCollection, + transactions map[flowgo.Identifier]*flowgo.TransactionBody, + transactionResults map[flowgo.Identifier]*StorableTransactionResult, + executionSnapshot *snapshot.ExecutionSnapshot, + events []flowgo.Event, +) error { + b.mu.Lock() + defer b.mu.Unlock() + + if len(transactions) != len(transactionResults) { + return fmt.Errorf( + "transactions count (%d) does not match result count (%d)", + len(transactions), + len(transactionResults), + ) + } + + err := b.storeBlock(&block) + if err != nil { + return err + } + + for _, col := range collections { + err := b.InsertCollection(*col) + if err != nil { + return err + } + } + + for _, tx := range transactions { + err := b.InsertTransaction(tx.ID(), *tx) + if err != nil { + return err + } + } + + for txID, result := range transactionResults { + err := b.InsertTransactionResult(txID, *result) + if err != nil { + return err + } + } + + err = b.InsertExecutionSnapshot( + block.Header.Height, + executionSnapshot) + if err != nil { + return err + } + + err = b.InsertEvents(block.Header.Height, events) + if err != nil { + return err + } + + return nil +} + +func (b *Store) CollectionByID( + _ context.Context, + collectionID flowgo.Identifier, +) (flowgo.LightCollection, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + tx, ok := b.collections[collectionID] + if !ok { + return flowgo.LightCollection{}, ErrNotFound + } + return tx, nil +} + +func (b *Store) FullCollectionByID( + _ context.Context, + 
collectionID flowgo.Identifier, +) (flowgo.Collection, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + light, ok := b.collections[collectionID] + if !ok { + return flowgo.Collection{}, ErrNotFound + } + + txs := make([]*flowgo.TransactionBody, len(light.Transactions)) + for i, txID := range light.Transactions { + tx, ok := b.transactions[txID] + if !ok { + return flowgo.Collection{}, ErrNotFound + } + txs[i] = &tx + } + + return flowgo.Collection{ + Transactions: txs, + }, nil +} + +func (b *Store) TransactionByID( + _ context.Context, + transactionID flowgo.Identifier, +) (flowgo.TransactionBody, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + tx, ok := b.transactions[transactionID] + if !ok { + return flowgo.TransactionBody{}, ErrNotFound + } + return tx, nil + +} + +func (b *Store) TransactionResultByID( + _ context.Context, + transactionID flowgo.Identifier, +) (StorableTransactionResult, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + result, ok := b.transactionResults[transactionID] + if !ok { + return StorableTransactionResult{}, ErrNotFound + } + return result, nil + +} + +func (b *Store) LedgerByHeight( + _ context.Context, + blockHeight uint64, +) (snapshot.StorageSnapshot, error) { + return b.ledger[blockHeight], nil +} + +func (b *Store) EventsByHeight( + _ context.Context, + blockHeight uint64, + eventType string, +) ([]flowgo.Event, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + allEvents := b.eventsByBlockHeight[blockHeight] + + events := make([]flowgo.Event, 0) + + for _, event := range allEvents { + if eventType == "" { + events = append(events, event) + } else { + if string(event.Type) == eventType { + events = append(events, event) + } + } + } + + return events, nil +} + +func (b *Store) InsertCollection(col flowgo.LightCollection) error { + b.collections[col.ID()] = col + return nil +} + +func (b *Store) InsertTransaction(txID flowgo.Identifier, tx flowgo.TransactionBody) error { + b.transactions[txID] = tx + return nil +} + +func (b *Store) InsertTransactionResult(txID flowgo.Identifier, result StorableTransactionResult) error { + b.transactionResults[txID] = result + return nil +} + +func (b *Store) InsertExecutionSnapshot( + blockHeight uint64, + executionSnapshot *snapshot.ExecutionSnapshot, +) error { + oldLedger := b.ledger[blockHeight-1] + + b.ledger[blockHeight] = oldLedger.Append(executionSnapshot) + + return nil +} + +func (b *Store) InsertEvents(blockHeight uint64, events []flowgo.Event) error { + if b.eventsByBlockHeight[blockHeight] == nil { + b.eventsByBlockHeight[blockHeight] = events + } else { + b.eventsByBlockHeight[blockHeight] = append(b.eventsByBlockHeight[blockHeight], events...) + } + + return nil +} diff --git a/integration/internal/emulator/mocks/emulator.go b/integration/internal/emulator/mocks/emulator.go new file mode 100644 index 00000000000..1e81328bafe --- /dev/null +++ b/integration/internal/emulator/mocks/emulator.go @@ -0,0 +1,469 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/onflow/flow-go/integration/emulator (interfaces: Emulator) +// +// Generated by this command: +// +// mockgen -destination=emulator/mocks/emulator.go -package=mocks github.com/onflow/flow-go/integration/emulator Emulator +// + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + reflect "reflect" + + gomock "go.uber.org/mock/gomock" + + access "github.com/onflow/flow-go/access" + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flow "github.com/onflow/flow-go/model/flow" +) + +// MockEmulator is a mock of Emulator interface. +type MockEmulator struct { + ctrl *gomock.Controller + recorder *MockEmulatorMockRecorder + isgomock struct{} +} + +// MockEmulatorMockRecorder is the mock recorder for MockEmulator. +type MockEmulatorMockRecorder struct { + mock *MockEmulator +} + +// NewMockEmulator creates a new mock instance. +func NewMockEmulator(ctrl *gomock.Controller) *MockEmulator { + mock := &MockEmulator{ctrl: ctrl} + mock.recorder = &MockEmulatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEmulator) EXPECT() *MockEmulatorMockRecorder { + return m.recorder +} + +// AddTransaction mocks base method. +func (m *MockEmulator) AddTransaction(tx flow.TransactionBody) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddTransaction", tx) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddTransaction indicates an expected call of AddTransaction. +func (mr *MockEmulatorMockRecorder) AddTransaction(tx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTransaction", reflect.TypeOf((*MockEmulator)(nil).AddTransaction), tx) +} + +// CommitBlock mocks base method. +func (m *MockEmulator) CommitBlock() (*flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitBlock") + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CommitBlock indicates an expected call of CommitBlock. +func (mr *MockEmulatorMockRecorder) CommitBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitBlock", reflect.TypeOf((*MockEmulator)(nil).CommitBlock)) +} + +// DisableAutoMine mocks base method. +func (m *MockEmulator) DisableAutoMine() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DisableAutoMine") +} + +// DisableAutoMine indicates an expected call of DisableAutoMine. +func (mr *MockEmulatorMockRecorder) DisableAutoMine() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableAutoMine", reflect.TypeOf((*MockEmulator)(nil).DisableAutoMine)) +} + +// EnableAutoMine mocks base method. +func (m *MockEmulator) EnableAutoMine() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "EnableAutoMine") +} + +// EnableAutoMine indicates an expected call of EnableAutoMine. +func (mr *MockEmulatorMockRecorder) EnableAutoMine() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableAutoMine", reflect.TypeOf((*MockEmulator)(nil).EnableAutoMine)) +} + +// ExecuteAndCommitBlock mocks base method. +func (m *MockEmulator) ExecuteAndCommitBlock() (*flow.Block, []*emulator.TransactionResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteAndCommitBlock") + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].([]*emulator.TransactionResult) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ExecuteAndCommitBlock indicates an expected call of ExecuteAndCommitBlock. 
+func (mr *MockEmulatorMockRecorder) ExecuteAndCommitBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteAndCommitBlock", reflect.TypeOf((*MockEmulator)(nil).ExecuteAndCommitBlock)) +} + +// ExecuteBlock mocks base method. +func (m *MockEmulator) ExecuteBlock() ([]*emulator.TransactionResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteBlock") + ret0, _ := ret[0].([]*emulator.TransactionResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteBlock indicates an expected call of ExecuteBlock. +func (mr *MockEmulatorMockRecorder) ExecuteBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteBlock", reflect.TypeOf((*MockEmulator)(nil).ExecuteBlock)) +} + +// ExecuteNextTransaction mocks base method. +func (m *MockEmulator) ExecuteNextTransaction() (*emulator.TransactionResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteNextTransaction") + ret0, _ := ret[0].(*emulator.TransactionResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteNextTransaction indicates an expected call of ExecuteNextTransaction. +func (mr *MockEmulatorMockRecorder) ExecuteNextTransaction() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteNextTransaction", reflect.TypeOf((*MockEmulator)(nil).ExecuteNextTransaction)) +} + +// ExecuteScript mocks base method. +func (m *MockEmulator) ExecuteScript(script []byte, arguments [][]byte) (*emulator.ScriptResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteScript", script, arguments) + ret0, _ := ret[0].(*emulator.ScriptResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteScript indicates an expected call of ExecuteScript. +func (mr *MockEmulatorMockRecorder) ExecuteScript(script, arguments any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteScript", reflect.TypeOf((*MockEmulator)(nil).ExecuteScript), script, arguments) +} + +// ExecuteScriptAtBlockHeight mocks base method. +func (m *MockEmulator) ExecuteScriptAtBlockHeight(script []byte, arguments [][]byte, blockHeight uint64) (*emulator.ScriptResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteScriptAtBlockHeight", script, arguments, blockHeight) + ret0, _ := ret[0].(*emulator.ScriptResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteScriptAtBlockHeight indicates an expected call of ExecuteScriptAtBlockHeight. +func (mr *MockEmulatorMockRecorder) ExecuteScriptAtBlockHeight(script, arguments, blockHeight any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteScriptAtBlockHeight", reflect.TypeOf((*MockEmulator)(nil).ExecuteScriptAtBlockHeight), script, arguments, blockHeight) +} + +// ExecuteScriptAtBlockID mocks base method. +func (m *MockEmulator) ExecuteScriptAtBlockID(script []byte, arguments [][]byte, id flow.Identifier) (*emulator.ScriptResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteScriptAtBlockID", script, arguments, id) + ret0, _ := ret[0].(*emulator.ScriptResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteScriptAtBlockID indicates an expected call of ExecuteScriptAtBlockID. 
+func (mr *MockEmulatorMockRecorder) ExecuteScriptAtBlockID(script, arguments, id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteScriptAtBlockID", reflect.TypeOf((*MockEmulator)(nil).ExecuteScriptAtBlockID), script, arguments, id) +} + +// GetAccount mocks base method. +func (m *MockEmulator) GetAccount(address flow.Address) (*flow.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccount", address) + ret0, _ := ret[0].(*flow.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccount indicates an expected call of GetAccount. +func (mr *MockEmulatorMockRecorder) GetAccount(address any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccount", reflect.TypeOf((*MockEmulator)(nil).GetAccount), address) +} + +// GetAccountAtBlockHeight mocks base method. +func (m *MockEmulator) GetAccountAtBlockHeight(address flow.Address, blockHeight uint64) (*flow.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountAtBlockHeight", address, blockHeight) + ret0, _ := ret[0].(*flow.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountAtBlockHeight indicates an expected call of GetAccountAtBlockHeight. +func (mr *MockEmulatorMockRecorder) GetAccountAtBlockHeight(address, blockHeight any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountAtBlockHeight", reflect.TypeOf((*MockEmulator)(nil).GetAccountAtBlockHeight), address, blockHeight) +} + +// GetAccountByIndex mocks base method. +func (m *MockEmulator) GetAccountByIndex(arg0 uint) (*flow.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountByIndex", arg0) + ret0, _ := ret[0].(*flow.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountByIndex indicates an expected call of GetAccountByIndex. +func (mr *MockEmulatorMockRecorder) GetAccountByIndex(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountByIndex", reflect.TypeOf((*MockEmulator)(nil).GetAccountByIndex), arg0) +} + +// GetBlockByHeight mocks base method. +func (m *MockEmulator) GetBlockByHeight(height uint64) (*flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockByHeight", height) + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockByHeight indicates an expected call of GetBlockByHeight. +func (mr *MockEmulatorMockRecorder) GetBlockByHeight(height any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByHeight", reflect.TypeOf((*MockEmulator)(nil).GetBlockByHeight), height) +} + +// GetBlockByID mocks base method. +func (m *MockEmulator) GetBlockByID(id flow.Identifier) (*flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockByID", id) + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockByID indicates an expected call of GetBlockByID. +func (mr *MockEmulatorMockRecorder) GetBlockByID(id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByID", reflect.TypeOf((*MockEmulator)(nil).GetBlockByID), id) +} + +// GetCollectionByID mocks base method. 
+func (m *MockEmulator) GetCollectionByID(colID flow.Identifier) (*flow.LightCollection, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCollectionByID", colID) + ret0, _ := ret[0].(*flow.LightCollection) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCollectionByID indicates an expected call of GetCollectionByID. +func (mr *MockEmulatorMockRecorder) GetCollectionByID(colID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCollectionByID", reflect.TypeOf((*MockEmulator)(nil).GetCollectionByID), colID) +} + +// GetEventsByHeight mocks base method. +func (m *MockEmulator) GetEventsByHeight(blockHeight uint64, eventType string) ([]flow.Event, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEventsByHeight", blockHeight, eventType) + ret0, _ := ret[0].([]flow.Event) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEventsByHeight indicates an expected call of GetEventsByHeight. +func (mr *MockEmulatorMockRecorder) GetEventsByHeight(blockHeight, eventType any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEventsByHeight", reflect.TypeOf((*MockEmulator)(nil).GetEventsByHeight), blockHeight, eventType) +} + +// GetEventsForBlockIDs mocks base method. +func (m *MockEmulator) GetEventsForBlockIDs(eventType string, blockIDs []flow.Identifier) ([]flow.BlockEvents, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEventsForBlockIDs", eventType, blockIDs) + ret0, _ := ret[0].([]flow.BlockEvents) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEventsForBlockIDs indicates an expected call of GetEventsForBlockIDs. +func (mr *MockEmulatorMockRecorder) GetEventsForBlockIDs(eventType, blockIDs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEventsForBlockIDs", reflect.TypeOf((*MockEmulator)(nil).GetEventsForBlockIDs), eventType, blockIDs) +} + +// GetEventsForHeightRange mocks base method. +func (m *MockEmulator) GetEventsForHeightRange(eventType string, startHeight, endHeight uint64) ([]flow.BlockEvents, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEventsForHeightRange", eventType, startHeight, endHeight) + ret0, _ := ret[0].([]flow.BlockEvents) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEventsForHeightRange indicates an expected call of GetEventsForHeightRange. +func (mr *MockEmulatorMockRecorder) GetEventsForHeightRange(eventType, startHeight, endHeight any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEventsForHeightRange", reflect.TypeOf((*MockEmulator)(nil).GetEventsForHeightRange), eventType, startHeight, endHeight) +} + +// GetFullCollectionByID mocks base method. +func (m *MockEmulator) GetFullCollectionByID(colID flow.Identifier) (*flow.Collection, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFullCollectionByID", colID) + ret0, _ := ret[0].(*flow.Collection) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFullCollectionByID indicates an expected call of GetFullCollectionByID. +func (mr *MockEmulatorMockRecorder) GetFullCollectionByID(colID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFullCollectionByID", reflect.TypeOf((*MockEmulator)(nil).GetFullCollectionByID), colID) +} + +// GetLatestBlock mocks base method. 
+func (m *MockEmulator) GetLatestBlock() (*flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLatestBlock") + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLatestBlock indicates an expected call of GetLatestBlock. +func (mr *MockEmulatorMockRecorder) GetLatestBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLatestBlock", reflect.TypeOf((*MockEmulator)(nil).GetLatestBlock)) +} + +// GetNetworkParameters mocks base method. +func (m *MockEmulator) GetNetworkParameters() access.NetworkParameters { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetworkParameters") + ret0, _ := ret[0].(access.NetworkParameters) + return ret0 +} + +// GetNetworkParameters indicates an expected call of GetNetworkParameters. +func (mr *MockEmulatorMockRecorder) GetNetworkParameters() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkParameters", reflect.TypeOf((*MockEmulator)(nil).GetNetworkParameters)) +} + +// GetTransaction mocks base method. +func (m *MockEmulator) GetTransaction(txID flow.Identifier) (*flow.TransactionBody, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTransaction", txID) + ret0, _ := ret[0].(*flow.TransactionBody) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTransaction indicates an expected call of GetTransaction. +func (mr *MockEmulatorMockRecorder) GetTransaction(txID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTransaction", reflect.TypeOf((*MockEmulator)(nil).GetTransaction), txID) +} + +// GetTransactionResult mocks base method. +func (m *MockEmulator) GetTransactionResult(txID flow.Identifier) (*access.TransactionResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTransactionResult", txID) + ret0, _ := ret[0].(*access.TransactionResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTransactionResult indicates an expected call of GetTransactionResult. +func (mr *MockEmulatorMockRecorder) GetTransactionResult(txID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTransactionResult", reflect.TypeOf((*MockEmulator)(nil).GetTransactionResult), txID) +} + +// GetTransactionResultsByBlockID mocks base method. +func (m *MockEmulator) GetTransactionResultsByBlockID(blockID flow.Identifier) ([]*access.TransactionResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTransactionResultsByBlockID", blockID) + ret0, _ := ret[0].([]*access.TransactionResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTransactionResultsByBlockID indicates an expected call of GetTransactionResultsByBlockID. +func (mr *MockEmulatorMockRecorder) GetTransactionResultsByBlockID(blockID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTransactionResultsByBlockID", reflect.TypeOf((*MockEmulator)(nil).GetTransactionResultsByBlockID), blockID) +} + +// GetTransactionsByBlockID mocks base method. +func (m *MockEmulator) GetTransactionsByBlockID(blockID flow.Identifier) ([]*flow.TransactionBody, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTransactionsByBlockID", blockID) + ret0, _ := ret[0].([]*flow.TransactionBody) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTransactionsByBlockID indicates an expected call of GetTransactionsByBlockID. 
+func (mr *MockEmulatorMockRecorder) GetTransactionsByBlockID(blockID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTransactionsByBlockID", reflect.TypeOf((*MockEmulator)(nil).GetTransactionsByBlockID), blockID) +} + +// Ping mocks base method. +func (m *MockEmulator) Ping() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Ping") + ret0, _ := ret[0].(error) + return ret0 +} + +// Ping indicates an expected call of Ping. +func (mr *MockEmulatorMockRecorder) Ping() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockEmulator)(nil).Ping)) +} + +// SendTransaction mocks base method. +func (m *MockEmulator) SendTransaction(tx *flow.TransactionBody) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendTransaction", tx) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendTransaction indicates an expected call of SendTransaction. +func (mr *MockEmulatorMockRecorder) SendTransaction(tx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendTransaction", reflect.TypeOf((*MockEmulator)(nil).SendTransaction), tx) +} + +// ServiceKey mocks base method. +func (m *MockEmulator) ServiceKey() emulator.ServiceKey { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ServiceKey") + ret0, _ := ret[0].(emulator.ServiceKey) + return ret0 +} + +// ServiceKey indicates an expected call of ServiceKey. +func (mr *MockEmulatorMockRecorder) ServiceKey() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceKey", reflect.TypeOf((*MockEmulator)(nil).ServiceKey)) +} diff --git a/integration/internal/emulator/mocks/emulatorStorage.go b/integration/internal/emulator/mocks/emulatorStorage.go new file mode 100644 index 00000000000..32f9b4d6170 --- /dev/null +++ b/integration/internal/emulator/mocks/emulatorStorage.go @@ -0,0 +1,324 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/onflow/flow-emulator/emulator (interfaces: EmulatorStorage) +// +// Generated by this command: +// +// mockgen -destination=emulator/mocks/emulatorStorage.go -package=mocks github.com/onflow/flow-emulator/emulator EmulatorStorage +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" + + snapshot "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/integration/internal/emulator" + flow "github.com/onflow/flow-go/model/flow" +) + +// MockEmulatorStorage is a mock of EmulatorStorage interface. +type MockEmulatorStorage struct { + ctrl *gomock.Controller + recorder *MockEmulatorStorageMockRecorder + isgomock struct{} +} + +// MockEmulatorStorageMockRecorder is the mock recorder for MockEmulatorStorage. +type MockEmulatorStorageMockRecorder struct { + mock *MockEmulatorStorage +} + +// NewMockEmulatorStorage creates a new mock instance. +func NewMockEmulatorStorage(ctrl *gomock.Controller) *MockEmulatorStorage { + mock := &MockEmulatorStorage{ctrl: ctrl} + mock.recorder = &MockEmulatorStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEmulatorStorage) EXPECT() *MockEmulatorStorageMockRecorder { + return m.recorder +} + +// BlockByHeight mocks base method. 
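+// A minimal sketch of stubbing this call from a test, assuming a gomock
+// controller ctrl and a prepared flow.Block named block:
+//
+//	store := NewMockEmulatorStorage(ctrl)
+//	store.EXPECT().BlockByHeight(gomock.Any(), gomock.Any()).Return(&block, nil)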
+func (m *MockEmulatorStorage) BlockByHeight(ctx context.Context, height uint64) (*flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlockByHeight", ctx, height) + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BlockByHeight indicates an expected call of BlockByHeight. +func (mr *MockEmulatorStorageMockRecorder) BlockByHeight(ctx, height any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockByHeight", reflect.TypeOf((*MockEmulatorStorage)(nil).BlockByHeight), ctx, height) +} + +// BlockByID mocks base method. +func (m *MockEmulatorStorage) BlockByID(ctx context.Context, blockID flow.Identifier) (*flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlockByID", ctx, blockID) + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BlockByID indicates an expected call of BlockByID. +func (mr *MockEmulatorStorageMockRecorder) BlockByID(ctx, blockID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockByID", reflect.TypeOf((*MockEmulatorStorage)(nil).BlockByID), ctx, blockID) +} + +// ByHeightFrom mocks base method. +func (m *MockEmulatorStorage) ByHeightFrom(height uint64, header *flow.Header) (*flow.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ByHeightFrom", height, header) + ret0, _ := ret[0].(*flow.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ByHeightFrom indicates an expected call of ByHeightFrom. +func (mr *MockEmulatorStorageMockRecorder) ByHeightFrom(height, header any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ByHeightFrom", reflect.TypeOf((*MockEmulatorStorage)(nil).ByHeightFrom), height, header) +} + +// CollectionByID mocks base method. +func (m *MockEmulatorStorage) CollectionByID(ctx context.Context, collectionID flow.Identifier) (flow.LightCollection, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CollectionByID", ctx, collectionID) + ret0, _ := ret[0].(flow.LightCollection) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CollectionByID indicates an expected call of CollectionByID. +func (mr *MockEmulatorStorageMockRecorder) CollectionByID(ctx, collectionID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CollectionByID", reflect.TypeOf((*MockEmulatorStorage)(nil).CollectionByID), ctx, collectionID) +} + +// CommitBlock mocks base method. +func (m *MockEmulatorStorage) CommitBlock(ctx context.Context, block flow.Block, collections []*flow.LightCollection, transactions map[flow.Identifier]*flow.TransactionBody, transactionResults map[flow.Identifier]*emulator.StorableTransactionResult, executionSnapshot *snapshot.ExecutionSnapshot, events []flow.Event) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitBlock", ctx, block, collections, transactions, transactionResults, executionSnapshot, events) + ret0, _ := ret[0].(error) + return ret0 +} + +// CommitBlock indicates an expected call of CommitBlock. 
+func (mr *MockEmulatorStorageMockRecorder) CommitBlock(ctx, block, collections, transactions, transactionResults, executionSnapshot, events any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitBlock", reflect.TypeOf((*MockEmulatorStorage)(nil).CommitBlock), ctx, block, collections, transactions, transactionResults, executionSnapshot, events) +} + +// EventsByHeight mocks base method. +func (m *MockEmulatorStorage) EventsByHeight(ctx context.Context, blockHeight uint64, eventType string) ([]flow.Event, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EventsByHeight", ctx, blockHeight, eventType) + ret0, _ := ret[0].([]flow.Event) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EventsByHeight indicates an expected call of EventsByHeight. +func (mr *MockEmulatorStorageMockRecorder) EventsByHeight(ctx, blockHeight, eventType any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EventsByHeight", reflect.TypeOf((*MockEmulatorStorage)(nil).EventsByHeight), ctx, blockHeight, eventType) +} + +// FinalizedHeader mocks base method. +func (m *MockEmulatorStorage) FinalizedHeader() (*flow.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FinalizedHeader") + ret0, _ := ret[0].(*flow.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FinalizedHeader indicates an expected call of FinalizedHeader. +func (mr *MockEmulatorStorageMockRecorder) FinalizedHeader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizedHeader", reflect.TypeOf((*MockEmulatorStorage)(nil).FinalizedHeader)) +} + +// FullCollectionByID mocks base method. +func (m *MockEmulatorStorage) FullCollectionByID(ctx context.Context, collectionID flow.Identifier) (flow.Collection, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FullCollectionByID", ctx, collectionID) + ret0, _ := ret[0].(flow.Collection) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FullCollectionByID indicates an expected call of FullCollectionByID. +func (mr *MockEmulatorStorageMockRecorder) FullCollectionByID(ctx, collectionID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FullCollectionByID", reflect.TypeOf((*MockEmulatorStorage)(nil).FullCollectionByID), ctx, collectionID) +} + +// HeaderByID mocks base method. +func (m *MockEmulatorStorage) HeaderByID(id flow.Identifier) (*flow.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeaderByID", id) + ret0, _ := ret[0].(*flow.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeaderByID indicates an expected call of HeaderByID. +func (mr *MockEmulatorStorageMockRecorder) HeaderByID(id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeaderByID", reflect.TypeOf((*MockEmulatorStorage)(nil).HeaderByID), id) +} + +// IndexedHeight mocks base method. +func (m *MockEmulatorStorage) IndexedHeight() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IndexedHeight") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IndexedHeight indicates an expected call of IndexedHeight. +func (mr *MockEmulatorStorageMockRecorder) IndexedHeight() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IndexedHeight", reflect.TypeOf((*MockEmulatorStorage)(nil).IndexedHeight)) +} + +// LatestBlock mocks base method. 
+func (m *MockEmulatorStorage) LatestBlock(ctx context.Context) (flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LatestBlock", ctx) + ret0, _ := ret[0].(flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LatestBlock indicates an expected call of LatestBlock. +func (mr *MockEmulatorStorageMockRecorder) LatestBlock(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LatestBlock", reflect.TypeOf((*MockEmulatorStorage)(nil).LatestBlock), ctx) +} + +// LatestBlockHeight mocks base method. +func (m *MockEmulatorStorage) LatestBlockHeight(ctx context.Context) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LatestBlockHeight", ctx) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LatestBlockHeight indicates an expected call of LatestBlockHeight. +func (mr *MockEmulatorStorageMockRecorder) LatestBlockHeight(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LatestBlockHeight", reflect.TypeOf((*MockEmulatorStorage)(nil).LatestBlockHeight), ctx) +} + +// LedgerByHeight mocks base method. +func (m *MockEmulatorStorage) LedgerByHeight(ctx context.Context, blockHeight uint64) (snapshot.StorageSnapshot, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LedgerByHeight", ctx, blockHeight) + ret0, _ := ret[0].(snapshot.StorageSnapshot) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LedgerByHeight indicates an expected call of LedgerByHeight. +func (mr *MockEmulatorStorageMockRecorder) LedgerByHeight(ctx, blockHeight any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LedgerByHeight", reflect.TypeOf((*MockEmulatorStorage)(nil).LedgerByHeight), ctx, blockHeight) +} + +// SealedHeader mocks base method. +func (m *MockEmulatorStorage) SealedHeader() (*flow.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SealedHeader") + ret0, _ := ret[0].(*flow.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SealedHeader indicates an expected call of SealedHeader. +func (mr *MockEmulatorStorageMockRecorder) SealedHeader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SealedHeader", reflect.TypeOf((*MockEmulatorStorage)(nil).SealedHeader)) +} + +// Start mocks base method. +func (m *MockEmulatorStorage) Start() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Start") + ret0, _ := ret[0].(error) + return ret0 +} + +// Start indicates an expected call of Start. +func (mr *MockEmulatorStorageMockRecorder) Start() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockEmulatorStorage)(nil).Start)) +} + +// Stop mocks base method. +func (m *MockEmulatorStorage) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop. +func (mr *MockEmulatorStorageMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockEmulatorStorage)(nil).Stop)) +} + +// StoreBlock mocks base method. +func (m *MockEmulatorStorage) StoreBlock(ctx context.Context, block *flow.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StoreBlock", ctx, block) + ret0, _ := ret[0].(error) + return ret0 +} + +// StoreBlock indicates an expected call of StoreBlock. 
+func (mr *MockEmulatorStorageMockRecorder) StoreBlock(ctx, block any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreBlock", reflect.TypeOf((*MockEmulatorStorage)(nil).StoreBlock), ctx, block) +} + +// TransactionByID mocks base method. +func (m *MockEmulatorStorage) TransactionByID(ctx context.Context, transactionID flow.Identifier) (flow.TransactionBody, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TransactionByID", ctx, transactionID) + ret0, _ := ret[0].(flow.TransactionBody) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TransactionByID indicates an expected call of TransactionByID. +func (mr *MockEmulatorStorageMockRecorder) TransactionByID(ctx, transactionID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionByID", reflect.TypeOf((*MockEmulatorStorage)(nil).TransactionByID), ctx, transactionID) +} + +// TransactionResultByID mocks base method. +func (m *MockEmulatorStorage) TransactionResultByID(ctx context.Context, transactionID flow.Identifier) (emulator.StorableTransactionResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TransactionResultByID", ctx, transactionID) + ret0, _ := ret[0].(emulator.StorableTransactionResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TransactionResultByID indicates an expected call of TransactionResultByID. +func (mr *MockEmulatorStorageMockRecorder) TransactionResultByID(ctx, transactionID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionResultByID", reflect.TypeOf((*MockEmulatorStorage)(nil).TransactionResultByID), ctx, transactionID) +} diff --git a/integration/internal/emulator/pendingBlock.go b/integration/internal/emulator/pendingBlock.go new file mode 100644 index 00000000000..4d92e8a40a7 --- /dev/null +++ b/integration/internal/emulator/pendingBlock.go @@ -0,0 +1,236 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package emulator + +import ( + "math/rand" + "time" + + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/fvm/storage/state" + flowgo "github.com/onflow/flow-go/model/flow" +) + +type IndexedTransactionResult struct { + fvm.ProcedureOutput + Index uint32 +} + +// MaxViewIncrease represents the largest difference in view number between +// two consecutive blocks. The minimum view increment is 1. +const MaxViewIncrease = 3 + +// A pendingBlock contains the pending state required to form a new block. 
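+//
+// A typical lifecycle, as suggested by the methods below: transactions are
+// queued with AddTransaction, executed one at a time with
+// ExecuteNextTransaction (which merges each execution snapshot into the
+// working ledger state), and the accumulated changes are captured with
+// Finalize before the block is committed to storage.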
+type pendingBlock struct { + height uint64 + view uint64 + parentID flowgo.Identifier + timestamp time.Time + // mapping from transaction ID to transaction + transactions map[flowgo.Identifier]*flowgo.TransactionBody + // list of transaction IDs in the block + transactionIDs []flowgo.Identifier + // mapping from transaction ID to transaction result + transactionResults map[flowgo.Identifier]IndexedTransactionResult + // current working ledger, updated after each transaction execution + ledgerState *state.ExecutionState + // events emitted during execution + events []flowgo.Event + // index of transaction execution + index uint32 +} + +// newPendingBlock creates a new pending block sequentially after a specified block. +func newPendingBlock( + prevBlock *flowgo.Block, + ledgerSnapshot snapshot.StorageSnapshot, + timestamp time.Time, +) *pendingBlock { + return &pendingBlock{ + height: prevBlock.Header.Height + 1, + // the view increments by between 1 and MaxViewIncrease to match + // behaviour on a real network, where views are not consecutive + view: prevBlock.Header.View + uint64(rand.Intn(MaxViewIncrease)+1), + parentID: prevBlock.ID(), + timestamp: timestamp, + transactions: make(map[flowgo.Identifier]*flowgo.TransactionBody), + transactionIDs: make([]flowgo.Identifier, 0), + transactionResults: make(map[flowgo.Identifier]IndexedTransactionResult), + ledgerState: state.NewExecutionState( + ledgerSnapshot, + state.DefaultParameters()), + events: make([]flowgo.Event, 0), + index: 0, + } +} + +// ID returns the ID of the pending block. +func (b *pendingBlock) ID() flowgo.Identifier { + return b.Block().ID() +} + +// Block returns the block information for the pending block. +func (b *pendingBlock) Block() *flowgo.Block { + collections := b.Collections() + + guarantees := make([]*flowgo.CollectionGuarantee, len(collections)) + for i, collection := range collections { + guarantees[i] = &flowgo.CollectionGuarantee{ + CollectionID: collection.ID(), + } + } + + return &flowgo.Block{ + Header: &flowgo.Header{ + Height: b.height, + View: b.view, + ParentID: b.parentID, + Timestamp: b.timestamp, + }, + Payload: &flowgo.Payload{ + Guarantees: guarantees, + }, + } +} + +func (b *pendingBlock) Collections() []*flowgo.LightCollection { + if len(b.transactionIDs) == 0 { + return []*flowgo.LightCollection{} + } + + transactionIDs := make([]flowgo.Identifier, len(b.transactionIDs)) + + // TODO: remove once SDK models are removed + copy(transactionIDs, b.transactionIDs) + + collection := flowgo.LightCollection{Transactions: transactionIDs} + + return []*flowgo.LightCollection{&collection} +} + +func (b *pendingBlock) Transactions() map[flowgo.Identifier]*flowgo.TransactionBody { + return b.transactions +} + +func (b *pendingBlock) TransactionResults() map[flowgo.Identifier]IndexedTransactionResult { + return b.transactionResults +} + +// Finalize returns the execution snapshot for the pending block. +func (b *pendingBlock) Finalize() *snapshot.ExecutionSnapshot { + return b.ledgerState.Finalize() +} + +// AddTransaction adds a transaction to the pending block. +func (b *pendingBlock) AddTransaction(tx flowgo.TransactionBody) { + b.transactionIDs = append(b.transactionIDs, tx.ID()) + b.transactions[tx.ID()] = &tx +} + +// ContainsTransaction checks if a transaction is included in the pending block. +func (b *pendingBlock) ContainsTransaction(txID flowgo.Identifier) bool { + _, exists := b.transactions[txID] + return exists +} + +// GetTransaction retrieves a transaction in the pending block by ID. 
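+// It returns nil if no transaction with the given ID has been added to the
+// pending block.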
+func (b *pendingBlock) GetTransaction(txID flowgo.Identifier) *flowgo.TransactionBody { + return b.transactions[txID] +} + +// NextTransaction returns the next indexed transaction. +func (b *pendingBlock) NextTransaction() *flowgo.TransactionBody { + if int(b.index) > len(b.transactionIDs) { + return nil + } + + txID := b.transactionIDs[b.index] + return b.GetTransaction(txID) +} + +// ExecuteNextTransaction executes the next transaction in the pending block. +// +// This function uses the provided execute function to perform the actual +// execution, then updates the pending block with the output. +func (b *pendingBlock) ExecuteNextTransaction( + vm *fvm.VirtualMachine, + ctx fvm.Context, +) ( + fvm.ProcedureOutput, + error, +) { + txnBody := b.NextTransaction() + txnIndex := b.index + + // increment transaction index even if transaction reverts + b.index++ + + executionSnapshot, output, err := vm.Run( + ctx, + fvm.Transaction(txnBody, txnIndex), + b.ledgerState) + if err != nil { + // fail fast if fatal error occurs + return fvm.ProcedureOutput{}, err + } + + b.events = append(b.events, output.Events...) + + err = b.ledgerState.Merge(executionSnapshot) + if err != nil { + // fail fast if fatal error occurs + return fvm.ProcedureOutput{}, err + } + + b.transactionResults[txnBody.ID()] = IndexedTransactionResult{ + ProcedureOutput: output, + Index: txnIndex, + } + + return output, nil +} + +// Events returns all events captured during the execution of the pending block. +func (b *pendingBlock) Events() []flowgo.Event { + return b.events +} + +// ExecutionStarted returns true if the pending block has started executing. +func (b *pendingBlock) ExecutionStarted() bool { + return b.index > 0 +} + +// ExecutionComplete returns true if the pending block is fully executed. +func (b *pendingBlock) ExecutionComplete() bool { + return b.index >= uint32(b.Size()) +} + +// Size returns the number of transactions in the pending block. +func (b *pendingBlock) Size() int { + return len(b.transactionIDs) +} + +// Empty returns true if the pending block is empty. +func (b *pendingBlock) Empty() bool { + return b.Size() == 0 +} + +func (b *pendingBlock) SetTimestamp(timestamp time.Time) { + b.timestamp = timestamp +} diff --git a/integration/internal/emulator/result.go b/integration/internal/emulator/result.go new file mode 100644 index 00000000000..a726a91d011 --- /dev/null +++ b/integration/internal/emulator/result.go @@ -0,0 +1,104 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package emulator + +import ( + "fmt" + + "github.com/onflow/cadence" + + flowsdk "github.com/onflow/flow-go-sdk" + + flowgo "github.com/onflow/flow-go/model/flow" +) + +type StorableTransactionResult struct { + ErrorCode int + ErrorMessage string + Logs []string + Events []flowgo.Event + BlockID flowgo.Identifier + BlockHeight uint64 +} + +// A TransactionResult is the result of executing a transaction. 
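+//
+// Succeeded and Reverted below are the usual way to branch on the outcome;
+// a short sketch:
+//
+//	if result.Reverted() {
+//		return fmt.Errorf("transaction %s failed: %w", result.TransactionID, result.Error)
+//	}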
+type TransactionResult struct { + TransactionID flowsdk.Identifier + ComputationUsed uint64 + MemoryEstimate uint64 + Error error + Logs []string + Events []flowsdk.Event + Debug *TransactionResultDebug +} + +// Succeeded returns true if the transaction executed without errors. +func (r TransactionResult) Succeeded() bool { + return r.Error == nil +} + +// Reverted returns true if the transaction executed with errors. +func (r TransactionResult) Reverted() bool { + return !r.Succeeded() +} + +// TransactionResultDebug provides details about unsuccessful transaction execution +type TransactionResultDebug struct { + Message string + Meta map[string]any +} + +// NewTransactionInvalidSignature creates more debug details for transactions with invalid signature +func NewTransactionInvalidSignature( + tx *flowgo.TransactionBody, +) *TransactionResultDebug { + return &TransactionResultDebug{ + Message: "", + Meta: map[string]any{ + "payer": tx.Payer.String(), + "proposer": tx.ProposalKey.Address.String(), + "proposerKeyIndex": fmt.Sprintf("%d", tx.ProposalKey.KeyIndex), + "authorizers": fmt.Sprintf("%v", tx.Authorizers), + "gasLimit": fmt.Sprintf("%d", tx.GasLimit), + }, + } +} + +// TODO - this class should be part of SDK for consistency + +// A ScriptResult is the result of executing a script. +type ScriptResult struct { + ScriptID flowgo.Identifier + Value cadence.Value + Error error + Logs []string + Events []flowgo.Event + ComputationUsed uint64 + MemoryEstimate uint64 +} + +// Succeeded returns true if the script executed without errors. +func (r ScriptResult) Succeeded() bool { + return r.Error == nil +} + +// Reverted returns true if the script executed with errors. +func (r ScriptResult) Reverted() bool { + return !r.Succeeded() +} diff --git a/integration/internal/emulator/sdk.go b/integration/internal/emulator/sdk.go new file mode 100644 index 00000000000..06b6085ec6a --- /dev/null +++ b/integration/internal/emulator/sdk.go @@ -0,0 +1,551 @@ +package emulator + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/cadence" + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/onflow/cadence/stdlib" + "github.com/onflow/flow/protobuf/go/flow/entities" + + sdk "github.com/onflow/flow-go-sdk" + "github.com/onflow/flow-go-sdk/templates" + + "github.com/onflow/flow-go/access" + flowgo "github.com/onflow/flow-go/model/flow" +) + +// SDKAdapter wraps an emulated emulator and implements the RPC handlers +// required by the Access API. +type SDKAdapter struct { + logger *zerolog.Logger + emulator Emulator +} + +func (b *SDKAdapter) EnableAutoMine() { + b.emulator.EnableAutoMine() +} +func (b *SDKAdapter) DisableAutoMine() { + b.emulator.DisableAutoMine() +} + +func (b *SDKAdapter) Emulator() Emulator { + return b.emulator +} + +// NewSDKAdapter returns a new SDKAdapter. +func NewSDKAdapter(logger *zerolog.Logger, emulator Emulator) *SDKAdapter { + return &SDKAdapter{ + logger: logger, + emulator: emulator, + } +} + +func (b *SDKAdapter) Ping(ctx context.Context) error { + return b.emulator.Ping() +} + +func (b *SDKAdapter) GetChainID(ctx context.Context) sdk.ChainID { + return sdk.ChainID(b.emulator.GetNetworkParameters().ChainID) +} + +// GetLatestBlockHeader gets the latest sealed block header. 
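+// The boolean isSealed argument is ignored; the emulator always reports the
+// latest block with status sdk.BlockStatusSealed.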
+func (b *SDKAdapter) GetLatestBlockHeader( + _ context.Context, + _ bool, +) ( + *sdk.BlockHeader, + sdk.BlockStatus, + error, +) { + block, err := b.emulator.GetLatestBlock() + if err != nil { + return nil, sdk.BlockStatusUnknown, status.Error(codes.Internal, err.Error()) + } + blockHeader := sdk.BlockHeader{ + ID: sdk.Identifier(block.ID()), + ParentID: sdk.Identifier(block.Header.ParentID), + Height: block.Header.Height, + Timestamp: block.Header.Timestamp, + } + return &blockHeader, sdk.BlockStatusSealed, nil +} + +// GetBlockHeaderByHeight gets a block header by height. +func (b *SDKAdapter) GetBlockHeaderByHeight( + _ context.Context, + height uint64, +) ( + *sdk.BlockHeader, + sdk.BlockStatus, + error, +) { + block, err := b.emulator.GetBlockByHeight(height) + if err != nil { + return nil, sdk.BlockStatusUnknown, status.Error(codes.Internal, err.Error()) + } + blockHeader := sdk.BlockHeader{ + ID: sdk.Identifier(block.ID()), + ParentID: sdk.Identifier(block.Header.ParentID), + Height: block.Header.Height, + Timestamp: block.Header.Timestamp, + } + return &blockHeader, sdk.BlockStatusSealed, nil +} + +// GetBlockHeaderByID gets a block header by ID. +func (b *SDKAdapter) GetBlockHeaderByID( + _ context.Context, + id sdk.Identifier, +) ( + *sdk.BlockHeader, + sdk.BlockStatus, + error, +) { + block, err := b.emulator.GetBlockByID(SDKIdentifierToFlow(id)) + if err != nil { + return nil, sdk.BlockStatusUnknown, err + } + blockHeader := sdk.BlockHeader{ + ID: sdk.Identifier(block.ID()), + ParentID: sdk.Identifier(block.Header.ParentID), + Height: block.Header.Height, + Timestamp: block.Header.Timestamp, + } + return &blockHeader, sdk.BlockStatusSealed, nil +} + +// GetLatestBlock gets the latest sealed block. +func (b *SDKAdapter) GetLatestBlock( + _ context.Context, + _ bool, +) ( + *sdk.Block, + sdk.BlockStatus, + error, +) { + flowBlock, err := b.emulator.GetLatestBlock() + if err != nil { + return nil, sdk.BlockStatusUnknown, status.Error(codes.Internal, err.Error()) + } + block := sdk.Block{ + BlockHeader: sdk.BlockHeader{ + ID: sdk.Identifier(flowBlock.ID()), + ParentID: sdk.Identifier(flowBlock.Header.ParentID), + Height: flowBlock.Header.Height, + Timestamp: flowBlock.Header.Timestamp, + }, + BlockPayload: convertBlockPayload(flowBlock.Payload), + } + return &block, sdk.BlockStatusSealed, nil +} + +// GetBlockByHeight gets a block by height. +func (b *SDKAdapter) GetBlockByHeight( + ctx context.Context, + height uint64, +) ( + *sdk.Block, + sdk.BlockStatus, + error, +) { + flowBlock, err := b.emulator.GetBlockByHeight(height) + if err != nil { + return nil, sdk.BlockStatusUnknown, status.Error(codes.Internal, err.Error()) + } + block := sdk.Block{ + BlockHeader: sdk.BlockHeader{ + ID: sdk.Identifier(flowBlock.ID()), + ParentID: sdk.Identifier(flowBlock.Header.ParentID), + Height: flowBlock.Header.Height, + Timestamp: flowBlock.Header.Timestamp, + }, + BlockPayload: convertBlockPayload(flowBlock.Payload), + } + return &block, sdk.BlockStatusSealed, nil +} + +// GetBlockByID gets a block by ID. 
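+// The SDK identifier is converted with SDKIdentifierToFlow before the emulator
+// lookup, and the resulting block is mapped back to SDK types, including its
+// payload via convertBlockPayload.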
+func (b *SDKAdapter) GetBlockByID( + _ context.Context, + id sdk.Identifier, +) ( + *sdk.Block, + sdk.BlockStatus, + error, +) { + flowBlock, err := b.emulator.GetBlockByID(SDKIdentifierToFlow(id)) + if err != nil { + return nil, sdk.BlockStatusUnknown, status.Error(codes.Internal, err.Error()) + } + block := sdk.Block{ + BlockHeader: sdk.BlockHeader{ + ID: sdk.Identifier(flowBlock.ID()), + ParentID: sdk.Identifier(flowBlock.Header.ParentID), + Height: flowBlock.Header.Height, + Timestamp: flowBlock.Header.Timestamp, + }, + BlockPayload: convertBlockPayload(flowBlock.Payload), + } + return &block, sdk.BlockStatusSealed, nil +} + +func convertBlockPayload(payload *flowgo.Payload) sdk.BlockPayload { + var seals []*sdk.BlockSeal + sealCount := len(payload.Seals) + if sealCount > 0 { + seals = make([]*sdk.BlockSeal, 0, sealCount) + for _, seal := range payload.Seals { + seals = append(seals, &sdk.BlockSeal{ + BlockID: sdk.Identifier(seal.BlockID), + ExecutionReceiptID: sdk.Identifier(seal.ResultID), + }) + } + } + + var collectionGuarantees []*sdk.CollectionGuarantee + guaranteesCount := len(payload.Guarantees) + if guaranteesCount > 0 { + collectionGuarantees = make([]*sdk.CollectionGuarantee, 0, guaranteesCount) + for _, guarantee := range payload.Guarantees { + collectionGuarantees = append(collectionGuarantees, &sdk.CollectionGuarantee{ + CollectionID: sdk.Identifier(guarantee.CollectionID), + }) + } + } + + return sdk.BlockPayload{ + Seals: seals, + CollectionGuarantees: collectionGuarantees, + } +} + +// GetCollectionByID gets a collection by ID. +func (b *SDKAdapter) GetCollectionByID( + _ context.Context, + id sdk.Identifier, +) (*sdk.Collection, error) { + flowCollection, err := b.emulator.GetCollectionByID(SDKIdentifierToFlow(id)) + if err != nil { + return nil, err + } + collection := FlowLightCollectionToSDK(*flowCollection) + return &collection, nil +} + +func (b *SDKAdapter) SendTransaction(ctx context.Context, tx sdk.Transaction) error { + flowTx := SDKTransactionToFlow(tx) + return b.emulator.SendTransaction(flowTx) +} + +// GetTransaction gets a transaction by ID. +func (b *SDKAdapter) GetTransaction( + ctx context.Context, + id sdk.Identifier, +) (*sdk.Transaction, error) { + tx, err := b.emulator.GetTransaction(SDKIdentifierToFlow(id)) + if err != nil { + return nil, err + } + sdkTx := FlowTransactionToSDK(*tx) + return &sdkTx, nil +} + +// GetTransactionResult gets a transaction by ID. +func (b *SDKAdapter) GetTransactionResult( + ctx context.Context, + id sdk.Identifier, +) (*sdk.TransactionResult, error) { + flowResult, err := b.emulator.GetTransactionResult(SDKIdentifierToFlow(id)) + if err != nil { + return nil, err + } + return FlowTransactionResultToSDK(flowResult) +} + +// GetAccount returns an account by address at the latest sealed block. +func (b *SDKAdapter) GetAccount( + ctx context.Context, + address sdk.Address, +) (*sdk.Account, error) { + account, err := b.getAccount(address) + if err != nil { + return nil, err + } + return account, nil +} + +// GetAccountAtLatestBlock returns an account by address at the latest sealed block. 
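+// It behaves identically to GetAccount above; both delegate to the getAccount
+// helper, which queries the wrapped emulator.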
+func (b *SDKAdapter) GetAccountAtLatestBlock( + ctx context.Context, + address sdk.Address, +) (*sdk.Account, error) { + account, err := b.getAccount(address) + if err != nil { + return nil, err + } + return account, nil +} + +func (b *SDKAdapter) getAccount(address sdk.Address) (*sdk.Account, error) { + account, err := b.emulator.GetAccount(SDKAddressToFlow(address)) + if err != nil { + return nil, err + } + return FlowAccountToSDK(*account) +} + +func (b *SDKAdapter) GetAccountAtBlockHeight( + ctx context.Context, + address sdk.Address, + height uint64, +) (*sdk.Account, error) { + account, err := b.emulator.GetAccountAtBlockHeight(SDKAddressToFlow(address), height) + if err != nil { + return nil, err + } + return FlowAccountToSDK(*account) +} + +// ExecuteScriptAtLatestBlock executes a script at a the latest block +func (b *SDKAdapter) ExecuteScriptAtLatestBlock( + ctx context.Context, + script []byte, + arguments [][]byte, +) ([]byte, error) { + block, err := b.emulator.GetLatestBlock() + if err != nil { + return nil, err + } + return b.executeScriptAtBlock(script, arguments, block.Header.Height) +} + +// ExecuteScriptAtBlockHeight executes a script at a specific block height +func (b *SDKAdapter) ExecuteScriptAtBlockHeight( + ctx context.Context, + blockHeight uint64, + script []byte, + arguments [][]byte, +) ([]byte, error) { + return b.executeScriptAtBlock(script, arguments, blockHeight) +} + +// ExecuteScriptAtBlockID executes a script at a specific block ID +func (b *SDKAdapter) ExecuteScriptAtBlockID( + ctx context.Context, + blockID sdk.Identifier, + script []byte, + arguments [][]byte, +) ([]byte, error) { + block, err := b.emulator.GetBlockByID(SDKIdentifierToFlow(blockID)) + if err != nil { + return nil, err + } + return b.executeScriptAtBlock(script, arguments, block.Header.Height) +} + +// executeScriptAtBlock is a helper for executing a script at a specific block +func (b *SDKAdapter) executeScriptAtBlock(script []byte, arguments [][]byte, blockHeight uint64) ([]byte, error) { + result, err := b.emulator.ExecuteScriptAtBlockHeight(script, arguments, blockHeight) + if err != nil { + return nil, err + } + if !result.Succeeded() { + return nil, result.Error + } + valueBytes, err := jsoncdc.Encode(result.Value) + if err != nil { + return nil, err + } + return valueBytes, nil +} + +func (b *SDKAdapter) GetLatestProtocolStateSnapshot(_ context.Context) ([]byte, error) { + return nil, nil +} + +func (a *SDKAdapter) GetProtocolStateSnapshotByBlockID(_ context.Context, _ flowgo.Identifier) ([]byte, error) { + return nil, nil +} + +func (a *SDKAdapter) GetProtocolStateSnapshotByHeight(_ context.Context, _ uint64) ([]byte, error) { + return nil, nil +} + +func (b *SDKAdapter) GetExecutionResultForBlockID(_ context.Context, _ sdk.Identifier) (*sdk.ExecutionResult, error) { + return nil, nil +} + +func (b *SDKAdapter) GetSystemTransaction(_ context.Context, _ flowgo.Identifier) (*flowgo.TransactionBody, error) { + return nil, nil +} + +func (b *SDKAdapter) GetSystemTransactionResult(_ context.Context, _ flowgo.Identifier, _ entities.EventEncodingVersion) (*access.TransactionResult, error) { + return nil, nil +} + +func (b *SDKAdapter) GetTransactionsByBlockID(ctx context.Context, id sdk.Identifier) ([]*sdk.Transaction, error) { + result := []*sdk.Transaction{} + transactions, err := b.emulator.GetTransactionsByBlockID(SDKIdentifierToFlow(id)) + if err != nil { + return nil, err + } + for _, transaction := range transactions { + sdkTransaction := FlowTransactionToSDK(*transaction) + 
result = append(result, &sdkTransaction) + + } + return result, nil +} + +func (b *SDKAdapter) GetTransactionResultsByBlockID(ctx context.Context, id sdk.Identifier) ([]*sdk.TransactionResult, error) { + result := []*sdk.TransactionResult{} + transactionResults, err := b.emulator.GetTransactionResultsByBlockID(SDKIdentifierToFlow(id)) + if err != nil { + return nil, err + } + for _, transactionResult := range transactionResults { + sdkResult, err := FlowTransactionResultToSDK(transactionResult) + if err != nil { + return nil, err + } + result = append(result, sdkResult) + } + return result, nil +} + +func (b *SDKAdapter) GetEventsForBlockIDs(ctx context.Context, eventType string, blockIDs []sdk.Identifier) ([]*sdk.BlockEvents, error) { + result := []*sdk.BlockEvents{} + flowBlockEvents, err := b.emulator.GetEventsForBlockIDs(eventType, SDKIdentifiersToFlow(blockIDs)) + if err != nil { + return nil, err + } + + for _, flowBlockEvent := range flowBlockEvents { + sdkEvents, err := FlowEventsToSDK(flowBlockEvent.Events) + if err != nil { + return nil, err + } + + sdkBlockEvents := &sdk.BlockEvents{ + BlockID: sdk.Identifier(flowBlockEvent.BlockID), + Height: flowBlockEvent.BlockHeight, + BlockTimestamp: flowBlockEvent.BlockTimestamp, + Events: sdkEvents, + } + + result = append(result, sdkBlockEvents) + + } + + return result, nil +} + +func (b *SDKAdapter) GetEventsForHeightRange(ctx context.Context, eventType string, startHeight, endHeight uint64) ([]*sdk.BlockEvents, error) { + result := []*sdk.BlockEvents{} + + flowBlockEvents, err := b.emulator.GetEventsForHeightRange(eventType, startHeight, endHeight) + if err != nil { + return nil, err + } + + for _, flowBlockEvent := range flowBlockEvents { + sdkEvents, err := FlowEventsToSDK(flowBlockEvent.Events) + + if err != nil { + return nil, err + } + + sdkBlockEvents := &sdk.BlockEvents{ + BlockID: sdk.Identifier(flowBlockEvent.BlockID), + Height: flowBlockEvent.BlockHeight, + BlockTimestamp: flowBlockEvent.BlockTimestamp, + Events: sdkEvents, + } + + result = append(result, sdkBlockEvents) + + } + + return result, nil +} + +// CreateAccount submits a transaction to create a new account with the given +// account keys and contracts. The transaction is paid by the service account. +func (b *SDKAdapter) CreateAccount(ctx context.Context, publicKeys []*sdk.AccountKey, contracts []templates.Contract) (sdk.Address, error) { + + serviceKey := b.emulator.ServiceKey() + latestBlock, err := b.emulator.GetLatestBlock() + serviceAddress := FlowAddressToSDK(serviceKey.Address) + + if err != nil { + return sdk.Address{}, err + } + + if publicKeys == nil { + publicKeys = []*sdk.AccountKey{} + } + tx, err := templates.CreateAccount(publicKeys, contracts, serviceAddress) + if err != nil { + return sdk.Address{}, err + } + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetReferenceBlockID(sdk.Identifier(latestBlock.ID())). + SetProposalKey(serviceAddress, serviceKey.Index, serviceKey.SequenceNumber). 
+ SetPayer(serviceAddress) + + signer, err := serviceKey.Signer() + if err != nil { + return sdk.Address{}, err + } + + err = tx.SignEnvelope(serviceAddress, serviceKey.Index, signer) + if err != nil { + return sdk.Address{}, err + } + + err = b.SendTransaction(ctx, *tx) + if err != nil { + return sdk.Address{}, err + } + + _, results, err := b.emulator.ExecuteAndCommitBlock() + if err != nil { + return sdk.Address{}, err + } + lastResult := results[len(results)-1] + + _, err = b.emulator.CommitBlock() + if err != nil { + return sdk.Address{}, err + } + + if !lastResult.Succeeded() { + return sdk.Address{}, lastResult.Error + } + + var address sdk.Address + + for _, event := range lastResult.Events { + if event.Type == sdk.EventAccountCreated { + addressFieldValue := cadence.SearchFieldByName( + event.Value, + stdlib.AccountEventAddressParameter.Identifier, + ) + address = sdk.Address(addressFieldValue.(cadence.Address)) + break + } + } + + if address == (sdk.Address{}) { + return sdk.Address{}, fmt.Errorf("failed to find AccountCreated event") + } + + return address, nil +} diff --git a/integration/internal/emulator/store.go b/integration/internal/emulator/store.go new file mode 100644 index 00000000000..2035268a488 --- /dev/null +++ b/integration/internal/emulator/store.go @@ -0,0 +1,98 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package storage defines the interface and implementations for interacting with +// persistent chain state. +package emulator + +import ( + "context" + + "github.com/psiemens/graceland" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/storage/snapshot" + flowgo "github.com/onflow/flow-go/model/flow" +) + +// EmulatorStorage defines the storage layer for persistent chain state. +// +// This includes finalized blocks and transactions, and the resultant register +// states and emitted events. It does not include pending state, such as pending +// transactions and register states. +// +// Implementations must distinguish between not found errors and errors with +// the underlying storage by returning an instance of store.ErrNotFound if a +// resource cannot be found. +// +// Implementations must be safe for use by multiple goroutines. + +type EmulatorStorage interface { + graceland.Routine + environment.Blocks + access.Blocks + LatestBlockHeight(ctx context.Context) (uint64, error) + + // LatestBlock returns the block with the highest block height. + LatestBlock(ctx context.Context) (flowgo.Block, error) + + // StoreBlock stores the block in storage. If the exactly same block is already in a storage, return successfully + StoreBlock(ctx context.Context, block *flowgo.Block) error + + // BlockByID returns the block with the given hash. It is available for + // finalized and ambiguous blocks. 
+ BlockByID(ctx context.Context, blockID flowgo.Identifier) (*flowgo.Block, error) + + // BlockByHeight returns the block at the given height. It is only available + // for finalized blocks. + BlockByHeight(ctx context.Context, height uint64) (*flowgo.Block, error) + + // CommitBlock atomically saves the execution results for a block. + CommitBlock( + ctx context.Context, + block flowgo.Block, + collections []*flowgo.LightCollection, + transactions map[flowgo.Identifier]*flowgo.TransactionBody, + transactionResults map[flowgo.Identifier]*StorableTransactionResult, + executionSnapshot *snapshot.ExecutionSnapshot, + events []flowgo.Event, + ) error + + // CollectionByID gets the collection (transaction IDs only) with the given ID. + CollectionByID(ctx context.Context, collectionID flowgo.Identifier) (flowgo.LightCollection, error) + + // FullCollectionByID gets the full collection (including transaction bodies) with the given ID. + FullCollectionByID(ctx context.Context, collectionID flowgo.Identifier) (flowgo.Collection, error) + + // TransactionByID gets the transaction with the given ID. + TransactionByID(ctx context.Context, transactionID flowgo.Identifier) (flowgo.TransactionBody, error) + + // TransactionResultByID gets the transaction result with the given ID. + TransactionResultByID(ctx context.Context, transactionID flowgo.Identifier) (StorableTransactionResult, error) + + // LedgerByHeight returns a storage snapshot into the ledger state + // at a given block. + LedgerByHeight( + ctx context.Context, + blockHeight uint64, + ) (snapshot.StorageSnapshot, error) + + // EventsByHeight returns the events in the block at the given height, optionally filtered by type. + EventsByHeight(ctx context.Context, blockHeight uint64, eventType string) ([]flowgo.Event, error) +} diff --git a/integration/internal/emulator/templates/systemChunkTransactionTemplate.cdc b/integration/internal/emulator/templates/systemChunkTransactionTemplate.cdc new file mode 100644 index 00000000000..ef714ed20ba --- /dev/null +++ b/integration/internal/emulator/templates/systemChunkTransactionTemplate.cdc @@ -0,0 +1,16 @@ +import RandomBeaconHistory from "RandomBeaconHistory" +import EVM from "EVM" + +transaction { + prepare(serviceAccount: auth(BorrowValue) &Account) { + let randomBeaconHistoryHeartbeat = serviceAccount.storage + .borrow<&RandomBeaconHistory.Heartbeat>(from: RandomBeaconHistory.HeartbeatStoragePath) + ?? panic("Couldn't borrow RandomBeaconHistory.Heartbeat Resource") + randomBeaconHistoryHeartbeat.heartbeat(randomSourceHistory: randomSourceHistory()) + + let evmHeartbeat = serviceAccount.storage + .borrow<&EVM.Heartbeat>(from: /storage/EVMHeartbeat) + ?? panic("Couldn't borrow EVM.Heartbeat Resource") + evmHeartbeat.heartbeat() + } +} diff --git a/integration/internal/emulator/tests/accounts_test.go b/integration/internal/emulator/tests/accounts_test.go new file mode 100644 index 00000000000..8bf3680db48 --- /dev/null +++ b/integration/internal/emulator/tests/accounts_test.go @@ -0,0 +1,1218 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests + +import ( + "context" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + flowsdk "github.com/onflow/flow-go-sdk" + "github.com/onflow/flow-go-sdk/crypto" + "github.com/onflow/flow-go-sdk/templates" + "github.com/onflow/flow-go-sdk/test" + + fvmerrors "github.com/onflow/flow-go/fvm/errors" + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +const testContract = "access(all) contract Test {}" + +func setupAccountTests(t *testing.T, opts ...emulator.Option) ( + *emulator.Blockchain, + *emulator.SDKAdapter, +) { + b, err := emulator.New( + opts..., + ) + require.NoError(t, err) + + logger := zerolog.Nop() + return b, emulator.NewSDKAdapter(&logger, b) +} + +func TestGetAccount(t *testing.T) { + + t.Parallel() + + t.Run("Get account at latest block height", func(t *testing.T) { + + t.Parallel() + b, adapter := setupAccountTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + acc, err := adapter.GetAccount(context.Background(), serviceAccountAddress) + assert.NoError(t, err) + + assert.Equal(t, uint64(0), acc.Keys[0].SequenceNumber) + + }) + + t.Run("Get account at latest block by index", func(t *testing.T) { + + t.Parallel() + b, adapter := setupAccountTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + acc, err := adapter.GetAccount(context.Background(), serviceAccountAddress) + assert.NoError(t, err) + + assert.Equal(t, uint64(0), acc.Keys[0].SequenceNumber) + + flowAccount, err := b.GetAccountByIndex(1) //service account + assert.NoError(t, err) + + assert.Equal(t, uint64(0), flowAccount.Keys[0].SeqNumber) + assert.Equal(t, acc.Address.String(), flowAccount.Address.String()) + + }) + + t.Run("Get account at latest block height", func(t *testing.T) { + + t.Parallel() + b, adapter := setupAccountTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + acc, err := adapter.GetAccount(context.Background(), serviceAccountAddress) + assert.NoError(t, err) + + assert.Equal(t, uint64(0), acc.Keys[0].SequenceNumber) + + flowAccount, err := b.GetAccountByIndex(1) //service account + assert.NoError(t, err) + + assert.Equal(t, uint64(0), flowAccount.Keys[0].SeqNumber) + assert.Equal(t, acc.Address.String(), flowAccount.Address.String()) + + }) + + t.Run("Get account at specified block height", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupAccountTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + acc, err := adapter.GetAccount(context.Background(), serviceAccountAddress) + assert.NoError(t, err) + + assert.Equal(t, uint64(0), acc.Keys[0].SequenceNumber) + contract := templates.Contract{ + Name: "Test", + Source: testContract, + } + + tx := templates.AddAccountContract(serviceAccountAddress, contract) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + bl, err := b.CommitBlock() + assert.NoError(t, err) + + accNow, err := adapter.GetAccountAtBlockHeight(context.Background(), serviceAccountAddress, bl.Header.Height) + assert.NoError(t, err) + + accPrev, err := adapter.GetAccountAtBlockHeight(context.Background(), serviceAccountAddress, bl.Header.Height-uint64(1)) + assert.NoError(t, err) + + assert.Equal(t, accNow.Keys[0].SequenceNumber, uint64(1)) + assert.Equal(t, accPrev.Keys[0].SequenceNumber, uint64(0)) + }) +} + +func TestCreateAccount(t *testing.T) { + + t.Parallel() + + accountKeys := test.AccountKeyGenerator() + + t.Run("Simple addresses", func(t *testing.T) { + b, adapter := setupAccountTests( + t, + emulator.WithSimpleAddresses(), + ) + + accountKey := accountKeys.New() + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + nil, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err := LastCreatedAccount(b, result) + require.NoError(t, err) + + assert.Equal(t, "0000000000000006", account.Address.Hex()) + assert.Equal(t, uint64(0x186a0), account.Balance) + require.Len(t, account.Keys, 1) + assert.Equal(t, accountKey.PublicKey.Encode(), account.Keys[0].PublicKey.Encode()) + assert.Empty(t, account.Contracts) + }) + + t.Run("Single public keys", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + accountKey := accountKeys.New() + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + nil, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err := LastCreatedAccount(b, result) + require.NoError(t, err) + + require.Len(t, account.Keys, 1) + assert.Equal(t, accountKey.PublicKey.Encode(), account.Keys[0].PublicKey.Encode()) + assert.Empty(t, account.Contracts) + }) + + t.Run("Multiple public keys", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + accountKeyA := accountKeys.New() + accountKeyB := accountKeys.New() + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKeyA, accountKeyB}, + nil, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err := LastCreatedAccount(b, result) + require.NoError(t, err) + + require.Len(t, account.Keys, 2) + assert.Equal(t, accountKeyA.PublicKey.Encode(), account.Keys[0].PublicKey.Encode()) + assert.Equal(t, accountKeyB.PublicKey.Encode(), account.Keys[1].PublicKey.Encode()) + assert.Empty(t, account.Contracts) + }) + + t.Run("Public keys and contract", func(t *testing.T) { + b, adapter := setupAccountTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + accountKeyA := accountKeys.New() + accountKeyB := accountKeys.New() + + contracts := []templates.Contract{ + { + Name: "Test", + Source: testContract, + }, + } + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKeyA, accountKeyB}, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err := LastCreatedAccount(b, result) + require.NoError(t, err) + + require.Len(t, account.Keys, 2) + assert.Equal(t, accountKeyA.PublicKey.Encode(), account.Keys[0].PublicKey.Encode()) + assert.Equal(t, accountKeyB.PublicKey.Encode(), account.Keys[1].PublicKey.Encode()) + assert.Equal(t, + map[string][]byte{ + "Test": []byte(testContract), + }, + account.Contracts, + ) + }) + + t.Run("Public keys and two contracts", func(t *testing.T) { + b, adapter := setupAccountTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + codeA := ` + access(all) contract Test1 { + access(all) fun a(): Int { + return 1 + } + } + ` + codeB := ` + access(all) contract Test2 { + access(all) fun b(): Int { + return 2 + } + } + ` + + accountKey := accountKeys.New() + + contracts := []templates.Contract{ + { + Name: "Test1", + Source: codeA, + }, + { + Name: "Test2", + Source: codeB, + }, + } + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err := LastCreatedAccount(b, result) + require.NoError(t, err) + + require.Len(t, account.Keys, 1) + assert.Equal(t, accountKey.PublicKey.Encode(), account.Keys[0].PublicKey.Encode()) + assert.Equal(t, + map[string][]byte{ + "Test1": []byte(codeA), + "Test2": []byte(codeB), + }, + account.Contracts, + ) + }) + + t.Run("Code and no keys", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + contracts := []templates.Contract{ + { + Name: "Test", + Source: testContract, + }, + } + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + nil, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err := LastCreatedAccount(b, result) + require.NoError(t, err) + + assert.Empty(t, account.Keys) + assert.Equal(t, + map[string][]byte{ + "Test": []byte(testContract), + }, + account.Contracts, + ) + }) + + t.Run("Event emitted", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + accountKey := accountKeys.New() + + contracts := []templates.Contract{ + { + Name: "Test", + Source: testContract, + }, + } + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + block, err := b.CommitBlock() + require.NoError(t, err) + + events, err := adapter.GetEventsForHeightRange(context.Background(), flowsdk.EventAccountCreated, block.Header.Height, block.Header.Height) + require.NoError(t, err) + require.Len(t, events, 1) + + accountEvent := flowsdk.AccountCreatedEvent(events[0].Events[0]) + + account, err := adapter.GetAccount(context.Background(), accountEvent.Address()) + assert.NoError(t, err) + + require.Len(t, account.Keys, 1) + assert.Equal(t, accountKey.PublicKey, account.Keys[0].PublicKey) + assert.Equal(t, + map[string][]byte{ + "Test": []byte(testContract), + }, + account.Contracts, + ) + }) + + t.Run("Invalid hash algorithm", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + accountKey := accountKeys.New() + accountKey.SetHashAlgo(crypto.SHA3_384) // SHA3_384 is invalid for ECDSA_P256 + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + nil, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + + assert.True(t, result.Reverted()) + }) + + t.Run("Invalid code", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + contracts := []templates.Contract{ + { + Name: "Test", + Source: "not a valid script", + }, + } + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + nil, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + + assert.True(t, result.Reverted()) + }) + + t.Run("Invalid contract name", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + contracts := []templates.Contract{ + { + Name: "Test2", + Source: testContract, + }, + } + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + nil, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + + assert.True(t, result.Reverted()) + }) +} + +func TestAddAccountKey(t *testing.T) { + + t.Parallel() + + accountKeys := test.AccountKeyGenerator() + + t.Run("Valid key", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + newAccountKey, newSigner := accountKeys.NewWithSigner() + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx1, err := templates.AddAccountKey(serviceAccountAddress, newAccountKey) + assert.NoError(t, err) + + tx1.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + script := []byte("transaction { execute {} }") + + var newKeyID = uint32(1) // new key will have ID 1 + var newKeySequenceNum uint64 = 0 + + tx2 := flowsdk.NewTransaction(). + SetScript(script). 
+		SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+		SetProposalKey(serviceAccountAddress, newKeyID, newKeySequenceNum).
+		SetPayer(serviceAccountAddress)
+
+	err = tx2.SignEnvelope(serviceAccountAddress, newKeyID, newSigner)
+	assert.NoError(t, err)
+
+	err = adapter.SendTransaction(context.Background(), *tx2)
+	require.NoError(t, err)
+
+	result, err = b.ExecuteNextTransaction()
+	require.NoError(t, err)
+	AssertTransactionSucceeded(t, result)
+
+	_, err = b.CommitBlock()
+	assert.NoError(t, err)
+	})
+
+	t.Run("Invalid hash algorithm", func(t *testing.T) {
+		b, adapter := setupAccountTests(t)
+
+		accountKey := accountKeys.New()
+		accountKey.SetHashAlgo(crypto.SHA3_384) // SHA3_384 is invalid for ECDSA_P256
+		serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address)
+
+		tx, err := templates.AddAccountKey(serviceAccountAddress, accountKey)
+		assert.NoError(t, err)
+
+		tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+			SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+			SetPayer(serviceAccountAddress)
+
+		signer, err := b.ServiceKey().Signer()
+		require.NoError(t, err)
+
+		err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+		require.NoError(t, err)
+
+		err = adapter.SendTransaction(context.Background(), *tx)
+		assert.NoError(t, err)
+
+		result, err := b.ExecuteNextTransaction()
+		assert.NoError(t, err)
+		assert.True(t, result.Reverted())
+	})
+}
+
+func TestRemoveAccountKey(t *testing.T) {
+
+	t.Parallel()
+
+	b, adapter := setupAccountTests(t)
+
+	accountKeys := test.AccountKeyGenerator()
+
+	newAccountKey, newSigner := accountKeys.NewWithSigner()
+	serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address)
+
+	// create transaction that adds public key to account keys
+	tx1, err := templates.AddAccountKey(serviceAccountAddress, newAccountKey)
+	assert.NoError(t, err)
+
+	tx1.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+		SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+		SetPayer(serviceAccountAddress)
+
+	// sign with service key
+	signer, err := b.ServiceKey().Signer()
+	require.NoError(t, err)
+
+	err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+	require.NoError(t, err)
+
+	// submit tx1 (should succeed)
+	err = adapter.SendTransaction(context.Background(), *tx1)
+	assert.NoError(t, err)
+
+	result, err := b.ExecuteNextTransaction()
+	assert.NoError(t, err)
+	AssertTransactionSucceeded(t, result)
+
+	_, err = b.CommitBlock()
+	assert.NoError(t, err)
+
+	account, err := adapter.GetAccount(context.Background(), serviceAccountAddress)
+	assert.NoError(t, err)
+
+	require.Len(t, account.Keys, 2)
+	assert.False(t, account.Keys[0].Revoked)
+	assert.False(t, account.Keys[1].Revoked)
+
+	// create transaction that removes service key
+	tx2 := templates.RemoveAccountKey(serviceAccountAddress, 0)
+
+	tx2.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+		SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+		SetPayer(serviceAccountAddress)
+
+	// sign with service key
+	signer, err = b.ServiceKey().Signer()
+	assert.NoError(t, err)
+	err = tx2.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+	assert.NoError(t, err)
+
+	// submit tx2 (should succeed)
+	err = adapter.SendTransaction(context.Background(), *tx2)
+	assert.NoError(t, err)
+
+	result, err = b.ExecuteNextTransaction()
+	assert.NoError(t, err)
+	AssertTransactionSucceeded(t, result)
+
+	_, err = b.CommitBlock()
+	assert.NoError(t, err)
+
+	account, err = adapter.GetAccount(context.Background(), serviceAccountAddress)
+	assert.NoError(t, err)
+
+	// key at index 0 should be revoked
+	require.Len(t, account.Keys, 2)
+	assert.True(t, account.Keys[0].Revoked)
+	assert.False(t, account.Keys[1].Revoked)
+
+	// create transaction that attempts to remove the already-revoked key at index 0
+	tx3 := templates.RemoveAccountKey(serviceAccountAddress, 0)
+
+	tx3.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+		SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+		SetPayer(serviceAccountAddress)
+
+	// sign with service key (which has been revoked)
+	signer, err = b.ServiceKey().Signer()
+	assert.NoError(t, err)
+	err = tx3.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+	assert.NoError(t, err)
+
+	// submit tx3 (should fail)
+	err = adapter.SendTransaction(context.Background(), *tx3)
+	assert.NoError(t, err)
+
+	result, err = b.ExecuteNextTransaction()
+	assert.NoError(t, err)
+
+	var sigErr fvmerrors.CodedError
+	assert.ErrorAs(t, result.Error, &sigErr)
+	assert.True(t, fvmerrors.HasErrorCode(result.Error, fvmerrors.ErrCodeInvalidProposalSignatureError))
+
+	_, err = b.CommitBlock()
+	assert.NoError(t, err)
+
+	account, err = adapter.GetAccount(context.Background(), serviceAccountAddress)
+	assert.NoError(t, err)
+
+	// key at index 1 should not be revoked
+	require.Len(t, account.Keys, 2)
+	assert.True(t, account.Keys[0].Revoked)
+	assert.False(t, account.Keys[1].Revoked)
+
+	// create transaction that removes the remaining account key (index 1)
+	tx4 := templates.RemoveAccountKey(serviceAccountAddress, 1)
+
+	tx4.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+		SetProposalKey(serviceAccountAddress, account.Keys[1].Index, account.Keys[1].SequenceNumber).
+ SetPayer(serviceAccountAddress) + + // sign with remaining account key + err = tx4.SignEnvelope(serviceAccountAddress, account.Keys[1].Index, newSigner) + assert.NoError(t, err) + + // submit tx4 (should succeed) + err = adapter.SendTransaction(context.Background(), *tx4) + assert.NoError(t, err) + + result, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err = adapter.GetAccount(context.Background(), serviceAccountAddress) + assert.NoError(t, err) + + // all keys should be revoked + for _, key := range account.Keys { + assert.True(t, key.Revoked) + } +} + +func TestUpdateAccountCode(t *testing.T) { + + t.Parallel() + + const codeA = ` + access(all) contract Test { + access(all) fun a(): Int { + return 1 + } + } + ` + + const codeB = ` + access(all) contract Test { + access(all) fun b(): Int { + return 2 + } + } + ` + + accountKeys := test.AccountKeyGenerator() + + accountKeyB, signerB := accountKeys.NewWithSigner() + + t.Run("Valid signature", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + contracts := []templates.Contract{ + { + Name: "Test", + Source: codeA, + }, + } + + accountAddressB, err := adapter.CreateAccount( + context.Background(), + []*flowsdk.AccountKey{accountKeyB}, + contracts, + ) + require.NoError(t, err) + + account, err := adapter.GetAccount(context.Background(), accountAddressB) + require.NoError(t, err) + + assert.Equal(t, + map[string][]byte{ + "Test": []byte(codeA), + }, + account.Contracts, + ) + + tx := templates.UpdateAccountContract( + accountAddressB, + templates.Contract{ + Name: "Test", + Source: codeB, + }, + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + err = tx.SignPayload(accountAddressB, 0, signerB) + assert.NoError(t, err) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err = adapter.GetAccount(context.Background(), accountAddressB) + assert.NoError(t, err) + + assert.Equal(t, codeB, string(account.Contracts["Test"])) + }) + + t.Run("Invalid signature", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + contracts := []templates.Contract{ + { + Name: "Test", + Source: codeA, + }, + } + + accountAddressB, err := adapter.CreateAccount( + context.Background(), + []*flowsdk.AccountKey{accountKeyB}, + contracts, + ) + require.NoError(t, err) + + account, err := adapter.GetAccount(context.Background(), accountAddressB) + require.NoError(t, err) + + assert.Equal(t, codeA, string(account.Contracts["Test"])) + + tx := templates.UpdateAccountContract( + accountAddressB, + templates.Contract{ + Name: "Test", + Source: codeB, + }, + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + assert.True(t, fvmerrors.HasErrorCode(result.Error, fvmerrors.ErrCodeAccountAuthorizationError)) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err = adapter.GetAccount(context.Background(), accountAddressB) + assert.NoError(t, err) + + // code should not be updated + assert.Equal(t, codeA, string(account.Contracts["Test"])) + }) +} + +func TestImportAccountCode(t *testing.T) { + + t.Parallel() + + b, adapter := setupAccountTests(t) + + accountContracts := []templates.Contract{ + { + Name: "Computer", + Source: ` + access(all) contract Computer { + access(all) fun answer(): Int { + return 42 + } + } + `, + }, + } + + address, err := adapter.CreateAccount(context.Background(), nil, accountContracts) + assert.NoError(t, err) + + script := []byte(fmt.Sprintf(` + // address imports can omit leading zeros + import 0x%s + + transaction { + execute { + let answer = Computer.answer() + if answer != 42 { + panic("?!") + } + } + } + `, address)) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) +} + +func TestAccountAccess(t *testing.T) { + + t.Parallel() + + b, adapter := setupAccountTests(t) + + // Create first account and deploy a contract A + // which has a field + // which only other code in the same should be allowed to access + + accountContracts := []templates.Contract{ + { + Name: "A", + Source: ` + access(all) contract A { + access(account) let a: Int + + init() { + self.a = 1 + } + } + `, + }, + } + + accountKeys := test.AccountKeyGenerator() + + accountKey1, signer1 := accountKeys.NewWithSigner() + + address1, err := adapter.CreateAccount( + context.Background(), + []*flowsdk.AccountKey{accountKey1}, + accountContracts, + ) + assert.NoError(t, err) + + // Deploy another contract B to the same account + // which accesses the field in contract A + // which allows access to code in the same account + + tx := templates.AddAccountContract( + address1, + templates.Contract{ + Name: "B", + Source: fmt.Sprintf(` + import A from 0x%s + + access(all) contract B { + access(all) fun use() { + let b = A.a + } + } + `, + address1.Hex(), + ), + }, + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + err = tx.SignPayload(address1, 0, signer1) + assert.NoError(t, err) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + require.NoError(t, err) + + // Create another account 2 + + accountKey2, signer2 := accountKeys.NewWithSigner() + + address2, err := adapter.CreateAccount( + context.Background(), + []*flowsdk.AccountKey{accountKey2}, + nil, + ) + assert.NoError(t, err) + + // Deploy a contract C to the second account + // which accesses the field in contract A of the first account + // which allows access to code in the same account + + tx = templates.AddAccountContract( + address2, + templates.Contract{ + Name: "C", + Source: fmt.Sprintf(` + import A from 0x%s + + access(all) contract C { + access(all) fun use() { + let b = A.a + } + } + `, + address1.Hex(), + ), + }, + ) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + err = tx.SignPayload(address2, 0, signer2) + require.NoError(t, err) + + signer, err = b.ServiceKey().Signer() + assert.NoError(t, err) + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err = b.ExecuteNextTransaction() + require.NoError(t, err) + + require.False(t, result.Succeeded()) + require.Error(t, result.Error) + + require.Contains( + t, + result.Error.Error(), + "error: cannot access `a`: field requires `account` authorization", + ) +} diff --git a/integration/internal/emulator/tests/attachments_test.go b/integration/internal/emulator/tests/attachments_test.go new file mode 100644 index 00000000000..7d8f72dcdc5 --- /dev/null +++ b/integration/internal/emulator/tests/attachments_test.go @@ -0,0 +1,50 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tests + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/integration/internal/emulator" +) + +func TestAttachments(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + script := ` + access(all) resource R {} + + access(all) attachment A for R {} + + access(all) fun main() { + let r <- create R() + r[A] + destroy r + } + ` + + _, err = b.ExecuteScript([]byte(script), nil) + require.NoError(t, err) +} diff --git a/integration/internal/emulator/tests/block_info_test.go b/integration/internal/emulator/tests/block_info_test.go new file mode 100644 index 00000000000..06a937b8195 --- /dev/null +++ b/integration/internal/emulator/tests/block_info_test.go @@ -0,0 +1,113 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests + +import ( + "context" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + flowsdk "github.com/onflow/flow-go-sdk" + + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestBlockInfo(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + + block1, err := b.CommitBlock() + require.NoError(t, err) + + block2, err := b.CommitBlock() + require.NoError(t, err) + + t.Run("works as transaction", func(t *testing.T) { + tx := flowsdk.NewTransaction(). + SetScript([]byte(` + transaction { + execute { + let block = getCurrentBlock() + log(block) + + let lastBlock = getBlock(at: block.height - 1) + log(lastBlock) + } + } + `)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + require.Len(t, result.Logs, 2) + assert.Equal(t, fmt.Sprintf("Block(height: %v, view: %v, id: 0x%x, timestamp: %.8f)", block2.Header.Height+1, + b.PendingBlockView(), b.PendingBlockID(), float64(b.PendingBlockTimestamp().Unix())), result.Logs[0]) + assert.Equal(t, fmt.Sprintf("Block(height: %v, view: %v, id: 0x%x, timestamp: %.8f)", block2.Header.Height, + block2.Header.View, block2.ID(), float64(block2.Header.Timestamp.Unix())), result.Logs[1]) + }) + + t.Run("works as script", func(t *testing.T) { + script := []byte(` + access(all) fun main() { + let block = getCurrentBlock() + log(block) + + let lastBlock = getBlock(at: block.height - 1) + log(lastBlock) + } + `) + + result, err := b.ExecuteScript(script, nil) + assert.NoError(t, err) + + assert.True(t, result.Succeeded()) + + require.Len(t, result.Logs, 2) + assert.Equal(t, fmt.Sprintf("Block(height: %v, view: %v, id: 0x%x, timestamp: %.8f)", block2.Header.Height, + block2.Header.View, block2.ID(), float64(block2.Header.Timestamp.Unix())), result.Logs[0]) + assert.Equal(t, fmt.Sprintf("Block(height: %v, view: %v, id: 0x%x, timestamp: %.8f)", block1.Header.Height, + block1.Header.View, block1.ID(), float64(block1.Header.Timestamp.Unix())), result.Logs[1]) + }) +} diff --git a/integration/internal/emulator/tests/block_test.go b/integration/internal/emulator/tests/block_test.go new file mode 100644 index 00000000000..b0bebb993f9 --- /dev/null +++ b/integration/internal/emulator/tests/block_test.go @@ -0,0 +1,178 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests + +import ( + "context" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + flowsdk "github.com/onflow/flow-go-sdk" + + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestCommitBlock(t *testing.T) { + + t.Parallel() + + b, err := emulator.New( + emulator.WithStorageLimitEnabled(false), + ) + + require.NoError(t, err) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Add tx1 to pending block + err = adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + tx1Result, err := adapter.GetTransactionResult(context.Background(), tx1.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusPending, tx1Result.Status) + + tx2 := flowsdk.NewTransaction(). + SetScript([]byte(`transaction { execute { panic("revert!") } }`)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err = b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx2.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Add tx2 to pending block + err = adapter.SendTransaction(context.Background(), *tx2) + require.NoError(t, err) + + tx2Result, err := adapter.GetTransactionResult(context.Background(), tx2.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusPending, tx2Result.Status) + + // Execute tx1 + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.True(t, result.Succeeded()) + + // Execute tx2 + result, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.True(t, result.Reverted()) + + // Commit tx1 and tx2 into new block + _, err = b.CommitBlock() + assert.NoError(t, err) + + // tx1 status becomes TransactionStatusSealed + tx1Result, err = adapter.GetTransactionResult(context.Background(), tx1.ID()) + require.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx1Result.Status) + + // tx2 status also becomes TransactionStatusSealed, even though it is reverted + tx2Result, err = adapter.GetTransactionResult(context.Background(), tx2.ID()) + require.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx2Result.Status) + assert.Error(t, tx2Result.Error) +} + +func TestBlockView(t *testing.T) { + + t.Parallel() + + const nBlocks = 3 + + b, err := emulator.New() + require.NoError(t, err) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + t.Run("genesis should have 0 view", func(t *testing.T) { + block, err := b.GetBlockByHeight(0) + require.NoError(t, err) + assert.Equal(t, uint64(0), block.Header.Height) + assert.Equal(t, uint64(0), block.Header.View) + }) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + // create a few blocks, each with one transaction + for i := 0; i < nBlocks; i++ { + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Add tx to pending block + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // execute and commit the block + _, _, err = b.ExecuteAndCommitBlock() + require.NoError(t, err) + } + const MaxViewIncrease = 3 + + for height := uint64(1); height <= nBlocks+1; height++ { + block, err := b.GetBlockByHeight(height) + require.NoError(t, err) + + maxView := height * MaxViewIncrease + t.Run(fmt.Sprintf("block %d should have view <%d", height, maxView), func(t *testing.T) { + assert.Equal(t, height, block.Header.Height) + assert.LessOrEqual(t, block.Header.View, maxView) + }) + } +} diff --git a/integration/internal/emulator/tests/blockchain_test.go b/integration/internal/emulator/tests/blockchain_test.go new file mode 100644 index 00000000000..c42f81fa940 --- /dev/null +++ b/integration/internal/emulator/tests/blockchain_test.go @@ -0,0 +1,153 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests + +import ( + "context" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/stdlib" + + flowsdk "github.com/onflow/flow-go-sdk" + "github.com/onflow/flow-go-sdk/templates" + + emulator "github.com/onflow/flow-go/integration/internal/emulator" +) + +const counterScript = ` + + access(all) contract Counting { + + access(all) event CountIncremented(count: Int) + + access(all) resource Counter { + access(all) var count: Int + + init() { + self.count = 0 + } + + access(all) fun add(_ count: Int) { + self.count = self.count + count + emit CountIncremented(count: self.count) + } + } + + access(all) fun createCounter(): @Counter { + return <-create Counter() + } + } +` + +// generateAddTwoToCounterScript generates a script that increments a counter. +// If no counter exists, it is created. +func GenerateAddTwoToCounterScript(counterAddress flowsdk.Address) string { + return fmt.Sprintf( + ` + import 0x%s + + transaction { + prepare(signer: auth(Storage, Capabilities) &Account) { + var counter = signer.storage.borrow<&Counting.Counter>(from: /storage/counter) + if counter == nil { + signer.storage.save(<-Counting.createCounter(), to: /storage/counter) + counter = signer.storage.borrow<&Counting.Counter>(from: /storage/counter) + + // Also publish this for others to borrow. 
+ let cap = signer.capabilities.storage.issue<&Counting.Counter>(/storage/counter) + signer.capabilities.publish(cap, at: /public/counter) + } + counter?.add(2) + } + } + `, + counterAddress, + ) +} + +func DeployAndGenerateAddTwoScript(t *testing.T, adapter *emulator.SDKAdapter) (string, flowsdk.Address) { + + contracts := []templates.Contract{ + { + Name: "Counting", + Source: counterScript, + }, + } + + counterAddress, err := adapter.CreateAccount( + context.Background(), + nil, + contracts, + ) + require.NoError(t, err) + + return GenerateAddTwoToCounterScript(counterAddress), counterAddress +} + +func GenerateGetCounterCountScript(counterAddress flowsdk.Address, accountAddress flowsdk.Address) string { + return fmt.Sprintf( + ` + import 0x%s + + access(all) fun main(): Int { + return getAccount(0x%s).capabilities.borrow<&Counting.Counter>(/public/counter)?.count ?? 0 + } + `, + counterAddress, + accountAddress, + ) +} + +func AssertTransactionSucceeded(t *testing.T, result *emulator.TransactionResult) { + if !assert.True(t, result.Succeeded()) { + t.Error(result.Error) + } +} + +func LastCreatedAccount(b *emulator.Blockchain, result *emulator.TransactionResult) (*flowsdk.Account, error) { + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + + address, err := LastCreatedAccountAddress(result) + if err != nil { + return nil, err + } + + return adapter.GetAccount(context.Background(), address) +} + +func LastCreatedAccountAddress(result *emulator.TransactionResult) (flowsdk.Address, error) { + for _, event := range result.Events { + if event.Type == flowsdk.EventAccountCreated { + addressFieldValue := cadence.SearchFieldByName( + event.Value, + stdlib.AccountEventAddressParameter.Identifier, + ) + return flowsdk.Address(addressFieldValue.(cadence.Address)), nil + } + } + + return flowsdk.Address{}, fmt.Errorf("no account created in this result") +} diff --git a/integration/internal/emulator/tests/capcons_test.go b/integration/internal/emulator/tests/capcons_test.go new file mode 100644 index 00000000000..7fe37ecbd12 --- /dev/null +++ b/integration/internal/emulator/tests/capcons_test.go @@ -0,0 +1,44 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tests + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/integration/internal/emulator" +) + +func TestCapabilityControllers(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + script := ` + access(all) fun main() { + getAccount(0x1).capabilities.get + } + ` + + _, err = b.ExecuteScript([]byte(script), nil) + require.NoError(t, err) +} diff --git a/integration/internal/emulator/tests/collection_test.go b/integration/internal/emulator/tests/collection_test.go new file mode 100644 index 00000000000..0dae5c6dad6 --- /dev/null +++ b/integration/internal/emulator/tests/collection_test.go @@ -0,0 +1,117 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package tests + +import ( + "context" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + flowsdk "github.com/onflow/flow-go-sdk" + + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestCollections(t *testing.T) { + + t.Parallel() + + t.Run("Empty block", func(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + block, err := b.CommitBlock() + require.NoError(t, err) + + // block should not contain any collections + assert.Empty(t, block.Payload.Guarantees) + }) + + t.Run("Non-empty block", func(t *testing.T) { + + t.Parallel() + + b, err := emulator.New( + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + require.NoError(t, err) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + tx2 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + err = tx2.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // generate a list of transactions + transactions := []*flowsdk.Transaction{tx1, tx2} + + // add all transactions to block + for _, tx := range transactions { + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + } + + block, _, err := b.ExecuteAndCommitBlock() + require.NoError(t, err) + + // block should contain at least one collection + assert.NotEmpty(t, block.Payload.Guarantees) + + i := 0 + for _, guarantee := range block.Payload.Guarantees { + collection, err := adapter.GetCollectionByID(context.Background(), emulator.FlowIdentifierToSDK(guarantee.ID())) + require.NoError(t, err) + + for _, txID := range collection.TransactionIDs { + assert.Equal(t, transactions[i].ID(), txID) + i++ + } + } + }) +} diff --git a/integration/internal/emulator/tests/events_test.go b/integration/internal/emulator/tests/events_test.go new file mode 100644 index 00000000000..9a561fd2c49 --- /dev/null +++ b/integration/internal/emulator/tests/events_test.go @@ -0,0 +1,202 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests + +import ( + "context" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + + flowsdk "github.com/onflow/flow-go-sdk" + "github.com/onflow/flow-go-sdk/templates" + + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestEventEmitted(t *testing.T) { + + t.Parallel() + + t.Run("EmittedFromScript", func(t *testing.T) { + + t.Parallel() + + // Emitting events in scripts is not supported + + b, err := emulator.New() + require.NoError(t, err) + + script := []byte(` + access(all) event MyEvent(x: Int, y: Int) + + access(all) fun main() { + emit MyEvent(x: 1, y: 2) + } + `) + + result, err := b.ExecuteScript(script, nil) + assert.NoError(t, err) + require.NoError(t, result.Error) + require.Empty(t, result.Events) + }) + + t.Run("EmittedFromAccount", func(t *testing.T) { + + t.Parallel() + + b, err := emulator.New( + emulator.WithStorageLimitEnabled(false), + ) + require.NoError(t, err) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + + accountContracts := []templates.Contract{ + { + Name: "Test", + Source: ` + access(all) contract Test { + access(all) event MyEvent(x: Int, y: Int) + + access(all) fun emitMyEvent(x: Int, y: Int) { + emit MyEvent(x: x, y: y) + } + } + `, + }, + } + + publicKey := b.ServiceKey() + accountKey := &flowsdk.AccountKey{ + Index: publicKey.Index, + PublicKey: publicKey.PublicKey, + SigAlgo: publicKey.SigAlgo, + HashAlgo: publicKey.HashAlgo, + Weight: publicKey.Weight, + SequenceNumber: 
publicKey.SequenceNumber, + } + + address, err := adapter.CreateAccount( + context.Background(), + []*flowsdk.AccountKey{accountKey}, + accountContracts, + ) + assert.NoError(t, err) + + script := []byte(fmt.Sprintf(` + import 0x%s + + transaction { + execute { + Test.emitMyEvent(x: 1, y: 2) + } + } + `, address.Hex())) + + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.True(t, result.Succeeded()) + + block, err := b.CommitBlock() + require.NoError(t, err) + + addr, _ := common.BytesToAddress(address.Bytes()) + location := common.AddressLocation{ + Address: addr, + Name: "Test", + } + expectedType := location.TypeID(nil, "Test.MyEvent") + + events, err := adapter.GetEventsForHeightRange(context.Background(), string(expectedType), block.Header.Height, block.Header.Height) + require.NoError(t, err) + require.Len(t, events, 1) + + actualEvent := events[0].Events[0] + decodedEvent := actualEvent.Value + decodedEventType := decodedEvent.Type().(*cadence.EventType) + expectedID := flowsdk.Event{TransactionID: tx.ID(), EventIndex: 0}.ID() + + assert.Equal(t, string(expectedType), actualEvent.Type) + assert.Equal(t, expectedID, actualEvent.ID()) + + fields := decodedEventType.FieldsMappedByName() + + assert.Contains(t, fields, "x") + assert.Contains(t, fields, "y") + + fieldValues := decodedEvent.FieldsMappedByName() + + assert.Equal(t, cadence.NewInt(1), fieldValues["x"]) + assert.Equal(t, cadence.NewInt(2), fieldValues["y"]) + + events, err = adapter.GetEventsForBlockIDs( + context.Background(), + string(expectedType), + []flowsdk.Identifier{ + flowsdk.Identifier(block.Header.ID()), + }, + ) + require.NoError(t, err) + require.Len(t, events, 1) + + actualEvent = events[0].Events[0] + decodedEvent = actualEvent.Value + decodedEventType = decodedEvent.Type().(*cadence.EventType) + expectedID = flowsdk.Event{TransactionID: tx.ID(), EventIndex: 0}.ID() + + assert.Equal(t, string(expectedType), actualEvent.Type) + assert.Equal(t, expectedID, actualEvent.ID()) + + fields = decodedEventType.FieldsMappedByName() + + assert.Contains(t, fields, "x") + assert.Contains(t, fields, "y") + + fieldValues = decodedEvent.FieldsMappedByName() + + assert.Equal(t, cadence.NewInt(1), fieldValues["x"]) + assert.Equal(t, cadence.NewInt(2), fieldValues["y"]) + + }) +} diff --git a/integration/internal/emulator/tests/logs_test.go b/integration/internal/emulator/tests/logs_test.go new file mode 100644 index 00000000000..af65ef14b89 --- /dev/null +++ b/integration/internal/emulator/tests/logs_test.go @@ -0,0 +1,46 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/integration/internal/emulator" +) + +func TestRuntimeLogs(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + script := []byte(` + access(all) fun main() { + log("elephant ears") + } + `) + + result, err := b.ExecuteScript(script, nil) + assert.NoError(t, err) + assert.Equal(t, []string{`"elephant ears"`}, result.Logs) +} diff --git a/integration/internal/emulator/tests/memstore_test.go b/integration/internal/emulator/tests/memstore_test.go new file mode 100644 index 00000000000..a28696d14be --- /dev/null +++ b/integration/internal/emulator/tests/memstore_test.go @@ -0,0 +1,118 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tests + +import ( + "context" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/integration/internal/emulator" + "github.com/onflow/flow-go/model/flow" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestMemstore(t *testing.T) { + + t.Parallel() + + const blockHeight = 0 + key := flow.NewRegisterID(flowgo.EmptyAddress, "foo") + value := []byte("bar") + store := emulator.NewMemoryStore() + + err := store.InsertExecutionSnapshot( + blockHeight, + &snapshot.ExecutionSnapshot{ + WriteSet: map[flowgo.RegisterID]flowgo.RegisterValue{ + key: value, + }, + }, + ) + require.NoError(t, err) + + var wg sync.WaitGroup + + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + snapshot, err := store.LedgerByHeight( + context.Background(), + blockHeight) + require.NoError(t, err) + actualValue, err := snapshot.Get(key) + + require.NoError(t, err) + assert.Equal(t, value, actualValue) + }() + } + + wg.Wait() +} + +func TestMemstoreSetValueToNil(t *testing.T) { + + t.Parallel() + + store := emulator.NewMemoryStore() + key := flow.NewRegisterID(flowgo.EmptyAddress, "foo") + value := []byte("bar") + var nilByte []byte + nilValue := nilByte + + // set initial value + err := store.InsertExecutionSnapshot( + 0, + &snapshot.ExecutionSnapshot{ + WriteSet: map[flowgo.RegisterID]flowgo.RegisterValue{ + key: value, + }, + }) + require.NoError(t, err) + + // check initial value + ledger, err := store.LedgerByHeight(context.Background(), 0) + require.NoError(t, err) + register, err := ledger.Get(key) + require.NoError(t, err) + require.Equal(t, string(value), string(register)) + + // set value to nil + err = store.InsertExecutionSnapshot( + 1, + &snapshot.ExecutionSnapshot{ + WriteSet: map[flowgo.RegisterID]flowgo.RegisterValue{ + key: nilValue, + }, + }) + require.NoError(t, err) + + // check value is nil + ledger, err = store.LedgerByHeight(context.Background(), 1) + require.NoError(t, err) + register, err = ledger.Get(key) + require.NoError(t, err) + require.Equal(t, string(nilValue), string(register)) +} diff --git a/integration/internal/emulator/tests/pendingBlock_test.go b/integration/internal/emulator/tests/pendingBlock_test.go new file mode 100644 index 00000000000..be210742839 --- /dev/null +++ b/integration/internal/emulator/tests/pendingBlock_test.go @@ -0,0 +1,459 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package tests + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + flowsdk "github.com/onflow/flow-go-sdk" + + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func setupPendingBlockTests(t *testing.T) ( + *emulator.Blockchain, + *emulator.SDKAdapter, + *flowsdk.Transaction, + *flowsdk.Transaction, + *flowsdk.Transaction, +) { + b, err := emulator.New( + emulator.WithStorageLimitEnabled(false), + ) + require.NoError(t, err) + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + tx2 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber+1). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err = b.ServiceKey().Signer() + assert.NoError(t, err) + err = tx2.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + invalid := flowsdk.NewTransaction(). + SetScript([]byte(`transaction { execute { panic("revert!") } }`)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err = b.ServiceKey().Signer() + assert.NoError(t, err) + err = invalid.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + return b, adapter, tx1, tx2, invalid +} + +func TestPendingBlockBeforeExecution(t *testing.T) { + + t.Parallel() + + t.Run("EmptyPendingBlock", func(t *testing.T) { + + t.Parallel() + + b, _, _, _, _ := setupPendingBlockTests(t) + + // Execute empty pending block + _, err := b.ExecuteBlock() + assert.NoError(t, err) + + // Commit empty pending block + _, err = b.CommitBlock() + assert.NoError(t, err) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("AddDuplicateTransaction", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx, _, _ := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // Add tx1 again + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, &emulator.DuplicateTransactionError{}, err) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("CommitBeforeExecution", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx, _, _ := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // Attempt to commit block before execution begins + _, err = b.CommitBlock() + assert.IsType(t, &emulator.PendingBlockCommitBeforeExecutionError{}, err) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) +} + +func TestPendingBlockDuringExecution(t *testing.T) { + + t.Parallel() + + t.Run("ExecuteNextTransaction", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx1, _, invalid := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx1) + require.NoError(t, err) + + // Add invalid script tx to pending block + err = adapter.SendTransaction(context.Background(), *invalid) + require.NoError(t, err) + + // Execute tx1 (succeeds) + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + // Execute invalid script tx (reverts) + result, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.True(t, result.Reverted()) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("ExecuteBlock", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx1, _, invalid := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx1) + require.NoError(t, err) + + // Add invalid script tx to pending block + err = adapter.SendTransaction(context.Background(), *invalid) + require.NoError(t, err) + + // Execute all tx in pending block (tx1, invalid) + results, err := b.ExecuteBlock() + assert.NoError(t, err) + + // tx1 result + assert.True(t, results[0].Succeeded()) + // invalid script tx result + assert.True(t, results[1].Reverted()) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("ExecuteNextThenBlock", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx1, tx2, invalid := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Add tx2 to pending block + err = adapter.SendTransaction(context.Background(), *tx2) + assert.NoError(t, err) + + // Add invalid script tx to pending block + err = 
adapter.SendTransaction(context.Background(), *invalid) + assert.NoError(t, err) + + // Execute tx1 first (succeeds) + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + // Execute rest of tx in pending block (tx2, invalid) + results, err := b.ExecuteBlock() + assert.NoError(t, err) + // tx2 result + assert.True(t, results[0].Succeeded()) + // invalid script tx result + assert.True(t, results[1].Reverted()) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("AddTransactionMidExecution", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx1, tx2, invalid := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Add invalid to pending block + err = adapter.SendTransaction(context.Background(), *invalid) + assert.NoError(t, err) + + // Execute tx1 first (succeeds) + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + // Attempt to add tx2 to pending block after execution begins + err = adapter.SendTransaction(context.Background(), *tx2) + assert.IsType(t, &emulator.PendingBlockMidExecutionError{}, err) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("CommitMidExecution", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx1, _, invalid := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Add invalid to pending block + err = adapter.SendTransaction(context.Background(), *invalid) + assert.NoError(t, err) + + // Execute tx1 first (succeeds) + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + // Attempt to commit block before execution finishes + _, err = b.CommitBlock() + assert.IsType(t, &emulator.PendingBlockMidExecutionError{}, err) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("TransactionsExhaustedDuringExecution", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx1, _, _ := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Execute tx1 (succeeds) + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + // Attempt to execute nonexistent next tx (fails) + _, err = b.ExecuteNextTransaction() + assert.IsType(t, &emulator.PendingBlockTransactionsExhaustedError{}, err) + + // Attempt to execute rest of block tx (fails) + _, err = b.ExecuteBlock() + assert.IsType(t, &emulator.PendingBlockTransactionsExhaustedError{}, err) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) +} + +func TestPendingBlockCommit(t *testing.T) { + + t.Parallel() + + b, err := emulator.New( + emulator.WithStorageLimitEnabled(false), + ) + require.NoError(t, err) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + t.Run("CommitBlock", func(t *testing.T) { + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Add tx1 to pending block + err = adapter.SendTransaction(context.Background(), *tx1) + require.NoError(t, err) + + // Enter execution mode (block hash should not change after this point) + blockID := b.PendingBlockID() + + // Execute tx1 (succeeds) + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + // Commit pending block + block, err := b.CommitBlock() + assert.NoError(t, err) + assert.Equal(t, blockID, block.ID()) + }) + + t.Run("ExecuteAndCommitBlock", func(t *testing.T) { + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Add tx1 to pending block + err = adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Enter execution mode (block hash should not change after this point) + blockID := b.PendingBlockID() + + // Execute and commit pending block + block, results, err := b.ExecuteAndCommitBlock() + assert.NoError(t, err) + assert.Equal(t, blockID, block.ID()) + assert.Len(t, results, 1) + }) +} + +type testClock struct { + Time time.Time +} + +func (tc testClock) Now() time.Time { + return tc.Time.UTC() +} + +func TestPendingBlockSetTimestamp(t *testing.T) { + + t.Parallel() + + b, adapter, _, _, _ := setupPendingBlockTests(t) + clock := testClock{ + Time: time.Now().UTC(), + } + b.SetClock(clock.Now) + _, _ = b.CommitBlock() + + script := []byte(` + access(all) fun main(): UFix64 { + return getCurrentBlock().timestamp + } + `) + scriptResult, err := adapter.ExecuteScriptAtLatestBlock( + context.Background(), + script, + [][]byte{}, + ) + require.NoError(t, err) + + expected := fmt.Sprintf( + "{\"value\":\"%d.00000000\",\"type\":\"UFix64\"}\n", + clock.Time.Unix(), + ) + assert.Equal(t, expected, string(scriptResult)) + + clock = testClock{ + Time: time.Now().Add(time.Hour * 24 * 7).UTC(), + } + b.SetClock(clock.Now) + _, _ = b.CommitBlock() + + _, err = adapter.ExecuteScriptAtLatestBlock( + context.Background(), + script, + [][]byte{}, + ) + require.NoError(t, err) + + /*expected = fmt.Sprintf( + "{\"value\":\"%d.00000000\",\"type\":\"UFix64\"}\n", + clock.Time.Unix(), + )*/ + //assert.Equal(t, expected, string(scriptResult)) +} diff --git a/integration/internal/emulator/tests/result_test.go b/integration/internal/emulator/tests/result_test.go new file mode 100644 index 00000000000..c5679a509b3 --- /dev/null +++ b/integration/internal/emulator/tests/result_test.go @@ -0,0 +1,87 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests + +import ( + "errors" + "testing" + + "github.com/onflow/cadence" + "github.com/stretchr/testify/assert" + + flowsdk "github.com/onflow/flow-go-sdk" + "github.com/onflow/flow-go-sdk/test" + + emulator "github.com/onflow/flow-go/integration/internal/emulator" + "github.com/onflow/flow-go/model/flow" +) + +func TestResult(t *testing.T) { + + t.Parallel() + + t.Run("should return correct boolean", func(t *testing.T) { + + t.Parallel() + + idGenerator := test.IdentifierGenerator() + + trSucceed := &emulator.TransactionResult{ + TransactionID: idGenerator.New(), + ComputationUsed: 20, + MemoryEstimate: 2048, + Error: nil, + Logs: []string{}, + Events: []flowsdk.Event{}, + } + assert.True(t, trSucceed.Succeeded()) + assert.False(t, trSucceed.Reverted()) + + trReverted := &emulator.TransactionResult{ + TransactionID: idGenerator.New(), + ComputationUsed: 20, + MemoryEstimate: 2048, + Error: errors.New("transaction execution error"), + Logs: []string{}, + Events: []flowsdk.Event{}, + } + assert.True(t, trReverted.Reverted()) + assert.False(t, trReverted.Succeeded()) + + srSucceed := &emulator.ScriptResult{ + ScriptID: emulator.SDKIdentifierToFlow(idGenerator.New()), + Value: cadence.Value(cadence.NewInt(1)), + Error: nil, + Logs: []string{}, + Events: []flow.Event{}, + } + assert.True(t, srSucceed.Succeeded()) + assert.False(t, srSucceed.Reverted()) + + srReverted := &emulator.ScriptResult{ + ScriptID: emulator.SDKIdentifierToFlow(idGenerator.New()), + Value: cadence.Value(cadence.NewInt(1)), + Error: errors.New("transaction execution error"), + Logs: []string{}, + Events: []flow.Event{}, + } + assert.True(t, srReverted.Reverted()) + assert.False(t, srReverted.Succeeded()) + }) +} diff --git a/integration/internal/emulator/tests/script_test.go b/integration/internal/emulator/tests/script_test.go new file mode 100644 index 00000000000..ad7e4782512 --- /dev/null +++ b/integration/internal/emulator/tests/script_test.go @@ -0,0 +1,316 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tests + +import ( + "context" + "fmt" + "testing" + + "github.com/onflow/cadence" + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + flowsdk "github.com/onflow/flow-go-sdk" + + fvmerrors "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/evm/stdlib" + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestExecuteScript(t *testing.T) { + + t.Parallel() + + b, err := emulator.New( + emulator.WithStorageLimitEnabled(false), + ) + require.NoError(t, err) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + + addTwoScript, counterAddress := DeployAndGenerateAddTwoScript(t, adapter) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + callScript := GenerateGetCounterCountScript(counterAddress, serviceAccountAddress) + + // Sample call (value is 0) + scriptResult, err := b.ExecuteScript([]byte(callScript), nil) + require.NoError(t, err) + assert.Equal(t, cadence.NewInt(0), scriptResult.Value) + + // Submit tx (script adds 2) + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + txResult, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, txResult) + + t.Run("BeforeCommit", func(t *testing.T) { + t.Skip("TODO: fix stored ledger") + + // Sample call (value is still 0) + result, err := b.ExecuteScript([]byte(callScript), nil) + require.NoError(t, err) + assert.Equal(t, cadence.NewInt(0), result.Value) + }) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + t.Run("AfterCommit", func(t *testing.T) { + // Sample call (value is 2) + result, err := b.ExecuteScript([]byte(callScript), nil) + require.NoError(t, err) + assert.Equal(t, cadence.NewInt(2), result.Value) + }) +} + +func TestExecuteScript_WithArguments(t *testing.T) { + + t.Parallel() + + t.Run("Int", func(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + scriptWithArgs := ` + access(all) fun main(n: Int): Int { + return n + } + ` + + arg, err := jsoncdc.Encode(cadence.NewInt(10)) + require.NoError(t, err) + + scriptResult, err := b.ExecuteScript([]byte(scriptWithArgs), [][]byte{arg}) + require.NoError(t, err) + + assert.Equal(t, cadence.NewInt(10), scriptResult.Value) + }) + + t.Run("String", func(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + scriptWithArgs := ` + access(all) fun main(n: String): Int { + log(n) + return 0 + } + ` + + arg, err := jsoncdc.Encode(cadence.String("Hello, World")) + require.NoError(t, err) + scriptResult, err := b.ExecuteScript([]byte(scriptWithArgs), [][]byte{arg}) + require.NoError(t, err) + assert.Contains(t, scriptResult.Logs, "\"Hello, World\"") + }) +} + +func TestExecuteScript_FlowServiceAccountBalance(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + code := fmt.Sprintf( + ` + 
import FlowServiceAccount from %[1]s + + access(all) + fun main(): UFix64 { + let acct = getAccount(%[1]s) + return FlowServiceAccount.defaultTokenBalance(acct) + } + `, + b.GetChain().ServiceAddress().HexWithPrefix(), + ) + + res, err := b.ExecuteScript([]byte(code), nil) + require.NoError(t, err) + require.NoError(t, res.Error) + + require.Positive(t, res.Value) +} + +func TestInfiniteScript(t *testing.T) { + + t.Parallel() + + const limit = 18 + b, err := emulator.New( + emulator.WithScriptGasLimit(limit), + ) + require.NoError(t, err) + + const code = ` + access(all) fun main() { + main() + } + ` + result, err := b.ExecuteScript([]byte(code), nil) + require.NoError(t, err) + + require.True(t, fvmerrors.IsComputationLimitExceededError(result.Error)) +} + +func TestScriptExecutionLimit(t *testing.T) { + + t.Parallel() + + const code = ` + access(all) fun main() { + var s: Int256 = 1024102410241024 + var i: Int256 = 0 + var a: Int256 = 7 + var b: Int256 = 5 + var c: Int256 = 2 + + while i < 150000 { + s = s * a + s = s / b + s = s / c + i = i + 1 + } + } + ` + + t.Run("ExceedingLimit", func(t *testing.T) { + + t.Parallel() + + const limit = 2000 + b, err := emulator.New( + emulator.WithScriptGasLimit(limit), + ) + require.NoError(t, err) + + result, err := b.ExecuteScript([]byte(code), nil) + require.NoError(t, err) + + require.True(t, fvmerrors.IsComputationLimitExceededError(result.Error)) + }) + + t.Run("SufficientLimit", func(t *testing.T) { + + t.Parallel() + + const limit = 19000 + b, err := emulator.New( + emulator.WithScriptGasLimit(limit), + ) + require.NoError(t, err) + + result, err := b.ExecuteScript([]byte(code), nil) + require.NoError(t, err) + require.NoError(t, result.Error) + }) +} + +// TestScriptWithCadenceRandom checks Cadence's random function works +// within a script +func TestScriptWithCadenceRandom(t *testing.T) { + + //language=cadence + code := ` + access(all) + fun main() { + assert(revertibleRandom() >= 0) + } + ` + + const limit = 200 + b, err := emulator.New( + emulator.WithScriptGasLimit(limit), + ) + require.NoError(t, err) + + result, err := b.ExecuteScript([]byte(code), nil) + require.NoError(t, err) + require.NoError(t, result.Error) +} + +// TestEVM checks evm functionality +func TestEVM(t *testing.T) { + serviceAddr := flowgo.Emulator.Chain().ServiceAddress() + code := []byte(fmt.Sprintf( + ` + import EVM from 0x%s + + access(all) + fun main(bytes: [UInt8; 20]) { + log(EVM.EVMAddress(bytes: bytes)) + } + `, + serviceAddr, + )) + + gasLimit := uint64(100_000) + + b, err := emulator.New( + emulator.WithScriptGasLimit(gasLimit), + ) + require.NoError(t, err) + + addressBytesArray := cadence.NewArray([]cadence.Value{ + cadence.UInt8(1), cadence.UInt8(1), + cadence.UInt8(2), cadence.UInt8(2), + cadence.UInt8(3), cadence.UInt8(3), + cadence.UInt8(4), cadence.UInt8(4), + cadence.UInt8(5), cadence.UInt8(5), + cadence.UInt8(6), cadence.UInt8(6), + cadence.UInt8(7), cadence.UInt8(7), + cadence.UInt8(8), cadence.UInt8(8), + cadence.UInt8(9), cadence.UInt8(9), + cadence.UInt8(10), cadence.UInt8(10), + }).WithType(stdlib.EVMAddressBytesCadenceType) + + result, err := b.ExecuteScript(code, [][]byte{jsoncdc.MustEncode(addressBytesArray)}) + require.NoError(t, err) + require.NoError(t, result.Error) + require.Len(t, result.Logs, 1) + require.Equal(t, result.Logs[0], fmt.Sprintf("A.%s.EVM.EVMAddress(bytes: %s)", serviceAddr, addressBytesArray.String())) + +} diff --git a/integration/internal/emulator/tests/store_test.go 
b/integration/internal/emulator/tests/store_test.go new file mode 100644 index 00000000000..1a4db36057f --- /dev/null +++ b/integration/internal/emulator/tests/store_test.go @@ -0,0 +1,482 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests_test + +import ( + "context" + "fmt" + "testing" + + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go-sdk/test" + + "github.com/onflow/flow-go/fvm/storage/snapshot" + emulator "github.com/onflow/flow-go/integration/internal/emulator" + "github.com/onflow/flow-go/integration/internal/emulator/utils/unittest" + "github.com/onflow/flow-go/model/flow" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestBlocks(t *testing.T) { + + t.Parallel() + + store := setupStore(t) + + block1 := &flowgo.Block{ + Header: &flowgo.Header{ + Height: 1, + }, + } + block2 := &flowgo.Block{ + Header: &flowgo.Header{ + Height: 2, + }, + } + + t.Run("should return error for not found", func(t *testing.T) { + t.Run("BlockByID", func(t *testing.T) { + freshId := test.IdentifierGenerator().New() + _, err := store.BlockByID(context.Background(), flowgo.Identifier(freshId)) + if assert.Error(t, err) { + assert.Equal(t, emulator.ErrNotFound, err) + } + }) + + t.Run("BlockByHeight", func(t *testing.T) { + _, err := store.BlockByHeight(context.Background(), block1.Header.Height) + if assert.Error(t, err) { + assert.Equal(t, emulator.ErrNotFound, err) + } + }) + + t.Run("LatestBlock", func(t *testing.T) { + _, err := store.LatestBlock(context.Background()) + if assert.Error(t, err) { + assert.Equal(t, emulator.ErrNotFound, err) + } + }) + }) + + t.Run("should be able to insert block", func(t *testing.T) { + err := store.StoreBlock(context.Background(), block1) + assert.NoError(t, err) + }) + + // insert block 1 + err := store.StoreBlock(context.Background(), block1) + assert.NoError(t, err) + + t.Run("should be able to get inserted block", func(t *testing.T) { + t.Run("BlockByHeight", func(t *testing.T) { + block, err := store.BlockByHeight(context.Background(), block1.Header.Height) + assert.NoError(t, err) + assert.Equal(t, block1, block) + }) + + t.Run("BlockByID", func(t *testing.T) { + block, err := store.BlockByID(context.Background(), block1.ID()) + assert.NoError(t, err) + assert.Equal(t, block1, block) + }) + + t.Run("LatestBlock", func(t *testing.T) { + block, err := store.LatestBlock(context.Background()) + assert.NoError(t, err) + assert.Equal(t, *block1, block) + }) + }) + + // insert block 2 + err = store.StoreBlock(context.Background(), block2) + assert.NoError(t, err) + + t.Run("Latest block should update", func(t *testing.T) { + block, err := store.LatestBlock(context.Background()) + assert.NoError(t, err) + assert.Equal(t, *block2, block) + }) +} + +func TestCollections(t *testing.T) { + + t.Parallel() + + store := setupStore(t) + + // collection with 3 transactions 
+ col := unittest.FullCollectionFixture(3) + + t.Run("should return error for not found", func(t *testing.T) { + _, err := store.CollectionByID(context.Background(), col.ID()) + if assert.Error(t, err) { + assert.Equal(t, emulator.ErrNotFound, err) + } + }) + + t.Run("should be able to insert collection", func(t *testing.T) { + err := store.InsertCollection(col.Light()) + assert.NoError(t, err) + + t.Run("should be able to get inserted collection", func(t *testing.T) { + storedCol, err := store.CollectionByID(context.Background(), col.ID()) + require.NoError(t, err) + assert.Equal(t, col.Light(), storedCol) + }) + }) +} + +func TestTransactions(t *testing.T) { + + t.Parallel() + + store := setupStore(t) + + tx := unittest.TransactionFixture() + + t.Run("should return error for not found", func(t *testing.T) { + _, err := store.TransactionByID(context.Background(), tx.ID()) + if assert.Error(t, err) { + assert.Equal(t, emulator.ErrNotFound, err) + } + }) + + t.Run("should be able to insert tx", func(t *testing.T) { + err := store.InsertTransaction(tx.ID(), tx) + assert.NoError(t, err) + + t.Run("should be able to get inserted tx", func(t *testing.T) { + storedTx, err := store.TransactionByID(context.Background(), tx.ID()) + require.NoError(t, err) + assert.Equal(t, tx.ID(), storedTx.ID()) + }) + }) +} + +func TestFullCollection(t *testing.T) { + t.Parallel() + store := setupStore(t) + + col := unittest.FullCollectionFixture(3) + + t.Run("should be able to insert full collection", func(t *testing.T) { + _, err := store.CollectionByID(context.Background(), col.ID()) + require.Error(t, emulator.ErrNotFound, err) + + _, err = store.FullCollectionByID(context.Background(), col.ID()) + require.Error(t, emulator.ErrNotFound, err) + + err = store.InsertCollection(col.Light()) + require.NoError(t, err) + + for _, tx := range col.Transactions { + err = store.InsertTransaction(tx.ID(), *tx) + require.NoError(t, err) + } + + c, err := store.FullCollectionByID(context.Background(), col.ID()) + require.NoError(t, err) + require.Equal(t, col, c) + }) + +} + +func TestTransactionResults(t *testing.T) { + + t.Parallel() + + test := func(eventEncodingVersion entities.EventEncodingVersion) { + + t.Run(eventEncodingVersion.String(), func(t *testing.T) { + t.Parallel() + + store := setupStore(t) + + ids := test.IdentifierGenerator() + + result := unittest.StorableTransactionResultFixture(eventEncodingVersion) + + t.Run("should return error for not found", func(t *testing.T) { + txID := flowgo.Identifier(ids.New()) + + _, err := store.TransactionResultByID(context.Background(), txID) + if assert.Error(t, err) { + assert.Equal(t, emulator.ErrNotFound, err) + } + }) + + t.Run("should be able to insert result", func(t *testing.T) { + txID := flowgo.Identifier(ids.New()) + + err := store.InsertTransactionResult(txID, result) + assert.NoError(t, err) + + t.Run("should be able to get inserted result", func(t *testing.T) { + storedResult, err := store.TransactionResultByID(context.Background(), txID) + require.NoError(t, err) + assert.Equal(t, result, storedResult) + }) + }) + }) + } + + test(entities.EventEncodingVersion_CCF_V0) + test(entities.EventEncodingVersion_JSON_CDC_V0) +} + +func TestLedger(t *testing.T) { + + t.Parallel() + + t.Run("get/set", func(t *testing.T) { + + t.Parallel() + + store := setupStore(t) + + var blockHeight uint64 = 1 + + owner := flow.HexToAddress("0x01") + const key = "foo" + expected := []byte("bar") + + executionSnapshot := &snapshot.ExecutionSnapshot{ + WriteSet: 
map[flow.RegisterID]flow.RegisterValue{ + flow.NewRegisterID(owner, key): expected, + }, + } + + t.Run("should be able to set ledger", func(t *testing.T) { + err := store.InsertExecutionSnapshot( + blockHeight, + executionSnapshot) + assert.NoError(t, err) + }) + + t.Run("should be able to get set ledger", func(t *testing.T) { + gotLedger, err := store.LedgerByHeight(context.Background(), blockHeight) + assert.NoError(t, err) + actual, err := gotLedger.Get(flow.NewRegisterID(owner, key)) + assert.NoError(t, err) + assert.Equal(t, expected, actual) + }) + }) + + t.Run("versioning", func(t *testing.T) { + + t.Parallel() + store := setupStore(t) + + owner := flow.HexToAddress("0x01") + + // Create a list of execution snapshots, where the snapshot inserted at + // block height h writes keys h, h+1, h+2 with value h. + totalBlocks := 10 + var snapshots []*snapshot.ExecutionSnapshot + for i := 2; i < totalBlocks+2; i++ { + writeSet := map[flow.RegisterID]flow.RegisterValue{} + for j := i - 1; j <= i+1; j++ { + key := fmt.Sprintf("%d", j) + writeSet[flow.NewRegisterID(owner, key)] = []byte{byte(i - 1)} + } + snapshots = append( + snapshots, + &snapshot.ExecutionSnapshot{WriteSet: writeSet}) + } + require.Equal(t, totalBlocks, len(snapshots)) + + // Insert all the ledgers, starting with block 1. + // This will result in a ledger state that looks like this: + // Block 1: {1: 1, 2: 1, 3: 1} + // Block 2: {2: 2, 3: 2, 4: 2} + // ... + // The combined state at block N looks like: + // {1: 1, 2: 2, 3: 3, ..., N+1: N, N+2: N} + for i, snapshot := range snapshots { + err := store.InsertExecutionSnapshot( + uint64(i+1), + snapshot) + require.NoError(t, err) + } + + // View at block 1 should have keys 1, 2, 3 + t.Run("should version the first written block", func(t *testing.T) { + gotLedger, err := store.LedgerByHeight(context.Background(), 1) + assert.NoError(t, err) + for i := 1; i <= 3; i++ { + val, err := gotLedger.Get(flow.NewRegisterID(owner, fmt.Sprintf("%d", i))) + assert.NoError(t, err) + assert.Equal(t, []byte{byte(1)}, val) + } + }) + + // View at block N should have values 1->N+2 + t.Run("should version all blocks", func(t *testing.T) { + for block := 2; block < totalBlocks; block++ { + gotLedger, err := store.LedgerByHeight(context.Background(), uint64(block)) + assert.NoError(t, err) + // The keys 1->N-1 are defined in previous blocks + for i := 1; i < block; i++ { + val, err := gotLedger.Get(flow.NewRegisterID(owner, fmt.Sprintf("%d", i))) + assert.NoError(t, err) + assert.Equal(t, []byte{byte(i)}, val) + } + // The keys N->N+2 are defined in the queried block + for i := block; i <= block+2; i++ { + val, err := gotLedger.Get(flow.NewRegisterID(owner, fmt.Sprintf("%d", i))) + assert.NoError(t, err) + assert.Equal(t, []byte{byte(block)}, val) + } + } + }) + }) +} + +func TestInsertEvents(t *testing.T) { + + t.Parallel() + + test := func(eventEncodingVersion entities.EventEncodingVersion) { + + t.Run(eventEncodingVersion.String(), func(t *testing.T) { + t.Parallel() + + store := setupStore(t) + + events := test.EventGenerator(eventEncodingVersion) + + t.Run("should be able to insert events", func(t *testing.T) { + event, _ := emulator.SDKEventToFlow(events.New()) + events := []flowgo.Event{event} + + var blockHeight uint64 = 1 + + err := store.InsertEvents(blockHeight, events) + assert.NoError(t, err) + + t.Run("should be able to get inserted events", func(t *testing.T) { + gotEvents, err := store.EventsByHeight(context.Background(), blockHeight, "") + assert.NoError(t, err) + assert.Equal(t, events, gotEvents) + }) + })
+ }) + } + + test(entities.EventEncodingVersion_CCF_V0) + test(entities.EventEncodingVersion_JSON_CDC_V0) +} + +func TestEventsByHeight(t *testing.T) { + + t.Parallel() + test := func(eventEncodingVersion entities.EventEncodingVersion) { + + t.Run(eventEncodingVersion.String(), func(t *testing.T) { + t.Parallel() + + store := setupStore(t) + + events := test.EventGenerator(eventEncodingVersion) + + var ( + nonEmptyBlockHeight uint64 = 1 + emptyBlockHeight uint64 = 2 + nonExistentBlockHeight uint64 = 3 + + allEvents = make([]flowgo.Event, 10) + eventsA = make([]flowgo.Event, 0, 5) + eventsB = make([]flowgo.Event, 0, 5) + ) + + for i := range allEvents { + event, _ := emulator.SDKEventToFlow(events.New()) + + event.TransactionIndex = uint32(i) + event.EventIndex = uint32(i * 2) + + // interleave events of both types + if i%2 == 0 { + event.Type = "A" + eventsA = append(eventsA, event) + } else { + event.Type = "B" + eventsB = append(eventsB, event) + } + + allEvents[i] = event + } + + err := store.InsertEvents(nonEmptyBlockHeight, allEvents) + assert.NoError(t, err) + + err = store.InsertEvents(emptyBlockHeight, nil) + assert.NoError(t, err) + + t.Run("should be able to query by block", func(t *testing.T) { + t.Run("non-empty block", func(t *testing.T) { + events, err := store.EventsByHeight(context.Background(), nonEmptyBlockHeight, "") + assert.NoError(t, err) + assert.Equal(t, allEvents, events) + }) + + t.Run("empty block", func(t *testing.T) { + events, err := store.EventsByHeight(context.Background(), emptyBlockHeight, "") + assert.NoError(t, err) + assert.Empty(t, events) + }) + + t.Run("non-existent block", func(t *testing.T) { + events, err := store.EventsByHeight(context.Background(), nonExistentBlockHeight, "") + assert.NoError(t, err) + assert.Empty(t, events) + }) + }) + + t.Run("should be able to query by event type", func(t *testing.T) { + t.Run("type=A, block=1", func(t *testing.T) { + // should return only the type A events in block 1 + events, err := store.EventsByHeight(context.Background(), nonEmptyBlockHeight, "A") + assert.NoError(t, err) + assert.Equal(t, eventsA, events) + }) + + t.Run("type=B, block=1", func(t *testing.T) { + // should return only the type B events in block 1 + events, err := store.EventsByHeight(context.Background(), nonEmptyBlockHeight, "B") + assert.NoError(t, err) + assert.Equal(t, eventsB, events) + }) + }) + }) + } + + test(entities.EventEncodingVersion_CCF_V0) + test(entities.EventEncodingVersion_JSON_CDC_V0) +} + +// setupStore creates an in-memory emulator store for these tests. The +// store is not backed by a file, so the caller does not need to close it +// or clean up a temporary directory. +func setupStore(t *testing.T) *emulator.Store { + return emulator.NewMemoryStore() +} diff --git a/integration/internal/emulator/tests/temp_dep_test.go b/integration/internal/emulator/tests/temp_dep_test.go new file mode 100644 index 00000000000..6bd6219f1b6 --- /dev/null +++ b/integration/internal/emulator/tests/temp_dep_test.go @@ -0,0 +1,25 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package tests + +import "github.com/btcsuite/btcd/chaincfg/chainhash" + +// this is added to resolve the issue with chainhash ambiguous import, +// the code is not used, but it's needed to force go.mod specify and retain chainhash version +// workaround for issue: https://github.com/golang/go/issues/27899 +var _ = chainhash.Hash{} diff --git a/integration/internal/emulator/tests/transaction_test.go b/integration/internal/emulator/tests/transaction_test.go new file mode 100644 index 00000000000..cc7202ff2f3 --- /dev/null +++ b/integration/internal/emulator/tests/transaction_test.go @@ -0,0 +1,2145 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "strings" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + + "github.com/onflow/flow-go-sdk" + flowsdk "github.com/onflow/flow-go-sdk" + "github.com/onflow/flow-go-sdk/crypto" + "github.com/onflow/flow-go-sdk/templates" + "github.com/onflow/flow-go-sdk/test" + + fvmerrors "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/evm/stdlib" + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func setupTransactionTests(t *testing.T, opts ...emulator.Option) ( + *emulator.Blockchain, + *emulator.SDKAdapter, +) { + b, err := emulator.New(opts...) + require.NoError(t, err) + + logger := zerolog.Nop() + return b, emulator.NewSDKAdapter(&logger, b) +} + +func TestSubmitTransaction(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx1 + err = adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Execute tx1 + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // tx1 status becomes TransactionStatusSealed + tx1Result, err := adapter.GetTransactionResult(context.Background(), tx1.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx1Result.Status) +} + +// TODO: Add test case for missing ReferenceBlockID +// TODO: Add test case for missing ProposalKey +func TestSubmitTransaction_Invalid(t *testing.T) { + + t.Parallel() + + t.Run("Empty transaction", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + // Create empty transaction (no required fields) + tx := flowsdk.NewTransaction() + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, err, &emulator.IncompleteTransactionError{}) + }) + + t.Run("Missing script", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + // Create transaction with no Script field + tx := flowsdk.NewTransaction(). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, err, &emulator.IncompleteTransactionError{}) + }) + + t.Run("Invalid script", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + // Create transaction with invalid Script field + tx := flowsdk.NewTransaction(). + SetScript([]byte("this script cannot be parsed")). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, &emulator.InvalidTransactionScriptError{}, err) + }) + + t.Run("Missing gas limit", func(t *testing.T) { + + t.Parallel() + + t.Skip("TODO: transaction validation") + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + // Create transaction with no GasLimit field + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). 
+ SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, &emulator.IncompleteTransactionError{}, err) + }) + + t.Run("Missing payer account", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + // Create transaction with no PayerAccount field + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, err, &emulator.IncompleteTransactionError{}) + }) + + t.Run("Missing proposal key", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + // Create transaction with no ProposalKey field + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit) + + tx.ProposalKey = flowsdk.ProposalKey{} + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, &emulator.IncompleteTransactionError{}, err) + }) + + t.Run("Invalid sequence number", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + invalidSequenceNumber := b.ServiceKey().SequenceNumber + 2137 + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetPayer(serviceAccountAddress). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, invalidSequenceNumber). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + + require.Error(t, result.Error) + + assert.IsType(t, &emulator.FVMError{}, result.Error) + seqErr := fvmerrors.InvalidProposalSeqNumberError{} + ok := errors.As(result.Error, &seqErr) + assert.True(t, ok) + assert.Equal(t, invalidSequenceNumber, seqErr.ProvidedSeqNumber()) + }) + + const expiry = 10 + + t.Run("Missing reference block ID", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithTransactionExpiry(expiry), + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, &emulator.IncompleteTransactionError{}, err) + }) + + t.Run("Expired transaction", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithTransactionExpiry(expiry), + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + expiredBlock, err := b.GetLatestBlock() + require.NoError(t, err) + + // commit blocks until expiry window is exceeded + for i := 0; i < expiry+1; i++ { + _, _, err := b.ExecuteAndCommitBlock() + require.NoError(t, err) + } + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetReferenceBlockID(flowsdk.Identifier(expiredBlock.ID())). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, &emulator.ExpiredTransactionError{}, err) + }) + + t.Run("Invalid signature for provided data", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + tx.SetComputeLimit(100) // change data after signing + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + debug := emulator.NewTransactionInvalidSignature(&flowgo.TransactionBody{ + ReferenceBlockID: flowgo.Identifier{}, + Script: nil, + Arguments: nil, + GasLimit: flowgo.DefaultMaxTransactionGasLimit, + ProposalKey: flowgo.ProposalKey{ + Address: emulator.SDKAddressToFlow(serviceAccountAddress), + KeyIndex: b.ServiceKey().Index, + SequenceNumber: b.ServiceKey().SequenceNumber, + }, + Payer: emulator.SDKAddressToFlow(serviceAccountAddress), + Authorizers: emulator.SDKAddressesToFlow([]flowsdk.Address{serviceAccountAddress}), + PayloadSignatures: nil, + EnvelopeSignatures: nil, + }) + + assert.NotNil(t, result.Error) + assert.IsType(t, result.Debug, debug) + }) +} + +func TestSubmitTransaction_Duplicate(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // Submit same tx again (errors) + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, err, &emulator.DuplicateTransactionError{}) +} + +func TestSubmitTransaction_Reverted(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(`transaction { execute { panic("revert!") } }`)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit invalid tx1 + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.True(t, result.Reverted()) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // tx1 status becomes TransactionStatusSealed + tx1Result, err := adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx1Result.Status) + assert.Error(t, tx1Result.Error) +} + +func TestSubmitTransaction_Authorizers(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + accountKeys := test.AccountKeyGenerator() + + accountKeyB, signerB := accountKeys.NewWithSigner() + accountKeyB.SetWeight(flowsdk.AccountKeyWeightThreshold) + + accountAddressB, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKeyB}, nil) + assert.NoError(t, err) + + t.Run("Extra authorizers", func(t *testing.T) { + // script only supports one account + script := []byte(` + transaction { + prepare(signer: &Account) {} + } + `) + + // create transaction with two authorizing accounts + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress). + AddAuthorizer(accountAddressB) + + err = tx.SignPayload(accountAddressB, 0, signerB) + assert.NoError(t, err) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + assert.True(t, result.Reverted()) + + _, err = b.CommitBlock() + assert.NoError(t, err) + }) + + t.Run("Insufficient authorizers", func(t *testing.T) { + // script requires two accounts + script := []byte(` + transaction { + prepare(signerA: &Account, signerB: &Account) {} + } + `) + + // create transaction with two accounts + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.True(t, result.Reverted()) + + _, err = b.CommitBlock() + assert.NoError(t, err) + }) +} + +func TestSubmitTransaction_EnvelopeSignature(t *testing.T) { + + t.Parallel() + + t.Run("Missing envelope signature", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignPayload(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + assert.True(t, fvmerrors.HasErrorCode(result.Error, fvmerrors.ErrCodeAccountAuthorizationError)) + }) + + t.Run("Invalid account", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addresses := flowsdk.NewAddressGenerator(flowsdk.Emulator) + for { + _, err := adapter.GetAccount(context.Background(), addresses.NextAddress()) + if err != nil { + break + } + } + + nonExistentAccountAddress := addresses.Address() + + script := []byte(` + transaction { + prepare(signer: &Account) {} + } + `) + + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(nonExistentAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignPayload(nonExistentAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + assert.Error(t, result.Error) + assert.True(t, fvmerrors.IsAccountPublicKeyNotFoundError(result.Error)) + }) + + t.Run("Mismatched authorizer count", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithTransactionValidationEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addresses := flowsdk.NewAddressGenerator(flowsdk.Emulator) + for { + _, err := adapter.GetAccount(context.Background(), addresses.NextAddress()) + if err != nil { + break + } + } + + nonExistentAccountAddress := addresses.Address() + + script := []byte(` + transaction { + prepare() {} + } + `) + + tx := flowsdk.NewTransaction(). + SetScript(script). 
+ SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(nonExistentAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignPayload(nonExistentAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + assert.ErrorContains(t, result.Error, "authorizer count mismatch") + }) + + t.Run("Invalid key", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + // use key that does not exist on service account + invalidKey, _ := crypto.GeneratePrivateKey(crypto.ECDSA_P256, + []byte("invalid key invalid key invalid key invalid key invalid key invalid key")) + invalidSigner, err := crypto.NewNaiveSigner(invalidKey, crypto.SHA3_256) + require.NoError(t, err) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, invalidSigner) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + assert.True(t, fvmerrors.HasErrorCode(result.Error, fvmerrors.ErrCodeInvalidProposalSignatureError)) + }) + + t.Run("Key weights", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + + accountKeys := test.AccountKeyGenerator() + + accountKeyA, signerA := accountKeys.NewWithSigner() + accountKeyA.SetWeight(flowsdk.AccountKeyWeightThreshold / 2) + + accountKeyB, signerB := accountKeys.NewWithSigner() + accountKeyB.SetWeight(flowsdk.AccountKeyWeightThreshold / 2) + + accountAddressA, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKeyA, accountKeyB}, nil) + assert.NoError(t, err) + + script := []byte(` + transaction { + prepare(signer: &Account) {} + } + `) + + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(accountAddressA, 1, 0). + SetPayer(accountAddressA). 
+ AddAuthorizer(accountAddressA) + + // Insufficient keys + err = tx.SignEnvelope(accountAddressA, 1, signerB) + assert.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // Add key so we have sufficient keys + err = tx.SignEnvelope(accountAddressA, 0, signerA) + assert.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + t.Run("Insufficient key weight", func(t *testing.T) { + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + assert.True(t, fvmerrors.HasErrorCode(result.Error, fvmerrors.ErrCodeAccountAuthorizationError)) + }) + + t.Run("Sufficient key weight", func(t *testing.T) { + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + AssertTransactionSucceeded(t, result) + }) + }) +} + +func TestSubmitTransaction_PayloadSignatures(t *testing.T) { + + t.Parallel() + + t.Run("Missing payload signature", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + // create a new account, + // authorizer must be different from payer + + accountKeys := test.AccountKeyGenerator() + + accountKeyB, _ := accountKeys.NewWithSigner() + accountKeyB.SetWeight(flowsdk.AccountKeyWeightThreshold) + + accountAddressB, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKeyB}, nil) + assert.NoError(t, err) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(accountAddressB) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + assert.True(t, fvmerrors.HasErrorCode(result.Error, fvmerrors.ErrCodeAccountAuthorizationError)) + }) + + t.Run("Multiple payload signers", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + accountKeys := test.AccountKeyGenerator() + + accountKeyB, signerB := accountKeys.NewWithSigner() + accountKeyB.SetWeight(flowsdk.AccountKeyWeightThreshold) + + accountAddressB, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKeyB}, nil) + assert.NoError(t, err) + + multipleAccountScript := []byte(` + transaction { + prepare(signerA: &Account, signerB: &Account) { + log(signerA.address) + log(signerB.address) + } + } + `) + + tx := flowsdk.NewTransaction(). + SetScript(multipleAccountScript). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress). 
+ AddAuthorizer(accountAddressB) + + err = tx.SignPayload(accountAddressB, 0, signerB) + assert.NoError(t, err) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + assert.Contains(t, + result.Logs, + interpreter.NewUnmeteredAddressValueFromBytes(serviceAccountAddress.Bytes()).String(), + ) + + assert.Contains(t, + result.Logs, + interpreter.NewUnmeteredAddressValueFromBytes(accountAddressB.Bytes()).String(), + ) + }) +} + +func TestSubmitTransaction_Arguments(t *testing.T) { + + t.Parallel() + + addresses := test.AddressGenerator() + + fix64Value, _ := cadence.NewFix64("123456.00000") + uFix64Value, _ := cadence.NewUFix64("123456.00000") + + var tests = []struct { + argType cadence.Type + arg cadence.Value + }{ + { + cadence.BoolType, + cadence.NewBool(true), + }, + { + cadence.StringType, + cadence.String("foo"), + }, + { + cadence.AddressType, + cadence.NewAddress(addresses.New()), + }, + { + cadence.IntType, + cadence.NewInt(42), + }, + { + cadence.Int8Type, + cadence.NewInt8(42), + }, + { + cadence.Int16Type, + cadence.NewInt16(42), + }, + { + cadence.Int32Type, + cadence.NewInt32(42), + }, + { + cadence.Int64Type, + cadence.NewInt64(42), + }, + { + cadence.Int128Type, + cadence.NewInt128(42), + }, + { + cadence.Int256Type, + cadence.NewInt256(42), + }, + { + cadence.UIntType, + cadence.NewUInt(42), + }, + { + cadence.UInt8Type, + cadence.NewUInt8(42), + }, + { + cadence.UInt16Type, + cadence.NewUInt16(42), + }, + { + cadence.UInt32Type, + cadence.NewUInt32(42), + }, + { + cadence.UInt64Type, + cadence.NewUInt64(42), + }, + { + cadence.UInt128Type, + cadence.NewUInt128(42), + }, + { + cadence.UInt256Type, + cadence.NewUInt256(42), + }, + { + cadence.Word8Type, + cadence.NewWord8(42), + }, + { + cadence.Word16Type, + cadence.NewWord16(42), + }, + { + cadence.Word32Type, + cadence.NewWord32(42), + }, + { + cadence.Word64Type, + cadence.NewWord64(42), + }, + { + cadence.Fix64Type, + fix64Value, + }, + { + cadence.UFix64Type, + uFix64Value, + }, + { + &cadence.ConstantSizedArrayType{ + Size: 3, + ElementType: cadence.IntType, + }, + cadence.NewArray([]cadence.Value{ + cadence.NewInt(1), + cadence.NewInt(2), + cadence.NewInt(3), + }), + }, + { + &cadence.DictionaryType{ + KeyType: cadence.StringType, + ElementType: cadence.IntType, + }, + cadence.NewDictionary([]cadence.KeyValuePair{ + { + Key: cadence.String("a"), + Value: cadence.NewInt(1), + }, + { + Key: cadence.String("b"), + Value: cadence.NewInt(2), + }, + { + Key: cadence.String("c"), + Value: cadence.NewInt(3), + }, + }), + }, + } + + var script = func(argType cadence.Type) []byte { + return []byte(fmt.Sprintf(` + transaction(x: %s) { + execute { + log(x) + } + } + `, argType.ID())) + } + + for _, tt := range tests { + t.Run(tt.argType.ID(), func(t *testing.T) { + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx := flowsdk.NewTransaction(). + SetScript(script(tt.argType)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
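+ // each argument case follows the same flow: add the argument, sign with the
+ // service key, execute, and expect a single log entry from the script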
+ SetPayer(serviceAccountAddress) + + err := tx.AddArgument(tt.arg) + assert.NoError(t, err) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + assert.Len(t, result.Logs, 1) + }) + } + + t.Run("Log", func(t *testing.T) { + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + script := []byte(` + transaction(x: Int) { + execute { + log(x * 6) + } + } + `) + + x := 7 + + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + err := tx.AddArgument(cadence.NewInt(x)) + assert.NoError(t, err) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + require.Len(t, result.Logs, 1) + assert.Equal(t, "42", result.Logs[0]) + }) +} + +func TestSubmitTransaction_ProposerSequence(t *testing.T) { + + t.Parallel() + + t.Run("Valid transaction increases sequence number", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + script := []byte(` + transaction { + prepare(signer: &Account) {} + } + `) + prevSeq := b.ServiceKey().SequenceNumber + + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + tx1Result, err := adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx1Result.Status) + + assert.Equal(t, prevSeq+1, b.ServiceKey().SequenceNumber) + }) + + t.Run("Reverted transaction increases sequence number", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + prevSeq := b.ServiceKey().SequenceNumber + script := []byte(` + transaction { + prepare(signer: &Account) {} + execute { panic("revert!") } + } + `) + + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
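+ // the execute phase panics, so the transaction reverts, but the proposer's
+ // sequence number is still expected to advance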
+ SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + _, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + tx1Result, err := adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, prevSeq+1, b.ServiceKey().SequenceNumber) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx1Result.Status) + assert.Len(t, tx1Result.Events, 0) + assert.IsType(t, &emulator.ExecutionError{}, tx1Result.Error) + }) +} + +func TestGetTransaction(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + t.Run("Nonexistent", func(t *testing.T) { + _, err := adapter.GetTransaction(context.Background(), flowsdk.EmptyID) + if assert.Error(t, err) { + assert.IsType(t, &emulator.TransactionNotFoundError{}, err) + } + }) + + t.Run("Existent", func(t *testing.T) { + tx2, err := adapter.GetTransaction(context.Background(), tx1.ID()) + require.NoError(t, err) + + assert.Equal(t, tx1.ID(), tx2.ID()) + }) +} + +func TestGetTransactionResult(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, counterAddress := DeployAndGenerateAddTwoScript(t, adapter) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
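+ // the result is fetched at each stage below: unknown before submission,
+ // pending after submission and execution, and sealed once the block is committed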
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + result, err := adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusUnknown, result.Status) + require.Empty(t, result.Events) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err = adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusPending, result.Status) + require.Empty(t, result.Events) + + _, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + + result, err = adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusPending, result.Status) + require.Empty(t, result.Events) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + result, err = adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, result.Status) + + require.Len(t, result.Events, 3) + + event1 := result.Events[0] + assert.Equal(t, tx.ID(), event1.TransactionID) + assert.Equal(t, "flow.StorageCapabilityControllerIssued", event1.Type) + assert.Equal(t, 0, event1.EventIndex) + + event2 := result.Events[1] + assert.Equal(t, tx.ID(), event2.TransactionID) + assert.Equal(t, "flow.CapabilityPublished", event2.Type) + assert.Equal(t, 1, event2.EventIndex) + + event3 := result.Events[2] + addr, _ := common.BytesToAddress(counterAddress.Bytes()) + location := common.AddressLocation{ + Address: addr, + Name: "Counting", + } + assert.Equal(t, tx.ID(), event3.TransactionID) + assert.Equal(t, + string(location.TypeID(nil, "Counting.CountIncremented")), + event3.Type, + ) + assert.Equal(t, 2, event3.EventIndex) + fields := cadence.FieldsMappedByName(event3.Value) + assert.Len(t, fields, 1) + assert.Equal(t, + cadence.NewInt(2), + fields["count"], + ) +} + +// TestGetTxByBlockIDMethods tests the GetTransactionByBlockID and GetTransactionResultByBlockID +// methods return the correct transaction and transaction result for a given block ID. +func TestGetTxByBlockIDMethods(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + + const code = ` + transaction { + execute { + log("Hello, World!") + } + } + ` + + serviceKey := b.ServiceKey() + serviceAccountAddress := flowsdk.Address(serviceKey.Address) + + signer, err := serviceKey.Signer() + require.NoError(t, err) + + submittedTx := make([]*flowsdk.Transaction, 0) + + // submit 5 tx to be executed in a single block + for i := uint64(0); i < 5; i++ { + tx := flowsdk.NewTransaction(). + SetScript([]byte(code)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, serviceKey.Index, serviceKey.SequenceNumber). + SetPayer(serviceAccountAddress). 
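+ // all five transactions are proposed and paid by the service account;
+ // the sequence number is bumped manually after each submission so they
+ // remain valid within the same block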
+ AddAuthorizer(serviceAccountAddress) + + err = tx.SignEnvelope(serviceAccountAddress, serviceKey.Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // added to fix tx matching (nil vs empty slice) + tx.PayloadSignatures = []flow.TransactionSignature{} + + submittedTx = append(submittedTx, tx) + + // tx will be executed in the order they were submitted + serviceKey.SequenceNumber++ + } + + // execute the batch of transactions + block, expectedResults, err := b.ExecuteAndCommitBlock() + assert.NoError(t, err) + assert.Len(t, expectedResults, len(submittedTx)) + + results, err := adapter.GetTransactionResultsByBlockID(context.Background(), flowsdk.Identifier(block.ID())) + require.NoError(t, err) + assert.Len(t, results, len(submittedTx)) + + transactions, err := adapter.GetTransactionsByBlockID(context.Background(), flowsdk.Identifier(block.ID())) + require.NoError(t, err) + assert.Len(t, transactions, len(submittedTx)) + + // make sure the results and transactions returned match the transactions submitted, and are in + // the same order + for i, tx := range submittedTx { + assert.Equal(t, tx.ID(), transactions[i].ID()) + assert.Equal(t, submittedTx[i], transactions[i]) + + assert.Equal(t, tx.ID(), results[i].TransactionID) + assert.Equal(t, tx.ID(), expectedResults[i].TransactionID) + // note: expectedResults from ExecuteAndCommitBlock and results from GetTransactionResultsByBlockID + // use different representations. results is missing some data included in the flow.TransactionResult + // struct, so we can't compare them directly. + } +} + +const helloWorldContract = ` + access(all) contract HelloWorld { + + access(all) fun hello(): String { + return "Hello, World!" + } + } +` + +const callHelloTxTemplate = ` + import HelloWorld from 0x%s + transaction { + prepare() { + assert(HelloWorld.hello() == "Hello, World!") + } + } +` + +func TestHelloWorld_NewAccount(t *testing.T) { + + t.Parallel() + + accountKeys := test.AccountKeyGenerator() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + accountKey, accountSigner := accountKeys.NewWithSigner() + + contracts := []templates.Contract{ + { + Name: "HelloWorld", + Source: helloWorldContract, + }, + } + + createAccountTx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + contracts, + serviceAccountAddress, + ) + require.NoError(t, err) + + createAccountTx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
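+ // this transaction both creates the new account and deploys the HelloWorld
+ // contract to it; the service account proposes and pays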
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = createAccountTx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *createAccountTx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // createAccountTx status becomes TransactionStatusSealed + createAccountTxResult, err := adapter.GetTransactionResult(context.Background(), createAccountTx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, createAccountTxResult.Status) + + var newAccountAddress flowsdk.Address + for _, event := range createAccountTxResult.Events { + if event.Type != flowsdk.EventAccountCreated { + continue + } + accountCreatedEvent := flowsdk.AccountCreatedEvent(event) + newAccountAddress = accountCreatedEvent.Address() + break + } + + if newAccountAddress == flowsdk.EmptyAddress { + assert.Fail(t, "missing account created event") + } + + t.Logf("new account address: 0x%s", newAccountAddress.Hex()) + + account, err := adapter.GetAccount(context.Background(), newAccountAddress) + assert.NoError(t, err) + + assert.Equal(t, newAccountAddress, account.Address) + + // call hello world code + + accountKey = account.Keys[0] + + callHelloCode := []byte(fmt.Sprintf(callHelloTxTemplate, newAccountAddress.Hex())) + callHelloTx := flowsdk.NewTransaction(). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetScript(callHelloCode). + SetProposalKey(newAccountAddress, accountKey.Index, accountKey.SequenceNumber). + SetPayer(newAccountAddress) + + err = callHelloTx.SignEnvelope(newAccountAddress, accountKey.Index, accountSigner) + assert.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *callHelloTx) + assert.NoError(t, err) + + result, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) +} + +func TestHelloWorld_UpdateAccount(t *testing.T) { + + t.Parallel() + + accountKeys := test.AccountKeyGenerator() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + accountKey, accountSigner := accountKeys.NewWithSigner() + _ = accountSigner + + contracts := []templates.Contract{ + { + Name: "HelloWorld", + Source: `access(all) contract HelloWorld {}`, + }, + } + + createAccountTx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + createAccountTx. + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
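+ // the new account starts with an empty HelloWorld contract, which is
+ // replaced with the full implementation later in the test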
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = createAccountTx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *createAccountTx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // createAccountTx status becomes TransactionStatusSealed + createAccountTxResult, err := adapter.GetTransactionResult(context.Background(), createAccountTx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, createAccountTxResult.Status) + + var newAccountAddress flowsdk.Address + for _, event := range createAccountTxResult.Events { + if event.Type != flowsdk.EventAccountCreated { + continue + } + accountCreatedEvent := flowsdk.AccountCreatedEvent(event) + newAccountAddress = accountCreatedEvent.Address() + break + } + + if newAccountAddress == flowsdk.EmptyAddress { + assert.Fail(t, "missing account created event") + } + + t.Logf("new account address: 0x%s", newAccountAddress.Hex()) + + account, err := adapter.GetAccount(context.Background(), newAccountAddress) + assert.NoError(t, err) + + accountKey = account.Keys[0] + + updateAccountCodeTx := templates.UpdateAccountContract( + newAccountAddress, + templates.Contract{ + Name: "HelloWorld", + Source: helloWorldContract, + }, + ) + updateAccountCodeTx. + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(newAccountAddress, accountKey.Index, accountKey.SequenceNumber). + SetPayer(newAccountAddress) + + err = updateAccountCodeTx.SignEnvelope(newAccountAddress, accountKey.Index, accountSigner) + assert.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *updateAccountCodeTx) + assert.NoError(t, err) + + result, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // call hello world code + + accountKey.SequenceNumber++ + + callHelloCode := []byte(fmt.Sprintf(callHelloTxTemplate, newAccountAddress.Hex())) + callHelloTx := flowsdk.NewTransaction(). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetScript(callHelloCode). + SetProposalKey(newAccountAddress, accountKey.Index, accountKey.SequenceNumber). + SetPayer(newAccountAddress) + + err = callHelloTx.SignEnvelope(newAccountAddress, accountKey.Index, accountSigner) + assert.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *callHelloTx) + assert.NoError(t, err) + + result, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) +} + +func TestInfiniteTransaction(t *testing.T) { + + t.Parallel() + + const limit = 18 + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + emulator.WithTransactionMaxGasLimit(limit), + ) + + const code = ` + access(all) fun test() { + test() + } + + transaction { + execute { + test() + } + } + ` + + // Create a new account + + accountKeys := test.AccountKeyGenerator() + accountKey, signer := accountKeys.NewWithSigner() + accountAddress, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKey}, nil) + assert.NoError(t, err) + + // Sign the transaction using the new account. 
+ // Do not test using the service account, + // as the computation limit is disabled for it + + tx := flowsdk.NewTransaction(). + SetScript([]byte(code)). + SetComputeLimit(limit). + SetProposalKey(accountAddress, 0, 0). + SetPayer(accountAddress) + + err = tx.SignEnvelope(accountAddress, 0, signer) + assert.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // Execute tx + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + require.True(t, fvmerrors.IsComputationLimitExceededError(result.Error)) +} + +func TestTransactionExecutionLimit(t *testing.T) { + + t.Parallel() + + const code = ` + transaction { + execute { + var s: Int256 = 1024102410241024 + var i: Int256 = 0 + var a: Int256 = 7 + var b: Int256 = 5 + var c: Int256 = 2 + + while i < 150000 { + s = s * a + s = s / b + s = s / c + i = i + 1 + } + } + } + ` + + t.Run("ExceedingLimit", func(t *testing.T) { + + t.Parallel() + + const limit = 2000 + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + emulator.WithTransactionMaxGasLimit(limit), + ) + + // Create a new account + + accountKeys := test.AccountKeyGenerator() + accountKey, signer := accountKeys.NewWithSigner() + accountAddress, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKey}, nil) + assert.NoError(t, err) + + // Sign the transaction using the new account. + // Do not test using the service account, + // as the computation limit is disabled for it + + tx := flowsdk.NewTransaction(). + SetScript([]byte(code)). + SetComputeLimit(limit). + SetProposalKey(accountAddress, 0, 0). + SetPayer(accountAddress) + + err = tx.SignEnvelope(accountAddress, 0, signer) + assert.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // Execute tx + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + require.True(t, fvmerrors.IsComputationLimitExceededError(result.Error)) + }) + + t.Run("SufficientLimit", func(t *testing.T) { + + t.Parallel() + + const limit = 19000 + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + emulator.WithTransactionMaxGasLimit(limit), + ) + + // Create a new account + + accountKeys := test.AccountKeyGenerator() + accountKey, signer := accountKeys.NewWithSigner() + accountAddress, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKey}, nil) + assert.NoError(t, err) + + // Sign the transaction using the new account. + // Do not test using the service account, + // as the computation limit is disabled for it + + tx := flowsdk.NewTransaction(). + SetScript([]byte(code)). + SetComputeLimit(limit). + SetProposalKey(accountAddress, 0, 0). 
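+ // with the 19000 compute limit the Int256 loop in the script is expected
+ // to complete without exceeding the limit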
+ SetPayer(accountAddress) + + err = tx.SignEnvelope(accountAddress, 0, signer) + assert.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // Execute tx + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.NoError(t, result.Error) + }) +} + +func TestSubmitTransactionWithCustomLogger(t *testing.T) { + + t.Parallel() + + var memlog bytes.Buffer + memlogWrite := io.Writer(&memlog) + logger := zerolog.New(memlogWrite).Level(zerolog.DebugLevel) + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + emulator.WithLogger(logger), + emulator.WithTransactionFeesEnabled(true), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx1 + err = adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Execute tx1 + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // tx1 status becomes TransactionStatusSealed + tx1Result, err := adapter.GetTransactionResult(context.Background(), tx1.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx1Result.Status) + + var meter Meter + scanner := bufio.NewScanner(&memlog) + for scanner.Scan() { + txt := scanner.Text() + if strings.Contains(txt, "transaction execution data") { + err = json.Unmarshal([]byte(txt), &meter) + } + } + + assert.NoError(t, err) + assert.Greater(t, meter.LedgerInteractionUsed, 0) + assert.Greater(t, meter.ComputationUsed, 0) + assert.Greater(t, meter.MemoryEstimate, 0) + assert.Greater(t, len(meter.ComputationIntensities), 0) + assert.Greater(t, len(meter.MemoryIntensities), 0) + +} + +type Meter struct { + LedgerInteractionUsed int `json:"ledgerInteractionUsed"` + ComputationUsed int `json:"computationUsed"` + MemoryEstimate int `json:"memoryEstimate"` + ComputationIntensities MeteredComputationIntensities `json:"computationIntensities"` + MemoryIntensities MeteredMemoryIntensities `json:"memoryIntensities"` +} + +type MeteredComputationIntensities map[common.ComputationKind]uint + +type MeteredMemoryIntensities map[common.MemoryKind]uint + +func IncrementHelper( + t *testing.T, + b emulator.Emulator, + adapter *emulator.SDKAdapter, + counterAddress flowsdk.Address, + addTwoScript string, + expected int, + expectSetup bool, +) { + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
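+ // the helper submits a single increment transaction and walks it through the
+ // unknown -> pending -> sealed statuses, then checks the emitted events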
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + result, err := adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusUnknown, result.Status) + require.Empty(t, result.Events) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err = adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusPending, result.Status) + require.Empty(t, result.Events) + + _, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + + result, err = adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusPending, result.Status) + require.Empty(t, result.Events) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + result, err = adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, result.Status) + + var expectedEventIndex int + if expectSetup { + require.Len(t, result.Events, 3) + + event1 := result.Events[0] + assert.Equal(t, tx.ID(), event1.TransactionID) + assert.Equal(t, "flow.StorageCapabilityControllerIssued", event1.Type) + assert.Equal(t, 0, event1.EventIndex) + + event2 := result.Events[1] + assert.Equal(t, tx.ID(), event2.TransactionID) + assert.Equal(t, "flow.CapabilityPublished", event2.Type) + assert.Equal(t, 1, event2.EventIndex) + + expectedEventIndex = 2 + } else { + require.Len(t, result.Events, 1) + expectedEventIndex = 0 + } + incrementedEvent := result.Events[expectedEventIndex] + + addr, _ := common.BytesToAddress(counterAddress.Bytes()) + location := common.AddressLocation{ + Address: addr, + Name: "Counting", + } + assert.Equal(t, tx.ID(), incrementedEvent.TransactionID) + assert.Equal(t, + string(location.TypeID(nil, "Counting.CountIncremented")), + incrementedEvent.Type, + ) + assert.Equal(t, expectedEventIndex, incrementedEvent.EventIndex) + fields := cadence.FieldsMappedByName(incrementedEvent.Value) + assert.Len(t, fields, 1) + assert.Equal(t, + cadence.NewInt(expected), + fields["count"], + ) +} + +// TestTransactionWithCadenceRandom checks Cadence's random function works +// within a transaction +func TestTransactionWithCadenceRandom(t *testing.T) { + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + code := ` + transaction { + prepare() { + assert(revertibleRandom() >= 0) + } + } + ` + callRandomTx := flowsdk.NewTransaction(). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetScript([]byte(code)). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
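+ // the script only asserts that revertibleRandom() can be called within a
+ // transaction; the concrete random value is not checked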
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = callRandomTx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *callRandomTx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) +} + +func TestEVMTransaction(t *testing.T) { + serviceAddr := flowgo.Emulator.Chain().ServiceAddress() + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(bytes: [UInt8; 20]) { + execute { + let addr = EVM.EVMAddress(bytes: bytes) + log(addr) + } + } + `, + serviceAddr.HexWithPrefix(), + )) + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + // generate random address + genArr := make([]cadence.Value, 20) + for i := range genArr { + genArr[i] = cadence.UInt8(i) + } + addressBytesArray := cadence.NewArray(genArr).WithType(stdlib.EVMAddressBytesCadenceType) + + tx := flowsdk.NewTransaction(). + SetScript(code). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + err := tx.AddArgument(addressBytesArray) + assert.NoError(t, err) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + require.Len(t, result.Logs, 1) + require.Equal(t, result.Logs[0], fmt.Sprintf("A.%s.EVM.EVMAddress(bytes: %s)", serviceAddr, addressBytesArray.String())) +} diff --git a/integration/internal/emulator/tests/vm_test.go b/integration/internal/emulator/tests/vm_test.go new file mode 100644 index 00000000000..5e378589ca0 --- /dev/null +++ b/integration/internal/emulator/tests/vm_test.go @@ -0,0 +1,82 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tests_test + +import ( + "testing" + + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go-sdk/test" + + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestVm(t *testing.T) { + + t.Parallel() + + test := func(eventEncodingVersion entities.EventEncodingVersion) { + t.Run(eventEncodingVersion.String(), func(t *testing.T) { + t.Parallel() + t.Run("should be able to convert", func(t *testing.T) { + + t.Parallel() + + idGenerator := test.IdentifierGenerator() + + eventGenerator := test.EventGenerator(eventEncodingVersion) + event1, err := emulator.SDKEventToFlow(eventGenerator.New()) + assert.NoError(t, err) + + event2, err := emulator.SDKEventToFlow(eventGenerator.New()) + assert.NoError(t, err) + + txnId := flowgo.Identifier(idGenerator.New()) + output := fvm.ProcedureOutput{ + Logs: []string{"TestLog1", "TestLog2"}, + Events: []flowgo.Event{event1, event2}, + ComputationUsed: 5, + MemoryEstimate: 1211, + Err: nil, + } + + tr, err := emulator.VMTransactionResultToEmulator(txnId, output) + assert.NoError(t, err) + + assert.Equal(t, txnId, flowgo.Identifier(tr.TransactionID)) + assert.Equal(t, output.Logs, tr.Logs) + + flowEvents, err := emulator.FlowEventsToSDK(output.Events) + assert.NoError(t, err) + assert.Equal(t, flowEvents, tr.Events) + + assert.Equal(t, output.ComputationUsed, tr.ComputationUsed) + assert.Equal(t, output.MemoryEstimate, tr.MemoryEstimate) + assert.Equal(t, output.Err, tr.Error) + }) + }) + } + + test(entities.EventEncodingVersion_JSON_CDC_V0) + test(entities.EventEncodingVersion_CCF_V0) +} diff --git a/integration/internal/emulator/utils/unittest/fixtures.go b/integration/internal/emulator/utils/unittest/fixtures.go new file mode 100644 index 00000000000..70efad250ca --- /dev/null +++ b/integration/internal/emulator/utils/unittest/fixtures.go @@ -0,0 +1,61 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package unittest + +import ( + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go-sdk/test" + + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TransactionFixture() flowgo.TransactionBody { + return *emulator.SDKTransactionToFlow(*test.TransactionGenerator().New()) +} + +func StorableTransactionResultFixture(eventEncodingVersion entities.EventEncodingVersion) emulator.StorableTransactionResult { + events := test.EventGenerator(eventEncodingVersion) + + eventA, _ := emulator.SDKEventToFlow(events.New()) + eventB, _ := emulator.SDKEventToFlow(events.New()) + + return emulator.StorableTransactionResult{ + ErrorCode: 42, + ErrorMessage: "foo", + Logs: []string{"a", "b", "c"}, + Events: []flowgo.Event{ + eventA, + eventB, + }, + } +} + +func FullCollectionFixture(n int) flowgo.Collection { + transactions := make([]*flowgo.TransactionBody, n) + for i := 0; i < n; i++ { + tx := TransactionFixture() + transactions[i] = &tx + } + + return flowgo.Collection{ + Transactions: transactions, + } +} diff --git a/integration/localnet/builder/bootstrap.go b/integration/localnet/builder/bootstrap.go index e3ed2c91ea2..d1d63971d2b 100644 --- a/integration/localnet/builder/bootstrap.go +++ b/integration/localnet/builder/bootstrap.go @@ -390,14 +390,11 @@ func prepareExecutionService(container testnet.ContainerConfig, i int, n int) Se panic(err) } - enableNewIngestionEngine := true - service.Command = append(service.Command, "--triedir=/trie", fmt.Sprintf("--rpc-addr=%s:%s", container.ContainerName, testnet.GRPCPort), fmt.Sprintf("--cadence-tracing=%t", cadenceTracing), fmt.Sprintf("--extensive-tracing=%t", extesiveTracing), - fmt.Sprintf("--enable-new-ingestion-engine=%v", enableNewIngestionEngine), "--execution-data-dir=/data/execution-data", "--chunk-data-pack-dir=/data/chunk-data-pack", ) diff --git a/integration/localnet/conf/tempo-local.yaml b/integration/localnet/conf/tempo-local.yaml index d2f4089bbf8..fd453459942 100644 --- a/integration/localnet/conf/tempo-local.yaml +++ b/integration/localnet/conf/tempo-local.yaml @@ -41,7 +41,7 @@ storage: index_downsample_bytes: 1000 # number of bytes per index record encoding: zstd # block encoding/compression. options: none, gzip, lz4-64k, lz4-256k, lz4-1M, lz4, snappy, zstd, s2 wal: - path: /tmp/tempo/wal # where to store the the wal locally + path: /tmp/tempo/wal # where to store the wal locally encoding: snappy # wal encoding/compression. 
options: none, gzip, lz4-64k, lz4-256k, lz4-1M, lz4, snappy, zstd, s2 local: path: /tmp/tempo/blocks @@ -50,4 +50,4 @@ storage: queue_depth: 10000 overrides: - metrics_generator_processors: [service-graphs, span-metrics] \ No newline at end of file + metrics_generator_processors: [service-graphs, span-metrics] diff --git a/integration/utils/emulator_client.go b/integration/utils/emulator_client.go index 1af763e6cff..6d89ccf45ac 100644 --- a/integration/utils/emulator_client.go +++ b/integration/utils/emulator_client.go @@ -6,26 +6,25 @@ import ( "github.com/onflow/cadence" jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/flow-emulator/adapters" - emulator "github.com/onflow/flow-emulator/emulator" "github.com/rs/zerolog" sdk "github.com/onflow/flow-go-sdk" "github.com/onflow/flow-go-sdk/templates" + emulator "github.com/onflow/flow-go/integration/internal/emulator" "github.com/onflow/flow-go/model/flow" ) // EmulatorClient is a wrapper around the emulator to implement the same interface // used by the SDK client. Used for testing against the emulator. type EmulatorClient struct { - adapter *adapters.SDKAdapter + adapter *emulator.SDKAdapter } func NewEmulatorClient(blockchain emulator.Emulator) *EmulatorClient { logger := zerolog.Nop() - adapter := adapters.NewSDKAdapter(&logger, blockchain) + adapter := emulator.NewSDKAdapter(&logger, blockchain) client := &EmulatorClient{ adapter: adapter, } diff --git a/ledger/complete/mtrie/node/node.go b/ledger/complete/mtrie/node/node.go index 8446b9e2919..bd1d6b08140 100644 --- a/ledger/complete/mtrie/node/node.go +++ b/ledger/complete/mtrie/node/node.go @@ -211,18 +211,18 @@ func (n *Node) Path() *ledger.Path { return nil } -// Payload returns the the Node's payload. +// Payload returns the Node's payload. // Do NOT MODIFY returned slices! func (n *Node) Payload() *ledger.Payload { return n.payload } -// LeftChild returns the the Node's left child. +// LeftChild returns the Node's left child. // Only INTERIM nodes have children. // Do NOT MODIFY returned Node! func (n *Node) LeftChild() *Node { return n.lChild } -// RightChild returns the the Node's right child. +// RightChild returns the Node's right child. // Only INTERIM nodes have children. // Do NOT MODIFY returned Node! func (n *Node) RightChild() *Node { return n.rChild } diff --git a/ledger/complete/wal/checkpointer.go b/ledger/complete/wal/checkpointer.go index c9081134439..b67f2385440 100644 --- a/ledger/complete/wal/checkpointer.go +++ b/ledger/complete/wal/checkpointer.go @@ -1047,7 +1047,7 @@ func CopyCheckpointFile(filename string, from string, to string) ( []string, error, ) { - // It's possible that the trie dir does not yet exist. If not this will create the the required path + // It's possible that the trie dir does not yet exist. If not this will create the required path err := os.MkdirAll(to, 0700) if err != nil { return nil, err @@ -1091,7 +1091,7 @@ func CopyCheckpointFile(filename string, from string, to string) ( // the `to` directory func SoftlinkCheckpointFile(filename string, from string, to string) ([]string, error) { - // It's possible that the trie dir does not yet exist. If not this will create the the required path + // It's possible that the trie dir does not yet exist. 
If not this will create the required path err := os.MkdirAll(to, 0700) if err != nil { return nil, err diff --git a/model/convert/service_event.go b/model/convert/service_event.go index f8edf54aeae..7482f2ecc6f 100644 --- a/model/convert/service_event.go +++ b/model/convert/service_event.go @@ -1170,69 +1170,12 @@ func convertVersionBoundaries(array cadence.Array) ( boundaries := make([]flow.VersionBoundary, len(array.Values)) for i, cadenceVal := range array.Values { - boundary, err := DecodeCadenceValue( - fmt.Sprintf(".Values[%d]", i), - cadenceVal, - func(structVal cadence.Struct) ( - flow.VersionBoundary, - error, - ) { - if structVal.Type() == nil { - return flow.VersionBoundary{}, fmt.Errorf("VersionBoundary struct doesn't have type") - } - - fields := cadence.FieldsMappedByName(structVal) - - const expectedFieldCount = 2 - if len(fields) < expectedFieldCount { - return flow.VersionBoundary{}, fmt.Errorf( - "incorrect number of fields (%d != %d)", - len(fields), - expectedFieldCount, - ) - } - - blockHeightValue, err := getField[cadence.Value](fields, "blockHeight") - if err != nil { - return flow.VersionBoundary{}, fmt.Errorf("failed to decode VersionBoundary struct: %w", err) - } - - versionValue, err := getField[cadence.Value](fields, "version") - if err != nil { - return flow.VersionBoundary{}, fmt.Errorf("failed to decode VersionBoundary struct: %w", err) - } - - height, err := DecodeCadenceValue( - ".blockHeight", - blockHeightValue, - func(cadenceVal cadence.UInt64) ( - uint64, - error, - ) { - return uint64(cadenceVal), nil - }, - ) - if err != nil { - return flow.VersionBoundary{}, err - } - - version, err := DecodeCadenceValue( - ".version", - versionValue, - convertSemverVersion, - ) - if err != nil { - return flow.VersionBoundary{}, err - } - - return flow.VersionBoundary{ - BlockHeight: height, - Version: version, - }, nil - }, - ) + boundary, err := VersionBoundary(cadenceVal) if err != nil { - return nil, err + return nil, decodeError{ + location: fmt.Sprintf(".Values[%d]", i), + err: err, + } } boundaries[i] = boundary } @@ -1240,6 +1183,75 @@ func convertVersionBoundaries(array cadence.Array) ( return boundaries, nil } +// VersionBoundary decodes a single version boundary from the given Cadence value. 
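+// The decoding logic was previously inlined in convertVersionBoundaries, which
+// now calls this function for each element of the boundaries array.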
+func VersionBoundary(value cadence.Value) ( + flow.VersionBoundary, + error, +) { + boundary, err := DecodeCadenceValue( + "VersionBoundary", + value, + func(structVal cadence.Struct) ( + flow.VersionBoundary, + error, + ) { + if structVal.Type() == nil { + return flow.VersionBoundary{}, fmt.Errorf("VersionBoundary struct doesn't have type") + } + + fields := cadence.FieldsMappedByName(structVal) + + const expectedFieldCount = 2 + if len(fields) < expectedFieldCount { + return flow.VersionBoundary{}, fmt.Errorf( + "incorrect number of fields (%d != %d)", + len(fields), + expectedFieldCount, + ) + } + + blockHeightValue, err := getField[cadence.Value](fields, "blockHeight") + if err != nil { + return flow.VersionBoundary{}, fmt.Errorf("failed to decode VersionBoundary struct: %w", err) + } + + versionValue, err := getField[cadence.Value](fields, "version") + if err != nil { + return flow.VersionBoundary{}, fmt.Errorf("failed to decode VersionBoundary struct: %w", err) + } + + height, err := DecodeCadenceValue( + ".blockHeight", + blockHeightValue, + func(cadenceVal cadence.UInt64) ( + uint64, + error, + ) { + return uint64(cadenceVal), nil + }, + ) + if err != nil { + return flow.VersionBoundary{}, err + } + + version, err := DecodeCadenceValue( + ".version", + versionValue, + convertSemverVersion, + ) + if err != nil { + return flow.VersionBoundary{}, err + } + + return flow.VersionBoundary{ + BlockHeight: height, + Version: version, + }, nil + }, + ) + return boundary, err +} + func convertSemverVersion(structVal cadence.Struct) ( string, error, diff --git a/model/flow/transaction_timing.go b/model/flow/transaction_timing.go index 5f2c58812de..3a9da43eee1 100644 --- a/model/flow/transaction_timing.go +++ b/model/flow/transaction_timing.go @@ -10,6 +10,7 @@ type TransactionTiming struct { Received time.Time Finalized time.Time Executed time.Time + Sealed time.Time } func (t TransactionTiming) ID() Identifier { diff --git a/module/hotstuff.go b/module/hotstuff.go index 8610ce0bce1..785aeed9988 100644 --- a/module/hotstuff.go +++ b/module/hotstuff.go @@ -22,7 +22,7 @@ type HotStuff interface { // // Block proposals must be submitted in order and only if they extend a // block already known to HotStuff core. - SubmitProposal(proposal *model.Proposal) + SubmitProposal(proposal *model.SignedProposal) } // HotStuffFollower is run by non-consensus nodes to observe the block chain diff --git a/module/jobqueue/README.md b/module/jobqueue/README.md index e36bc060144..9c9dbf8f239 100644 --- a/module/jobqueue/README.md +++ b/module/jobqueue/README.md @@ -77,7 +77,7 @@ The jobqueue architecture is optimized for "pull" style processes, where the job Some use cases might require "push" style jobs where there is a job producer that create new jobs, and a consumer that processes work from the producer. This is possible with the jobqueue, but requires the producer persist the jobs to a database, then implement the `Head` and `AtIndex` methods that allow accessing jobs by sequential `uint64` indexes. ### TODOs -1. Jobs at different index are processed in parallel, it's possible that there is a job takes a long time to work on, and causing too many completed jobs cached in memory before being used to update the the last processed job index. +1. Jobs at different index are processed in parallel, it's possible that there is a job takes a long time to work on, and causing too many completed jobs cached in memory before being used to update the last processed job index. 
`maxSearchAhead` will allow the job consumer to stop consume more blocks if too many jobs are completed, but the job at index lastProcesssed + 1 has not been unprocessed yet. The difference between `maxSearchAhead` and `maxProcessing` is that: `maxProcessing` allows at most `maxProcessing` number of works to process jobs. However, even if there is worker available, it might not be assigned to a job, because the job at index lastProcesssed +1 has not been done, it won't work on an job with index higher than `lastProcesssed + maxSearchAhead`. 2. accept callback to get notified when the consecutive job index is finished. diff --git a/module/metrics.go b/module/metrics.go index f43c8b9325e..15204afe081 100644 --- a/module/metrics.go +++ b/module/metrics.go @@ -1019,9 +1019,6 @@ type ExecutionMetrics interface { // ExecutionCollectionRequestSent reports when a request for a collection is sent to a collection node ExecutionCollectionRequestSent() - // Unused - ExecutionCollectionRequestRetried() - // ExecutionSync reports when the state syncing is triggered or stopped. ExecutionSync(syncing bool) @@ -1076,6 +1073,10 @@ type TransactionMetrics interface { // works if the transaction was earlier added as received. TransactionFinalized(txID flow.Identifier, when time.Time) + // TransactionSealed reports the time spent between the transaction being received and sealed. Reporting only + // works if the transaction was earlier added as received. + TransactionSealed(txID flow.Identifier, when time.Time) + // TransactionExecuted reports the time spent between the transaction being received and executed. Reporting only // works if the transaction was earlier added as received. TransactionExecuted(txID flow.Identifier, when time.Time) diff --git a/module/metrics/execution.go b/module/metrics/execution.go index 37d113061b7..e269a70de64 100644 --- a/module/metrics/execution.go +++ b/module/metrics/execution.go @@ -849,7 +849,6 @@ func (ec *ExecutionCollector) ExecutionTransactionExecuted( if stats.Failed { ec.totalFailedTransactionsCounter.Inc() } - } // ExecutionChunkDataPackGenerated reports stats on chunk data pack generation @@ -982,10 +981,6 @@ func (ec *ExecutionCollector) ExecutionCollectionRequestSent() { ec.collectionRequestSent.Inc() } -func (ec *ExecutionCollector) ExecutionCollectionRequestRetried() { - ec.collectionRequestRetried.Inc() -} - func (ec *ExecutionCollector) ExecutionBlockDataUploadStarted() { ec.blockDataUploadsInProgress.Inc() } diff --git a/module/metrics/noop.go b/module/metrics/noop.go index 17460bf460a..3a18e5f418b 100644 --- a/module/metrics/noop.go +++ b/module/metrics/noop.go @@ -193,7 +193,6 @@ func (nc *NoopCollector) ReadValuesSize(byte uint64) func (nc *NoopCollector) ReadDuration(duration time.Duration) {} func (nc *NoopCollector) ReadDurationPerItem(duration time.Duration) {} func (nc *NoopCollector) ExecutionCollectionRequestSent() {} -func (nc *NoopCollector) ExecutionCollectionRequestRetried() {} func (nc *NoopCollector) RuntimeTransactionParsed(dur time.Duration) {} func (nc *NoopCollector) RuntimeTransactionChecked(dur time.Duration) {} func (nc *NoopCollector) RuntimeTransactionInterpreted(dur time.Duration) {} @@ -214,6 +213,7 @@ func (nc *NoopCollector) ScriptExecutionNotIndexed() func (nc *NoopCollector) TransactionResultFetched(dur time.Duration, size int) {} func (nc *NoopCollector) TransactionReceived(txID flow.Identifier, when time.Time) {} func (nc *NoopCollector) TransactionFinalized(txID flow.Identifier, when time.Time) {} +func (nc *NoopCollector) 
TransactionSealed(txID flow.Identifier, when time.Time) {} func (nc *NoopCollector) TransactionExecuted(txID flow.Identifier, when time.Time) {} func (nc *NoopCollector) TransactionExpired(txID flow.Identifier) {} func (nc *NoopCollector) TransactionValidated() {} diff --git a/module/metrics/transaction.go b/module/metrics/transaction.go index 8bea3e9adea..474d1ba5fe1 100644 --- a/module/metrics/transaction.go +++ b/module/metrics/transaction.go @@ -18,9 +18,11 @@ type TransactionCollector struct { logTimeToFinalized bool logTimeToExecuted bool logTimeToFinalizedExecuted bool + logTimeToSealed bool timeToFinalized prometheus.Summary timeToExecuted prometheus.Summary timeToFinalizedExecuted prometheus.Summary + timeToSealed prometheus.Summary transactionSubmission *prometheus.CounterVec transactionSize prometheus.Histogram scriptExecutedDuration *prometheus.HistogramVec @@ -40,6 +42,7 @@ func NewTransactionCollector( logTimeToFinalized bool, logTimeToExecuted bool, logTimeToFinalizedExecuted bool, + logTimeToSealed bool, ) *TransactionCollector { tc := &TransactionCollector{ @@ -48,6 +51,7 @@ func NewTransactionCollector( logTimeToFinalized: logTimeToFinalized, logTimeToExecuted: logTimeToExecuted, logTimeToFinalizedExecuted: logTimeToFinalizedExecuted, + logTimeToSealed: logTimeToSealed, timeToFinalized: promauto.NewSummary(prometheus.SummaryOpts{ Name: "time_to_finalized_seconds", Namespace: namespaceAccess, @@ -91,6 +95,20 @@ func NewTransactionCollector( AgeBuckets: 5, BufCap: 500, }), + timeToSealed: promauto.NewSummary(prometheus.SummaryOpts{ + Name: "time_to_seal_seconds", + Namespace: namespaceAccess, + Subsystem: subsystemTransactionTiming, + Help: "the duration of how long it took between the transaction was received until it was sealed", + Objectives: map[float64]float64{ + 0.01: 0.001, + 0.5: 0.05, + 0.99: 0.001, + }, + MaxAge: 10 * time.Minute, + AgeBuckets: 5, + BufCap: 500, + }), transactionSubmission: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "transaction_submission", Namespace: namespaceAccess, @@ -269,6 +287,27 @@ func (tc *TransactionCollector) TransactionExecuted(txID flow.Identifier, when t } } +func (tc *TransactionCollector) TransactionSealed(txID flow.Identifier, when time.Time) { + t, updated := tc.transactionTimings.Adjust(txID, func(t *flow.TransactionTiming) *flow.TransactionTiming { + t.Sealed = when + return t + }) + + if !updated { + tc.log.Debug(). + Str("transaction_id", txID.String()). + Msg("failed to update TransactionSealed metric") + return + } + + tc.trackTTS(t, tc.logTimeToSealed) + + // remove transaction timing from mempool if sealed + if !t.Sealed.IsZero() { + tc.transactionTimings.Remove(txID) + } +} + func (tc *TransactionCollector) trackTTF(t *flow.TransactionTiming, log bool) { if t.Received.IsZero() || t.Finalized.IsZero() { return @@ -317,6 +356,20 @@ func (tc *TransactionCollector) trackTTFE(t *flow.TransactionTiming, log bool) { } } +func (tc *TransactionCollector) trackTTS(t *flow.TransactionTiming, log bool) { + if t.Received.IsZero() || t.Sealed.IsZero() { + return + } + duration := t.Sealed.Sub(t.Received).Seconds() + + tc.timeToSealed.Observe(duration) + + if log { + tc.log.Info().Str("transaction_id", t.TransactionID.String()).Float64("duration", duration). 
+ Msg("transaction time to sealed") + } +} + func (tc *TransactionCollector) TransactionSubmissionFailed() { tc.transactionSubmission.WithLabelValues("failed").Inc() } diff --git a/module/mock/access_metrics.go b/module/mock/access_metrics.go index 21ecc03740f..df3cb8ad8c2 100644 --- a/module/mock/access_metrics.go +++ b/module/mock/access_metrics.go @@ -138,6 +138,11 @@ func (_m *AccessMetrics) TransactionResultFetched(dur time.Duration, size int) { _m.Called(dur, size) } +// TransactionSealed provides a mock function with given fields: txID, when +func (_m *AccessMetrics) TransactionSealed(txID flow.Identifier, when time.Time) { + _m.Called(txID, when) +} + // TransactionSubmissionFailed provides a mock function with given fields: func (_m *AccessMetrics) TransactionSubmissionFailed() { _m.Called() diff --git a/module/mock/execution_metrics.go b/module/mock/execution_metrics.go index 6adc14e02a2..619fca3d60e 100644 --- a/module/mock/execution_metrics.go +++ b/module/mock/execution_metrics.go @@ -71,11 +71,6 @@ func (_m *ExecutionMetrics) ExecutionCollectionExecuted(dur time.Duration, stats _m.Called(dur, stats) } -// ExecutionCollectionRequestRetried provides a mock function with given fields: -func (_m *ExecutionMetrics) ExecutionCollectionRequestRetried() { - _m.Called() -} - // ExecutionCollectionRequestSent provides a mock function with given fields: func (_m *ExecutionMetrics) ExecutionCollectionRequestSent() { _m.Called() diff --git a/module/mock/hot_stuff.go b/module/mock/hot_stuff.go index 4801da856c7..7c1ba755027 100644 --- a/module/mock/hot_stuff.go +++ b/module/mock/hot_stuff.go @@ -60,7 +60,7 @@ func (_m *HotStuff) Start(_a0 irrecoverable.SignalerContext) { } // SubmitProposal provides a mock function with given fields: proposal -func (_m *HotStuff) SubmitProposal(proposal *model.Proposal) { +func (_m *HotStuff) SubmitProposal(proposal *model.SignedProposal) { _m.Called(proposal) } diff --git a/module/mock/transaction_metrics.go b/module/mock/transaction_metrics.go index 9345b934a9a..5e96e52a4ad 100644 --- a/module/mock/transaction_metrics.go +++ b/module/mock/transaction_metrics.go @@ -39,6 +39,11 @@ func (_m *TransactionMetrics) TransactionResultFetched(dur time.Duration, size i _m.Called(dur, size) } +// TransactionSealed provides a mock function with given fields: txID, when +func (_m *TransactionMetrics) TransactionSealed(txID flow.Identifier, when time.Time) { + _m.Called(txID, when) +} + // TransactionSubmissionFailed provides a mock function with given fields: func (_m *TransactionMetrics) TransactionSubmissionFailed() { _m.Called() diff --git a/module/signature/signing_tags.go b/module/signature/signing_tags.go index f2d142b4253..00d7e06903c 100644 --- a/module/signature/signing_tags.go +++ b/module/signature/signing_tags.go @@ -61,7 +61,7 @@ var ( // NewBLSHasher returns a hasher to be used for BLS signing and verifying // in the protocol and abstracts the hasher details from the protocol logic. // -// The hasher returned is the the expand-message step in the BLS hash-to-curve. +// The hasher returned is the expand-message step in the BLS hash-to-curve. // It uses a xof (extendable output function) based on KMAC128. It therefore has // 128-bytes outputs. 
func NewBLSHasher(tag string) hash.Hasher { diff --git a/module/state_synchronization/indexer/collection_executed_metric.go b/module/state_synchronization/indexer/collection_executed_metric.go index 814afbb3325..bc1ee3fd341 100644 --- a/module/state_synchronization/indexer/collection_executed_metric.go +++ b/module/state_synchronization/indexer/collection_executed_metric.go @@ -25,6 +25,8 @@ type CollectionExecutedMetricImpl struct { collections storage.Collections blocks storage.Blocks + + blockTransactions *stdmap.IdentifierMap // Map to track transactions for each block for sealed metrics } func NewCollectionExecutedMetricImpl( @@ -35,6 +37,7 @@ func NewCollectionExecutedMetricImpl( blocksToMarkExecuted *stdmap.Times, collections storage.Collections, blocks storage.Blocks, + blockTransactions *stdmap.IdentifierMap, ) (*CollectionExecutedMetricImpl, error) { return &CollectionExecutedMetricImpl{ log: log, @@ -44,16 +47,32 @@ func NewCollectionExecutedMetricImpl( blocksToMarkExecuted: blocksToMarkExecuted, collections: collections, blocks: blocks, + blockTransactions: blockTransactions, }, nil } // CollectionFinalized tracks collections to mark finalized func (c *CollectionExecutedMetricImpl) CollectionFinalized(light flow.LightCollection) { - if ti, found := c.collectionsToMarkFinalized.ByID(light.ID()); found { + lightID := light.ID() + if ti, found := c.collectionsToMarkFinalized.ByID(lightID); found { + + block, err := c.blocks.ByCollectionID(lightID) + if err != nil { + c.log.Warn().Err(err).Msg("could not find block by collection ID") + return + } + blockID := block.ID() + for _, t := range light.Transactions { c.accessMetrics.TransactionFinalized(t, ti) + + err = c.blockTransactions.Append(blockID, t) + if err != nil { + c.log.Warn().Err(err).Msg("could not append finalized tx to track sealed transactions") + continue + } } - c.collectionsToMarkFinalized.Remove(light.ID()) + c.collectionsToMarkFinalized.Remove(lightID) } } @@ -88,6 +107,24 @@ func (c *CollectionExecutedMetricImpl) BlockFinalized(block *flow.Block) { for _, t := range l.Transactions { c.accessMetrics.TransactionFinalized(t, now) + err = c.blockTransactions.Append(blockID, t) + + if err != nil { + c.log.Warn().Err(err).Msg("could not append finalized tx to track sealed transactions") + continue + } + } + } + + // Process block seals + for _, s := range block.Payload.Seals { + transactions, found := c.blockTransactions.Get(s.BlockID) + + if found { + for _, t := range transactions { + c.accessMetrics.TransactionSealed(t, now) + } + c.blockTransactions.Remove(s.BlockID) } } diff --git a/module/state_synchronization/indexer/indexer_core.go b/module/state_synchronization/indexer/indexer_core.go index aede5d6ac4f..22a6d16ea2a 100644 --- a/module/state_synchronization/indexer/indexer_core.go +++ b/module/state_synchronization/indexer/indexer_core.go @@ -287,9 +287,10 @@ func (c *IndexerCore) updateProgramCache(header *flow.Header, events []flow.Even tx.AddInvalidator(&accessInvalidator{ programs: &programInvalidator{ - invalidated: updatedContracts, + invalidated: updatedContracts, + invalidateAll: hasAuthorizedTransaction(collections, c.serviceAddress), }, - meterParamOverrides: &meterParamOverridesInvalidator{ + executionParameters: &executionParametersInvalidator{ invalidateAll: hasAuthorizedTransaction(collections, c.serviceAddress), }, }) @@ -325,7 +326,7 @@ func (c *IndexerCore) indexRegisters(registers map[ledger.Path]*ledger.Payload, return c.registers.Store(regEntries, height) } -// HandleCollection handles the 
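// The collection_executed_metric.go changes above keep a block-ID -> transaction-IDs map
// (blockTransactions) while transactions finalize, and flush it when a later block's payload
// carries a seal for that block. A toy sketch of that bookkeeping using plain Go maps follows;
// it is not the stdmap.IdentifierMap API, and all names (sealTracker, markSealed, ...) are
// illustrative only.
package main

import "fmt"

type sealTracker struct {
	// block ID -> IDs of transactions finalized in that block, awaiting a seal
	blockTransactions map[string][]string
}

// onTransactionFinalized mirrors the Append calls made in CollectionFinalized/BlockFinalized.
func (s *sealTracker) onTransactionFinalized(blockID, txID string) {
	s.blockTransactions[blockID] = append(s.blockTransactions[blockID], txID)
}

// onSeal mirrors the "Process block seals" loop: report every tracked transaction of the
// sealed block, then drop the entry so the map does not grow without bound.
func (s *sealTracker) onSeal(sealedBlockID string, markSealed func(txID string)) {
	txs, found := s.blockTransactions[sealedBlockID]
	if !found {
		return
	}
	for _, tx := range txs {
		markSealed(tx)
	}
	delete(s.blockTransactions, sealedBlockID)
}

func main() {
	tracker := &sealTracker{blockTransactions: map[string][]string{}}
	tracker.onTransactionFinalized("block-A", "tx-1")
	tracker.onTransactionFinalized("block-A", "tx-2")

	// A later finalized block carries a seal for block-A.
	tracker.onSeal("block-A", func(txID string) { fmt.Println("sealed:", txID) })
}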
response of the a collection request made earlier when a block was received. +// HandleCollection handles the response of the collection request made earlier when a block was received. // No errors expected during normal operations. func HandleCollection( collection *flow.Collection, diff --git a/module/state_synchronization/indexer/indexer_core_test.go b/module/state_synchronization/indexer/indexer_core_test.go index 5fd93d4b824..f446c0740a0 100644 --- a/module/state_synchronization/indexer/indexer_core_test.go +++ b/module/state_synchronization/indexer/indexer_core_test.go @@ -197,6 +197,8 @@ func (i *indexCoreTest) initIndexer() *indexCoreTest { require.NoError(i.t, err) blocksToMarkExecuted, err := stdmap.NewTimes(100) require.NoError(i.t, err) + blockTransactions, err := stdmap.NewIdentifierMap(100) + require.NoError(i.t, err) log := zerolog.New(os.Stdout) blocks := storagemock.NewBlocks(i.t) @@ -209,6 +211,7 @@ func (i *indexCoreTest) initIndexer() *indexCoreTest { blocksToMarkExecuted, i.collections, blocks, + blockTransactions, ) require.NoError(i.t, err) diff --git a/module/state_synchronization/indexer/util.go b/module/state_synchronization/indexer/util.go index d75f5541a02..5526776716b 100644 --- a/module/state_synchronization/indexer/util.go +++ b/module/state_synchronization/indexer/util.go @@ -92,15 +92,15 @@ var _ derived.TransactionInvalidator = (*accessInvalidator)(nil) // accessInvalidator is a derived.TransactionInvalidator that invalidates programs and meter param overrides. type accessInvalidator struct { programs *programInvalidator - meterParamOverrides *meterParamOverridesInvalidator + executionParameters *executionParametersInvalidator } func (inv *accessInvalidator) ProgramInvalidator() derived.ProgramInvalidator { return inv.programs } -func (inv *accessInvalidator) MeterParamOverridesInvalidator() derived.MeterParamOverridesInvalidator { - return inv.meterParamOverrides +func (inv *accessInvalidator) ExecutionParametersInvalidator() derived.ExecutionParametersInvalidator { + return inv.executionParameters } var _ derived.ProgramInvalidator = (*programInvalidator)(nil) @@ -121,17 +121,17 @@ func (inv *programInvalidator) ShouldInvalidateEntry(location common.AddressLoca return inv.invalidateAll || ok } -var _ derived.MeterParamOverridesInvalidator = (*meterParamOverridesInvalidator)(nil) +var _ derived.ExecutionParametersInvalidator = (*executionParametersInvalidator)(nil) -// meterParamOverridesInvalidator is a derived.MeterParamOverridesInvalidator that invalidates meter param overrides. -type meterParamOverridesInvalidator struct { +// executionParametersInvalidator is a derived.ExecutionParametersInvalidator that invalidates meter param overrides and execution version. 
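// The util.go hunk above renames the meter-param-overrides invalidator to an
// execution-parameters invalidator while keeping the same shape: a struct driven by a single
// invalidateAll flag, plus a compile-time interface assertion of the form
// `var _ Iface = (*impl)(nil)`. A minimal sketch of that pattern follows; the Invalidator
// interface and entry types here are illustrative, not the fvm/storage/derived interfaces.
package main

import "fmt"

// Illustrative invalidator contract.
type Invalidator interface {
	ShouldInvalidateEntries() bool
	ShouldInvalidateEntry(key string) bool
}

// Compile-time check that *flagInvalidator satisfies Invalidator, mirroring the
// interface assertions used in util.go.
var _ Invalidator = (*flagInvalidator)(nil)

// flagInvalidator invalidates either everything or nothing, driven by one flag,
// in the same spirit as the executionParametersInvalidator above.
type flagInvalidator struct {
	invalidateAll bool
}

func (inv *flagInvalidator) ShouldInvalidateEntries() bool     { return inv.invalidateAll }
func (inv *flagInvalidator) ShouldInvalidateEntry(string) bool { return inv.invalidateAll }

func main() {
	var inv Invalidator = &flagInvalidator{invalidateAll: true}
	fmt.Println(inv.ShouldInvalidateEntries(), inv.ShouldInvalidateEntry("example-key"))
}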
+type executionParametersInvalidator struct { invalidateAll bool } -func (inv *meterParamOverridesInvalidator) ShouldInvalidateEntries() bool { +func (inv *executionParametersInvalidator) ShouldInvalidateEntries() bool { return inv.invalidateAll } -func (inv *meterParamOverridesInvalidator) ShouldInvalidateEntry(_ struct{}, _ derived.MeterParamOverrides, _ *snapshot.ExecutionSnapshot) bool { +func (inv *executionParametersInvalidator) ShouldInvalidateEntry(_ struct{}, _ derived.StateExecutionParameters, _ *snapshot.ExecutionSnapshot) bool { return inv.invalidateAll } diff --git a/module/trace/constants.go b/module/trace/constants.go index 9e8ab96f3ad..1241ce765a0 100644 --- a/module/trace/constants.go +++ b/module/trace/constants.go @@ -185,11 +185,9 @@ const ( FVMEnvRandomSourceHistoryProvider SpanName = "fvm.env.randomSourceHistoryProvider" FVMEnvCreateAccount SpanName = "fvm.env.createAccount" FVMEnvAddAccountKey SpanName = "fvm.env.addAccountKey" - FVMEnvAddEncodedAccountKey SpanName = "fvm.env.addEncodedAccountKey" FVMEnvAccountKeysCount SpanName = "fvm.env.accountKeysCount" FVMEnvGetAccountKey SpanName = "fvm.env.getAccountKey" FVMEnvRevokeAccountKey SpanName = "fvm.env.revokeAccountKey" - FVMEnvRevokeEncodedAccountKey SpanName = "fvm.env.revokeEncodedAccountKey" FVMEnvUpdateAccountContractCode SpanName = "fvm.env.updateAccountContractCode" FVMEnvGetAccountContractCode SpanName = "fvm.env.getAccountContractCode" FVMEnvRemoveAccountContractCode SpanName = "fvm.env.removeAccountContractCode" diff --git a/network/message/Makefile b/network/message/Makefile index 9c83ad1c9dd..a9612fc3564 100644 --- a/network/message/Makefile +++ b/network/message/Makefile @@ -1,4 +1,4 @@ -# To re-generate the the protobuf go code, install tools first: +# To re-generate the protobuf go code, install tools first: # ``` # cd flow-go # make install-tools diff --git a/state/protocol/prg/prg.go b/state/protocol/prg/prg.go index 36b3b77751d..d17fd1a9ac0 100644 --- a/state/protocol/prg/prg.go +++ b/state/protocol/prg/prg.go @@ -17,7 +17,7 @@ const RandomSourceLength = crypto.SignatureLenBLSBLS12381 // The diversifier is used to further diversify the PRGs beyond the customizer. A diversifier // can be a slice of any length. If no diversification is needed, `diversifier` can be `nil`. // -// The function uses an extendable-output function (xof) to extract and expand the the input source, +// The function uses an extendable-output function (xof) to extract and expand the input source, // so that any source with enough entropy (at least 128 bits) can be used (no need to pre-hash). // Current implementation generates a ChaCha20-based CSPRG. 
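// The prg.go comment above describes an extract-and-expand construction: an XOF absorbs an
// entropy source together with a customizer/diversifier and then emits as many pseudo-random
// bytes as needed. Below is a toy illustration of that idea using cSHAKE128 from
// golang.org/x/crypto/sha3; it is not the implementation in state/protocol/prg (which the
// comment describes as KMAC128/ChaCha20 based), and the customizer value is made up.
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// expandSeed derives n pseudo-random bytes from a high-entropy source, domain-separated by a
// customizer and an optional diversifier (nil means no extra diversification).
func expandSeed(source, customizer, diversifier []byte, n int) ([]byte, error) {
	// The cSHAKE customization string provides domain separation, similar in spirit to the
	// customizer argument described in prg.go.
	xof := sha3.NewCShake128(nil, customizer)
	if _, err := xof.Write(diversifier); err != nil {
		return nil, err
	}
	if _, err := xof.Write(source); err != nil {
		return nil, err
	}
	out := make([]byte, n)
	if _, err := xof.Read(out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	source := make([]byte, 48) // stands in for a signature-sized random source
	out, err := expandSeed(source, []byte("example-customizer"), nil, 32)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out)
}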
// diff --git a/utils/unittest/execution_state.go b/utils/unittest/execution_state.go index 7265fb895fa..a5e911b3771 100644 --- a/utils/unittest/execution_state.go +++ b/utils/unittest/execution_state.go @@ -23,7 +23,7 @@ const ServiceAccountPrivateKeySignAlgo = crypto.ECDSAP256 const ServiceAccountPrivateKeyHashAlgo = hash.SHA2_256 // Pre-calculated state commitment with root account with the above private key -const GenesisStateCommitmentHex = "b921d979dd58c55c43f8918cf653578697ec75d8cc2782a0c447b8ee0c39b544" +const GenesisStateCommitmentHex = "c42fc978c2702793d2640e3ed8644ba54db4e92aa5d0501234dfbb9bbc5784fd" var GenesisStateCommitment flow.StateCommitment @@ -87,10 +87,10 @@ func genesisCommitHexByChainID(chainID flow.ChainID) string { return GenesisStateCommitmentHex } if chainID == flow.Testnet { - return "042170743acd6c7e8d14bb91b7296719cb61448c222a30163feb108d9994fd58" + return "e29456decb9ee90ad3ed1e1239383c18897b031ea851ff07f5f616657df4d4a0" } if chainID == flow.Sandboxnet { return "e1c08b17f9e5896f03fe28dd37ca396c19b26628161506924fbf785834646ea1" } - return "c67d4b16a38b4bf0d9d5b6c5f75c55079af969b847538ceec87bc00af1c50516" + return "e1989abf50fba23015251a313eefe2ceff45639a75252f4da5970dcda32dd95e" }
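The updated GenesisStateCommitmentHex values above are 32-byte state commitments encoded as hex, one per chain plus a fallback for all other chain IDs. A small sanity-check sketch for such constants follows; it assumes only the standard library, the map keys are illustrative labels rather than flow-go chain IDs, and the hex strings are copied from the hunk above.

package main

import (
	"encoding/hex"
	"fmt"
)

// Illustrative copies of the per-chain genesis commitments from the diff above;
// "fallback" is the value returned for all other chain IDs.
var genesisCommitments = map[string]string{
	"mainnet":    "c42fc978c2702793d2640e3ed8644ba54db4e92aa5d0501234dfbb9bbc5784fd",
	"testnet":    "e29456decb9ee90ad3ed1e1239383c18897b031ea851ff07f5f616657df4d4a0",
	"sandboxnet": "e1c08b17f9e5896f03fe28dd37ca396c19b26628161506924fbf785834646ea1",
	"fallback":   "e1989abf50fba23015251a313eefe2ceff45639a75252f4da5970dcda32dd95e",
}

func main() {
	for chain, commitHex := range genesisCommitments {
		b, err := hex.DecodeString(commitHex)
		if err != nil {
			panic(fmt.Sprintf("%s: invalid hex: %v", chain, err))
		}
		// A state commitment is a 32-byte ledger root hash.
		if len(b) != 32 {
			panic(fmt.Sprintf("%s: expected 32 bytes, got %d", chain, len(b)))
		}
		fmt.Printf("%s: ok (%d bytes)\n", chain, len(b))
	}
}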