diff --git a/Makefile b/Makefile index 8fa2b06495..d50c2db78b 100644 --- a/Makefile +++ b/Makefile @@ -33,7 +33,7 @@ GO_LOOM_GIT_REV = HEAD # Specifies the loomnetwork/transfer-gateway branch/revision to use. TG_GIT_REV = HEAD # loomnetwork/go-ethereum loomchain branch -ETHEREUM_GIT_REV = cce1b3f69354033160583e5576169f9b309ee62e +ETHEREUM_GIT_REV = 6128fa1a8c767035d3da6ef0c27ebb7778ce3713 # use go-plugin we get 'timeout waiting for connection info' error HASHICORP_GIT_REV = f4c3476bd38585f9ec669d10ed1686abd52b9961 LEVIGO_GIT_REV = c42d9e0ca023e2198120196f842701bb4c55d7b9 @@ -197,15 +197,13 @@ $(BINANCE_TGORACLE_DIR): git clone -q git@github.com:loomnetwork/binance-tgoracle.git $@ cd $(BINANCE_TGORACLE_DIR) && git checkout master && git pull && git checkout $(BINANCE_TG_GIT_REV) -$(PROMETHEUS_PROCFS_DIR): - # Temp workaround for https://github.com/prometheus/procfs/issues/221 - git clone -q git@github.com:prometheus/procfs $(PROMETHEUS_PROCFS_DIR) - cd $(PROMETHEUS_PROCFS_DIR) && git checkout master && git pull && git checkout d3b299e382e6acf1baa852560d862eca4ff643c8 - validators-tool: $(TRANSFER_GATEWAY_DIR) go build -tags gateway -o e2e/validators-tool $(PKG)/e2e/cmd -deps: $(PLUGIN_DIR) $(GO_ETHEREUM_DIR) $(SSHA3_DIR) $(PROMETHEUS_PROCFS_DIR) +deps: $(PLUGIN_DIR) $(GO_ETHEREUM_DIR) $(SSHA3_DIR) + # Temp workaround for https://github.com/prometheus/procfs/issues/221 + git clone -q git@github.com:prometheus/procfs $(PROMETHEUS_PROCFS_DIR) + cd $(PROMETHEUS_PROCFS_DIR) && git checkout master && git pull && git checkout d3b299e382e6acf1baa852560d862eca4ff643c8 # Lock down Prometheus golang client to v1.2.1 (newer versions use a different protobuf version) git clone -q git@github.com:prometheus/client_golang $(GOPATH)/src/github.com/prometheus/client_golang cd $(GOPATH)/src/github.com/prometheus/client_golang && git checkout master && git pull && git checkout v1.2.1 diff --git a/abci/backend/tendermint.go b/abci/backend/tendermint.go index ac69b568a5..e1dcc04396 100644 --- a/abci/backend/tendermint.go +++ b/abci/backend/tendermint.go @@ -17,7 +17,6 @@ import ( "github.com/spf13/viper" abci_server "github.com/tendermint/tendermint/abci/server" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" @@ -110,7 +109,6 @@ type Backend interface { // Returns the TCP or UNIX socket address the backend RPC server listens on RPCAddress() (string, error) EventBus() *types.EventBus // TODO: doesn't seem to be used, remove it - LoadBlockHeader(height int64) (*abci.Header, error) } type TendermintBackend struct { @@ -476,32 +474,3 @@ func (b *TendermintBackend) RunForever() { } }) } - -// LoadBlockHeader loads the block header for the given height. 
-func (b *TendermintBackend) LoadBlockHeader(height int64) (*abci.Header, error) { - config, err := b.parseConfig() - if err != nil { - return nil, err - } - - blockDB := dbm.NewDB("blockstore", dbm.DBBackendType(config.DBBackend), config.DBDir()) - defer blockDB.Close() - - blockStore := blockchain.NewBlockStore(blockDB) - blockMeta := blockStore.LoadBlockMeta(height) - if blockMeta == nil { - return nil, fmt.Errorf("block meta not found at height %v", height) - } - // Return just the data that blockHeaderFromAbciHeader() needs - return &abci.Header{ - ChainID: blockMeta.Header.ChainID, - Height: blockMeta.Header.Height, - Time: blockMeta.Header.Time, - NumTxs: blockMeta.Header.NumTxs, - LastBlockId: abci.BlockID{ - Hash: blockMeta.Header.LastBlockID.Hash, - }, - ValidatorsHash: blockMeta.Header.ValidatorsHash, - AppHash: blockMeta.Header.AppHash, - }, nil -} diff --git a/app.go b/app.go index 6155480a3c..75fdf6ceda 100644 --- a/app.go +++ b/app.go @@ -5,11 +5,8 @@ import ( "context" "encoding/binary" "encoding/hex" - "errors" "fmt" - "sync/atomic" "time" - "unsafe" "github.com/loomnetwork/go-loom/config" "github.com/loomnetwork/go-loom/util" @@ -54,7 +51,6 @@ type State interface { SetFeature(string, bool) SetMinBuildNumber(uint64) ChangeConfigSetting(name, value string) error - EVMState() *EVMState } type StoreState struct { @@ -64,7 +60,6 @@ type StoreState struct { validators loom.ValidatorSet getValidatorSet GetValidatorSet config *cctypes.Config - evmState *EVMState } var _ = State(&StoreState{}) @@ -106,11 +101,6 @@ func (s *StoreState) WithOnChainConfig(config *cctypes.Config) *StoreState { return s } -func (s *StoreState) WithEVMState(evmState *EVMState) *StoreState { - s.evmState = evmState - return s -} - func (s *StoreState) Range(prefix []byte) plugin.RangeData { return s.store.Range(prefix) } @@ -151,10 +141,6 @@ func (s *StoreState) Context() context.Context { return s.ctx } -func (s *StoreState) EVMState() *EVMState { - return s.evmState -} - const ( featurePrefix = "feature" MinBuildKey = "minbuild" @@ -248,7 +234,6 @@ func (s *StoreState) WithContext(ctx context.Context) State { ctx: ctx, validators: s.validators, getValidatorSet: s.getValidatorSet, - evmState: s.evmState, } } @@ -259,7 +244,6 @@ func (s *StoreState) WithPrefix(prefix []byte) State { ctx: s.ctx, validators: s.validators, getValidatorSet: s.getValidatorSet, - evmState: s.evmState, } } @@ -362,9 +346,12 @@ type CommittedTx struct { txHash []byte } -type ApplicationParams struct { - Store store.VersionedKVStore - Init func(State) error +type Application struct { + lastBlockHeader abci.Header + curBlockHeader abci.Header + curBlockHash []byte + Store store.VersionedKVStore + Init func(State) error TxHandler QueryHandler EventHandler @@ -378,18 +365,10 @@ type ApplicationParams struct { CreateContractUpkeepHandler func(state State) (KarmaHandler, error) GetValidatorSet GetValidatorSet EventStore store.EventStore + config *cctypes.Config + childTxRefs []evmaux.ChildTxRef // links Tendermint txs to EVM txs ReceiptsVersion int32 - EVMState *EVMState -} - -type Application struct { - ApplicationParams - lastBlockHeader unsafe.Pointer // *abci.Header - curBlockHeader abci.Header - curBlockHash []byte - config *cctypes.Config - childTxRefs []evmaux.ChildTxRef // links Tendermint txs to EVM txs - committedTxs []CommittedTx + committedTxs []CommittedTx } var _ abci.Application = &Application{} @@ -467,14 +446,6 @@ func init() { }, []string{}) } -func NewApplication(params ApplicationParams, lastBlockHeader 
*abci.Header) *Application { - a := &Application{ApplicationParams: params} - if lastBlockHeader != nil { - atomic.StorePointer(&a.lastBlockHeader, unsafe.Pointer(&lastBlockHeader)) - } - return a -} - func (a *Application) Info(req abci.RequestInfo) abci.ResponseInfo { return abci.ResponseInfo{ LastBlockAppHash: a.Store.Hash(), @@ -541,7 +512,7 @@ func (a *Application) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginB a.curBlockHeader, a.curBlockHash, a.GetValidatorSet, - ).WithOnChainConfig(a.config).WithEVMState(a.EVMState) + ).WithOnChainConfig(a.config) contractUpkeepHandler, err := a.CreateContractUpkeepHandler(upkeepState) if err != nil { panic(err) @@ -561,7 +532,7 @@ func (a *Application) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginB a.curBlockHeader, nil, a.GetValidatorSet, - ).WithOnChainConfig(a.config).WithEVMState(a.EVMState) + ).WithOnChainConfig(a.config) validatorManager, err := a.CreateValidatorManager(state) if err != registry.ErrNotFound { @@ -629,7 +600,7 @@ func (a *Application) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { a.curBlockHeader, nil, a.GetValidatorSet, - ).WithOnChainConfig(a.config).WithEVMState(a.EVMState) + ).WithOnChainConfig(a.config) validatorManager, err := a.CreateValidatorManager(state) if err != registry.ErrNotFound { @@ -686,10 +657,6 @@ func (a *Application) CheckTx(txBytes []byte) abci.ResponseCheckTx { a.GetValidatorSet, ).WithOnChainConfig(a.config) - if a.EVMState != nil { - state = state.WithEVMState(a.EVMState.Clone()) - } - // Receipts & events generated in CheckTx must be discarded since the app state changes they // reflect aren't persisted. defer a.ReceiptHandlerProvider.Store().DiscardCurrentReceipt() @@ -725,7 +692,7 @@ func (a *Application) DeliverTx(txBytes []byte) abci.ResponseDeliverTx { a.curBlockHeader, a.curBlockHash, a.GetValidatorSet, - ).WithOnChainConfig(a.config).WithEVMState(a.EVMState) + ).WithOnChainConfig(a.config) var r abci.ResponseDeliverTx @@ -758,7 +725,7 @@ func (a *Application) processTx(storeTx store.KVStoreTx, txBytes []byte, isCheck a.curBlockHeader, a.curBlockHash, a.GetValidatorSet, - ).WithOnChainConfig(a.config).WithEVMState(a.EVMState) + ).WithOnChainConfig(a.config) receiptHandler := a.ReceiptHandlerProvider.Store() defer receiptHandler.DiscardCurrentReceipt() @@ -811,7 +778,7 @@ func (a *Application) deliverTx2(storeTx store.KVStoreTx, txBytes []byte) abci.R a.curBlockHeader, a.curBlockHash, a.GetValidatorSet, - ).WithOnChainConfig(a.config).WithEVMState(a.EVMState) + ).WithOnChainConfig(a.config) receiptHandler := a.ReceiptHandlerProvider.Store() defer receiptHandler.DiscardCurrentReceipt() @@ -863,22 +830,13 @@ func (a *Application) Commit() abci.ResponseCommit { commitBlockLatency.With(lvs...).Observe(time.Since(begin).Seconds()) }(time.Now()) - if a.EVMState != nil { - // Commit EVM state changes to the EvmStore - if err := a.EVMState.Commit(); err != nil { - panic(err) - } - } - - storeOpts := store.VersionedKVStoreSaveOptions{ - FlushInterval: int64(a.config.GetAppStore().GetIAVLFlushInterval()), - } - appHash, _, err := a.Store.SaveVersion(&storeOpts) + appHash, _, err := a.Store.SaveVersion() if err != nil { panic(err) } height := a.curBlockHeader.GetHeight() + if err := a.EvmAuxStore.SaveChildTxRefs(a.childTxRefs); err != nil { // TODO: consider panic instead log.Error("Failed to save Tendermint -> EVM tx hash refs", "height", height, "err", err) @@ -893,8 +851,7 @@ func (a *Application) Commit() abci.ResponseCommit { // Update the last block header before 
emitting events in case the subscribers attempt to access // the latest committed state as soon as they receive an event. - curBlockHeader := a.curBlockHeader - atomic.StorePointer(&a.lastBlockHeader, unsafe.Pointer(&curBlockHeader)) + a.lastBlockHeader = a.curBlockHeader go func(height int64, blockHeader abci.Header, committedTxs []CommittedTx) { if err := a.EventHandler.EmitBlockTx(uint64(height), blockHeader.Time); err != nil { @@ -947,38 +904,13 @@ func (a *Application) height() int64 { } func (a *Application) ReadOnlyState() State { - lastBlockHeader := (*abci.Header)(atomic.LoadPointer(&a.lastBlockHeader)) - // When the node is started with no previous blockchain state (e.g. completely new chain) then - // there'll be a very brief period where lastBlockHeader will be nil (until Application.Commit is called for the - // first time). While lastBlockHeader is nil the node won't be able to return useful responses to most queries, - // so we just make it panic here so the clients get an obvious error. - // TODO: This is just quick hack, the proper way to deal with this scenario is to start the QueryServer only after - // the lastBlockHeader has been set. - if lastBlockHeader == nil { - panic(errors.New("unable to respond to query, app isn't ready yet")) - } - - appStateSnapshot, err := a.Store.GetSnapshotAt(lastBlockHeader.Height) - if err != nil { - panic(err) - } - - var evmStateSnapshot *EVMState - if a.EVMState != nil { - evmStateSnapshot, err = a.EVMState.GetSnapshot( - lastBlockHeader.Height, - store.GetEVMRootFromAppStore(appStateSnapshot), - ) - if err != nil { - panic(err) - } - } - + // TODO: the store snapshot should be created atomically, otherwise the block header might + // not match the state... need to figure out why this hasn't spectacularly failed already return NewStoreStateSnapshot( nil, - appStateSnapshot, - *lastBlockHeader, + a.Store.GetSnapshot(), + a.lastBlockHeader, nil, // TODO: last block hash! 
a.GetValidatorSet, - ).WithEVMState(evmStateSnapshot) + ) } diff --git a/app_test.go b/app_test.go index b72f66f496..9b3e04f435 100644 --- a/app_test.go +++ b/app_test.go @@ -60,8 +60,8 @@ func mockMultiWriterStore(flushInterval int64) (*store.MultiWriterAppStore, erro return nil, err } memDb, _ = db.LoadMemDB() - evmStore := store.NewEvmStore(memDb, 100, 0) - multiWriterStore, err := store.NewMultiWriterAppStore(iavlStore, evmStore) + evmStore := store.NewEvmStore(memDb, 100) + multiWriterStore, err := store.NewMultiWriterAppStore(iavlStore, evmStore, false) if err != nil { return nil, err } diff --git a/cmd/loom/db/db.go b/cmd/loom/db/db.go index e292f2d6f5..49b5751cc0 100644 --- a/cmd/loom/db/db.go +++ b/cmd/loom/db/db.go @@ -14,6 +14,8 @@ func NewDBCommand() *cobra.Command { cmd.AddCommand( newPruneDBCommand(), newCompactDBCommand(), + newDumpEVMStateCommand(), + newDumpEVMStateMultiWriterAppStoreCommand(), newDumpEVMStateFromEvmDB(), newGetEvmHeightCommand(), newGetAppHeightCommand(), diff --git a/cmd/loom/db/evm.go b/cmd/loom/db/evm.go index 614f4c93ac..fdda84a2d9 100644 --- a/cmd/loom/db/evm.go +++ b/cmd/loom/db/evm.go @@ -3,6 +3,7 @@ package db import ( + "context" "fmt" "math" "path" @@ -13,18 +14,235 @@ import ( gstate "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/loomnetwork/loomchain" "github.com/loomnetwork/loomchain/cmd/loom/common" cdb "github.com/loomnetwork/loomchain/db" + "github.com/loomnetwork/loomchain/events" + "github.com/loomnetwork/loomchain/evm" + "github.com/loomnetwork/loomchain/log" + "github.com/loomnetwork/loomchain/plugin" + "github.com/loomnetwork/loomchain/receipts" + registry "github.com/loomnetwork/loomchain/registry/factory" "github.com/loomnetwork/loomchain/store" "github.com/spf13/cobra" + abci "github.com/tendermint/tendermint/abci/types" + dbm "github.com/tendermint/tendermint/libs/db" ) +func newDumpEVMStateCommand() *cobra.Command { + var appHeight int64 + + cmd := &cobra.Command{ + Use: "evm-dump", + Short: "Dumps EVM state stored at a specific block height", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := common.ParseConfig() + if err != nil { + return err + } + + db, err := dbm.NewGoLevelDB(cfg.DBName, cfg.RootPath()) + if err != nil { + return err + } + appStore, err := store.NewIAVLStore(db, 0, appHeight, 0) + if err != nil { + return err + } + + eventHandler := loomchain.NewDefaultEventHandler(events.NewLogEventDispatcher()) + + regVer, err := registry.RegistryVersionFromInt(cfg.RegistryVersion) + if err != nil { + return err + } + createRegistry, err := registry.NewRegistryFactory(regVer) + if err != nil { + return err + } + + receiptHandlerProvider := receipts.NewReceiptHandlerProvider( + eventHandler, + cfg.EVMPersistentTxReceiptsMax, + nil, + ) + + // TODO: This should use snapshot obtained from appStore.ReadOnlyState() + storeTx := store.WrapAtomic(appStore).BeginTx() + state := loomchain.NewStoreState( + context.Background(), + storeTx, + abci.Header{ + Height: appStore.Version(), + }, + // it is possible to load the block hash from the TM block store, but probably don't + // need it for just dumping the EVM state + nil, + nil, + ) + + var newABMFactory plugin.NewAccountBalanceManagerFactoryFunc + if evm.EVMEnabled && cfg.EVMAccountsEnabled { + newABMFactory = plugin.NewAccountBalanceManagerFactory + } + + var accountBalanceManager evm.AccountBalanceManager + if newABMFactory != nil { + pvm := plugin.NewPluginVM( + 
common.NewDefaultContractsLoader(cfg), + state, + createRegistry(state), + eventHandler, + log.Default, + newABMFactory, + receiptHandlerProvider.Writer(), + receiptHandlerProvider.Reader(), + ) + createABM, err := newABMFactory(pvm) + if err != nil { + return err + } + accountBalanceManager = createABM(true) + if err != nil { + return err + } + } + + vm, err := evm.NewLoomEvm(state, accountBalanceManager, nil, false) + if err != nil { + return err + } + + fmt.Printf("\n--- EVM state at app height %d ---\n%s\n", appStore.Version(), string(vm.RawDump())) + return nil + }, + } + + cmdFlags := cmd.Flags() + cmdFlags.Int64Var(&appHeight, "app-height", 0, "Dump EVM state as it was the specified app height") + return cmd +} + +func newDumpEVMStateMultiWriterAppStoreCommand() *cobra.Command { + var appHeight int64 + var evmDBName string + cmd := &cobra.Command{ + Use: "evm-dump-2", + Short: "Dumps EVM state stored at a specific block height from MultiWriterAppStore", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := common.ParseConfig() + if err != nil { + return err + } + + db, err := dbm.NewGoLevelDB(cfg.DBName, cfg.RootPath()) + if err != nil { + return err + } + evmDB, err := cdb.LoadDB( + "goleveldb", + evmDBName, + cfg.RootPath(), + 256, + 4, + false, + ) + if err != nil { + return err + } + iavlStore, err := store.NewIAVLStore(db, 0, appHeight, 0) + if err != nil { + return err + } + evmStore := store.NewEvmStore(evmDB, 100) + if err := evmStore.LoadVersion(iavlStore.Version()); err != nil { + return err + } + + appStore, err := store.NewMultiWriterAppStore(iavlStore, evmStore, false) + if err != nil { + return err + } + eventHandler := loomchain.NewDefaultEventHandler(events.NewLogEventDispatcher()) + + regVer, err := registry.RegistryVersionFromInt(cfg.RegistryVersion) + if err != nil { + return err + } + createRegistry, err := registry.NewRegistryFactory(regVer) + if err != nil { + return err + } + + receiptHandlerProvider := receipts.NewReceiptHandlerProvider( + eventHandler, + cfg.EVMPersistentTxReceiptsMax, + nil, + ) + + // TODO: This should use snapshot obtained from appStore.ReadOnlyState() + storeTx := store.WrapAtomic(appStore).BeginTx() + state := loomchain.NewStoreState( + context.Background(), + storeTx, + abci.Header{ + Height: appStore.Version(), + }, + // it is possible to load the block hash from the TM block store, but probably don't + // need it for just dumping the EVM state + nil, + nil, + ) + + var newABMFactory plugin.NewAccountBalanceManagerFactoryFunc + if evm.EVMEnabled && cfg.EVMAccountsEnabled { + newABMFactory = plugin.NewAccountBalanceManagerFactory + } + + var accountBalanceManager evm.AccountBalanceManager + if newABMFactory != nil { + pvm := plugin.NewPluginVM( + common.NewDefaultContractsLoader(cfg), + state, + createRegistry(state), + eventHandler, + log.Default, + newABMFactory, + receiptHandlerProvider.Writer(), + receiptHandlerProvider.Reader(), + ) + createABM, err := newABMFactory(pvm) + if err != nil { + return err + } + accountBalanceManager = createABM(true) + if err != nil { + return err + } + } + + vm, err := evm.NewLoomEvm(state, accountBalanceManager, nil, false) + if err != nil { + return err + } + + fmt.Printf("\n--- EVM state at app height %d ---\n%s\n", appStore.Version(), string(vm.RawDump())) + return nil + }, + } + + cmdFlags := cmd.Flags() + cmdFlags.Int64Var(&appHeight, "app-height", 0, "Dump EVM state as it was the specified app height") + cmdFlags.StringVar(&evmDBName, "evmdb-name", "evm", "Name of EVM state 
database") + return cmd +} + func newDumpEVMStateFromEvmDB() *cobra.Command { var appHeight int64 var evmDBName string var dumpStorageTrie bool cmd := &cobra.Command{ - Use: "evm-dump", + Use: "evm-dump-3", Short: "Dumps EVM state stored at a specific block height from evm.db", RunE: func(cmd *cobra.Command, args []string) error { cfg, err := common.ParseConfig() @@ -44,7 +262,7 @@ func newDumpEVMStateFromEvmDB() *cobra.Command { return err } - evmStore := store.NewEvmStore(evmDB, 100, -1) + evmStore := store.NewEvmStore(evmDB, 100) if err := evmStore.LoadVersion(appHeight); err != nil { return err } @@ -53,7 +271,21 @@ func newDumpEVMStateFromEvmDB() *cobra.Command { fmt.Printf("version: %d, root: %x\n", version, root) - srcStateDB := gstate.NewDatabase(store.NewLoomEthDB(evmDB)) + // TODO: This should use snapshot obtained from appStore.ReadOnlyState() + storeTx := store.WrapAtomic(evmStore).BeginTx() + state := loomchain.NewStoreState( + context.Background(), + storeTx, + abci.Header{ + Height: appHeight, + }, + // it is possible to load the block hash from the TM block store, but probably don't + // need it for just dumping the EVM state + nil, + nil, + ) + + srcStateDB := gstate.NewDatabase(evm.NewLoomEthdb(state, nil)) srcStateDBTrie, err := srcStateDB.OpenTrie(evmRoot) if err != nil { fmt.Printf("cannot open trie, %s\n", evmRoot.Hex()) @@ -132,7 +364,7 @@ func newGetEvmHeightCommand() *cobra.Command { } defer db.Close() - evmStore := store.NewEvmStore(db, 100, -1) + evmStore := store.NewEvmStore(db, 100) if err := evmStore.LoadVersion(math.MaxInt64); err != nil { return err } diff --git a/cmd/loom/loom.go b/cmd/loom/loom.go index 81c0761895..55caa2bd2f 100644 --- a/cmd/loom/loom.go +++ b/cmd/loom/loom.go @@ -73,7 +73,6 @@ import ( "github.com/pkg/errors" stdprometheus "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" - abci "github.com/tendermint/tendermint/abci/types" "golang.org/x/crypto/ed25519" ) @@ -405,10 +404,10 @@ func newRunCommand() *cobra.Command { app, err := loadApp(chainID, cfg, loader, backend, appHeight) if err != nil { - return errors.Wrap(err, "failed to initialize app") + return err } if err := backend.Start(app); err != nil { - return errors.Wrap(err, "failed to initialize backend") + return err } nodeSigner, err := backend.NodeSigner() @@ -608,15 +607,12 @@ func destroyBlockIndexDB(cfg *config.Config) error { return nil } -func loadAppStore( - cfg *config.Config, logger *loom.Logger, targetVersion int64, -) (store.VersionedKVStore, *store.EvmStore, error) { +func loadAppStore(cfg *config.Config, logger *loom.Logger, targetVersion int64) (store.VersionedKVStore, error) { db, err := cdb.LoadDB( - cfg.DBBackend, cfg.DBName, cfg.RootPath(), cfg.DBBackendConfig.CacheSizeMegs, - cfg.DBBackendConfig.WriteBufferMegs, cfg.Metrics.Database, + cfg.DBBackend, cfg.DBName, cfg.RootPath(), cfg.DBBackendConfig.CacheSizeMegs, cfg.DBBackendConfig.WriteBufferMegs, cfg.Metrics.Database, ) if err != nil { - return nil, nil, err + return nil, err } if cfg.AppStore.CompactOnLoad { @@ -630,47 +626,60 @@ func loadAppStore( } var appStore store.VersionedKVStore - var evmStore *store.EvmStore if cfg.AppStore.Version == 1 { // TODO: cleanup these hardcoded numbers - logger.Info("Loading IAVL Store") - appStore, err = store.NewIAVLStore(db, cfg.AppStore.MaxVersions, targetVersion, cfg.AppStore.IAVLFlushInterval) - if err != nil { - return nil, nil, err + if cfg.AppStore.PruneInterval > int64(0) { + logger.Info("Loading Pruning IAVL Store") + appStore, err = 
store.NewPruningIAVLStore(db, store.PruningIAVLStoreConfig{ + MaxVersions: cfg.AppStore.MaxVersions, + BatchSize: cfg.AppStore.PruneBatchSize, + Interval: time.Duration(cfg.AppStore.PruneInterval) * time.Second, + Logger: logger, + FlushInterval: cfg.AppStore.IAVLFlushInterval, + }) + if err != nil { + return nil, err + } + } else { + logger.Info("Loading IAVL Store") + appStore, err = store.NewIAVLStore(db, cfg.AppStore.MaxVersions, targetVersion, cfg.AppStore.IAVLFlushInterval) + if err != nil { + return nil, err + } } } else if cfg.AppStore.Version == 3 { logger.Info("Loading Multi-Writer App Store") iavlStore, err := store.NewIAVLStore(db, cfg.AppStore.MaxVersions, targetVersion, cfg.AppStore.IAVLFlushInterval) if err != nil { - return nil, nil, err + return nil, err } - evmStore, err = loadEvmStore(cfg, iavlStore.Version()) + evmStore, err := loadEvmStore(cfg, iavlStore.Version()) if err != nil { - return nil, nil, err + return nil, err } - appStore, err = store.NewMultiWriterAppStore(iavlStore, evmStore) + appStore, err = store.NewMultiWriterAppStore(iavlStore, evmStore, cfg.AppStore.SaveEVMStateToIAVL) if err != nil { - return nil, nil, err + return nil, err } } else { - return nil, nil, errors.New("Invalid AppStore.Version config setting") + return nil, errors.New("Invalid AppStore.Version config setting") } if cfg.LogStateDB { appStore, err = store.NewLogStore(appStore) if err != nil { - return nil, nil, err + return nil, err } } if cfg.CachingStoreConfig.CachingEnabled { appStore, err = store.NewVersionedCachingStore(appStore, cfg.CachingStoreConfig, appStore.Version()) if err != nil { - return nil, nil, err + return nil, err } logger.Info("VersionedCachingStore enabled") } - return appStore, evmStore, nil + return appStore, nil } func loadEventStore(cfg *config.Config, logger *loom.Logger) (store.EventStore, error) { @@ -701,12 +710,7 @@ func loadEvmStore(cfg *config.Config, targetVersion int64) (*store.EvmStore, err if err != nil { return nil, err } - if cfg.AppStore.IAVLFlushInterval != evmStoreCfg.FlushInterval && - cfg.AppStore.IAVLFlushInterval > 0 && - evmStoreCfg.FlushInterval > 0 { - return nil, errors.New("invalid config, AppStore.IAVLFlushInterval doesn't match EvmStore.FlushInterval") - } - evmStore := store.NewEvmStore(db, evmStoreCfg.NumCachedRoots, evmStoreCfg.FlushInterval) + evmStore := store.NewEvmStore(db, evmStoreCfg.NumCachedRoots) if err := evmStore.LoadVersion(targetVersion); err != nil { return nil, err } @@ -722,7 +726,8 @@ func loadApp( ) (*loomchain.Application, error) { logger := log.Root - appStore, evmStore, err := loadAppStore(cfg, log.Default, appHeight) + appStore, err := loadAppStore(cfg, log.Default, appHeight) + if err != nil { return nil, err } @@ -809,7 +814,6 @@ func loadApp( ), nil }) - var evmState *loomchain.EVMState if evm.EVMEnabled { vmManager.Register(vm.VMType_EVM, func(state loomchain.State) (vm.VM, error) { var createABM evm.AccountBalanceManagerFactoryFunc @@ -832,13 +836,8 @@ func loadApp( } return evm.NewLoomVm(state, eventHandler, receiptHandlerProvider.Writer(), createABM, cfg.EVMDebugEnabled), nil }) - - evmState, err = loomchain.NewEVMState(evmStore) - if err != nil { - return nil, err - } } - store.LogEthDBBatch = cfg.LogEthDbBatch + evm.LogEthDbBatch = cfg.LogEthDbBatch deployTxHandler := &vm.DeployTxHandler{ Manager: vmManager, @@ -1117,15 +1116,7 @@ func loadApp( // as it doesn't pass control to other middlewares after it. 
postCommitMiddlewares = append(postCommitMiddlewares, nonceTxHandler.PostCommitMiddleware()) - var lastBlockHeader *abci.Header - if appStore.Version() > 0 { - lastBlockHeader, err = b.LoadBlockHeader(appStore.Version()) - if err != nil { - return nil, err - } - } - - return loomchain.NewApplication(loomchain.ApplicationParams{ + return &loomchain.Application{ Store: appStore, Init: init, TxHandler: loomchain.MiddlewareTxHandler( @@ -1143,8 +1134,7 @@ func loadApp( GetValidatorSet: getValidatorSet, EvmAuxStore: evmAuxStore, ReceiptsVersion: cfg.ReceiptsVersion, - EVMState: evmState, - }, lastBlockHeader), nil + }, nil } func deployContract( diff --git a/config/config.go b/config/config.go index dc04606bd9..3f39230d22 100755 --- a/config/config.go +++ b/config/config.go @@ -747,11 +747,9 @@ AppStore: PruneInterval: {{ .AppStore.PruneInterval }} # Number of versions to prune at a time. PruneBatchSize: {{ .AppStore.PruneBatchSize }} - # Specifies the number of IAVL tree versions that should be kept in memory before writing a new - # version to disk. - # If set to zero every version will be written to disk unless overridden via the on-chain config. - # If set to -1 every version will always be written to disk, regardless of the on-chain config. - IAVLFlushInterval: {{ .AppStore.IAVLFlushInterval }} + # If true the app store will write EVM state to both IAVLStore and EvmStore + # This config works with AppStore Version 3 (MultiWriterAppStore) only + SaveEVMStateToIAVL: {{ .AppStore.SaveEVMStateToIAVL }} {{if .EventStore -}} # # EventStore @@ -775,12 +773,6 @@ EvmStore: CacheSizeMegs: {{.EvmStore.CacheSizeMegs}} # NumCachedRoots defines a number of in-memory cached EVM roots NumCachedRoots: {{.EvmStore.NumCachedRoots}} - # Specifies the number of Merkle tree versions that should be kept in memory before writing a - # new version to disk. - # If set to zero every version will be written to disk unless overridden via the on-chain config - # AppStore.IAVLFlushInterval setting. - # If set to -1 every version will always be written to disk, regardless of the on-chain config. 
- FlushInterval: {{.EvmStore.FlushInterval}} {{end}} {{if .Web3 -}} diff --git a/e2e/chainconfig-loom.yaml b/e2e/chainconfig-loom.yaml index 7876eabbd9..ef84521915 100644 --- a/e2e/chainconfig-loom.yaml +++ b/e2e/chainconfig-loom.yaml @@ -6,5 +6,6 @@ ContractLogLevel: "debug" LoomLogLevel: "debug" AppStore: Version: 3 + SaveEVMStateToIAVL: false CachingStore: CachingEnabled: true \ No newline at end of file diff --git a/e2e/chainconfig-routine-loom.yaml b/e2e/chainconfig-routine-loom.yaml index 0eb89e273e..7d81b6b602 100644 --- a/e2e/chainconfig-routine-loom.yaml +++ b/e2e/chainconfig-routine-loom.yaml @@ -8,5 +8,6 @@ ContractLogLevel: "debug" LoomLogLevel: "debug" AppStore: Version: 3 + SaveEVMStateToIAVL: false CachingStore: CachingEnabled: true \ No newline at end of file diff --git a/e2e/dposv2-genesis.json b/e2e/dposv2-genesis.json deleted file mode 100644 index ac639bf0e7..0000000000 --- a/e2e/dposv2-genesis.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "contracts": [ - { - "vm": "plugin", - "format": "plugin", - "name": "coin", - "location": "coin:1.0.0", - "init": null - }, - { - "vm": "plugin", - "format": "plugin", - "name": "dposV2", - "location": "dposV2:2.0.0", - "init": { - "params": { - "validatorCount": "21", - "electionCycleLength": "604800" - }, - "validators": [ - { - "pubKey": "dMI2nJa3ZOxU3yFYNVRYarPOda5b19qZdGENG6yFVVk=", - "power": "10" - } - ] - } - }, - { - "vm": "plugin", - "format": "plugin", - "name": "addressmapper", - "location": "addressmapper:0.1.0", - "init": null - }, - { - "vm": "plugin", - "format": "plugin", - "name": "chainconfig", - "location": "chainconfig:1.0.0", - "init": { - "owner": { - "chainId": "default", - "local": "8ebnLFSTiXXZuVhl8mQJRL8kwJk=" - }, - "features": [ - { - "name": "dpos:v3.1", - "status": "WAITING" - }, - { - "name": "chaincfg:v1.1", - "status": "WAITING" - }, - { - "name": "chaincfg:v1.2", - "status": "WAITING" - }, - { - "name": "chaincfg:v1.3", - "status": "WAITING" - }, - { - "name": "receipts:v2", - "status": "WAITING" - }, - { - "name": "receipts:v3.4", - "status": "WAITING" - }, - { - "name": "receipts:v3.1", - "status": "WAITING" - }, - { - "name": "coin:v1.1", - "status": "WAITING" - }, - { - "name": "appstore:v3.1", - "status": "WAITING" - }, - { - "name": "auth:sigtx:eth", - "status": "WAITING" - }, - { - "name": "tx:eth", - "status": "WAITING" - }, - { - "name": "tx:check-value", - "status": "WAITING" - }, - { - "name": "evm:constantinople", - "status": "WAITING" - } - ] - } - } - ] -} diff --git a/e2e/enable-receipts-v2-feature-genesis.json b/e2e/enable-receipts-v2-feature-genesis.json index 04452b11d5..09349c4a73 100644 --- a/e2e/enable-receipts-v2-feature-genesis.json +++ b/e2e/enable-receipts-v2-feature-genesis.json @@ -22,30 +22,6 @@ "numBlockConfirmations":"1" }, "features": [ - { - "name": "chaincfg:v1.1", - "status": "WAITING" - }, - { - "name": "chaincfg:v1.2", - "status": "WAITING" - }, - { - "name": "chaincfg:v1.3", - "status": "WAITING" - }, - { - "name": "receipts:v3.4", - "status": "WAITING" - }, - { - "name": "receipts:v3.1", - "status": "WAITING" - }, - { - "name": "appstore:v3.1", - "status": "WAITING" - } ] } }, diff --git a/e2e/eth-test-1-loom.yaml b/e2e/eth-test-1-loom.yaml index f7d6933235..382cafce07 100644 --- a/e2e/eth-test-1-loom.yaml +++ b/e2e/eth-test-1-loom.yaml @@ -1,4 +1,3 @@ AppStore: - Version: 3 -DPOSVersion: 2 + Version: 1 CreateEmptyBlocks: false \ No newline at end of file diff --git a/e2e/eth_test.go b/e2e/eth_test.go index b2f94ed5c1..e223cb0251 100644 --- a/e2e/eth_test.go +++ 
b/e2e/eth_test.go @@ -18,10 +18,10 @@ func TestEthJSONRPC2(t *testing.T) { }{ {"blockNumber", "eth-1-test.toml", 1, 1, "empty-genesis.json", "eth-test-1-loom.yaml"}, {"ethPolls", "eth-2-test.toml", 1, 1, "empty-genesis.json", "eth-test-2-loom.yaml"}, - {"getBlockByNumber", "eth-3-test.toml", 1, 1, "dposv2-genesis.json", "eth-test-1-loom.yaml"}, - {"getBlockTransactionCountByNumber", "eth-4-test.toml", 1, 1, "dposv2-genesis.json", "eth-test-1-loom.yaml"}, + {"getBlockByNumber", "eth-3-test.toml", 1, 1, "empty-genesis.json", "eth-test-1-loom.yaml"}, + {"getBlockTransactionCountByNumber", "eth-4-test.toml", 1, 1, "empty-genesis.json", "eth-test-1-loom.yaml"}, {"getLogs", "eth-5-test.toml", 1, 4, "empty-genesis.json", "eth-test-2-loom.yaml"}, - {"go-getBlockByNumber", "eth-6-test.toml", 1, 3, "dposv2-genesis.json", "eth-test-1-loom.yaml"}, + {"go-getBlockByNumber", "eth-6-test.toml", 1, 3, "coin.genesis.json", "eth-test-1-loom.yaml"}, } for _, test := range tests { diff --git a/e2e/loom-3-loom.yaml b/e2e/loom-3-loom.yaml index 6f8361ff50..496007726c 100644 --- a/e2e/loom-3-loom.yaml +++ b/e2e/loom-3-loom.yaml @@ -6,6 +6,7 @@ ContractLogLevel: "debug" LoomLogLevel: "debug" AppStore: Version: 3 + SaveEVMStateToIAVL: false CachingStoreConfig: CachingEnabled: true diff --git a/e2e/loom-4-loom.yaml b/e2e/loom-4-loom.yaml index e5636d34cc..c0421ee418 100644 --- a/e2e/loom-4-loom.yaml +++ b/e2e/loom-4-loom.yaml @@ -6,6 +6,7 @@ ContractLogLevel: "debug" LoomLogLevel: "debug" AppStore: Version: 3 + SaveEVMStateToIAVL: false CachingStoreConfig: CachingEnabled: true diff --git a/e2e/loom-5-loom.yaml b/e2e/loom-5-loom.yaml index eb3fd47312..aa620c1504 100644 --- a/e2e/loom-5-loom.yaml +++ b/e2e/loom-5-loom.yaml @@ -7,5 +7,6 @@ ContractLogLevel: "debug" LoomLogLevel: "debug" AppStore: Version: 3 + SaveEVMStateToIAVL: false CachingStoreConfig: CachingEnabled: true \ No newline at end of file diff --git a/e2e/tests/truffle/test/EvmSnapshot.js b/e2e/tests/truffle/test/EvmSnapshot.js deleted file mode 100644 index 62a997b7ff..0000000000 --- a/e2e/tests/truffle/test/EvmSnapshot.js +++ /dev/null @@ -1,94 +0,0 @@ -const { - waitForXBlocks, - ethGetTransactionCount -} = require('./helpers') -const Web3 = require('web3') -const fs = require('fs') -const path = require('path') -const { - SpeculativeNonceTxMiddleware, - SignedTxMiddleware, - Client, - EthersSigner, - createDefaultTxMiddleware, - Address, - LocalAddress, - CryptoUtils, - LoomProvider, - Contracts -} = require('loom-js') -const ethers = require('ethers').ethers - -const NonceTestContract = artifacts.require('NonceTestContract'); - -// web3 functions called using truffle objects use the loomProvider -// web3 functions called uisng we3js access the loom QueryInterface directly -contract('TestEvmSnapshot', async (accounts) => { - // This test is not provider dependent so just run it with Loom Truffle provider - if (process.env.TRUFFLE_PROVIDER === 'hdwallet') { - return - } - - let contract, from, nodeAddr - - beforeEach(async () => { - nodeAddr = fs.readFileSync(path.join(process.env.CLUSTER_DIR, '0', 'node_rpc_addr'), 'utf-8').trim() - - const client = new Client('default', `ws://${nodeAddr}/websocket`, `ws://${nodeAddr}/queryws`) - client.on('error', msg => { - console.error('Error on connect to client', msg) - console.warn('Please verify if loom cluster is running') - }) - const privKey = CryptoUtils.generatePrivateKey() - const pubKey = CryptoUtils.publicKeyFromPrivateKey(privKey) - client.txMiddleware = createDefaultTxMiddleware(client, privKey); - 
- const setupMiddlewareFn = function (client, privateKey) { - const publicKey = CryptoUtils.publicKeyFromPrivateKey(privateKey) - return [new SpeculativeNonceTxMiddleware(publicKey, client), new SignedTxMiddleware(privateKey)] - } - const loomProvider = new LoomProvider(client, privKey, setupMiddlewareFn) - const web3 = new Web3(loomProvider) - - // Create a mapping between a DAppChain account & an Ethereum account so that - // ethGetTransactionCount can resolve the Ethereum address it's given to a DAppChain address - const localAddr = new Address(client.chainId, LocalAddress.fromPublicKey(pubKey)); - const addressMapper = await Contracts.AddressMapper.createAsync(client, localAddr); - const ethAccount = web3.eth.accounts.create(); - const ethWallet = new ethers.Wallet(ethAccount.privateKey); - await addressMapper.addIdentityMappingAsync( - localAddr, - new Address('eth', LocalAddress.fromHexString(ethAccount.address)), - new EthersSigner(ethWallet) - ); - from = ethAccount.address - - const nonceTestContract = await NonceTestContract.deployed() - contract = new web3.eth.Contract( - NonceTestContract._json.abi, - nonceTestContract.address, - // contract calls go through LoomProvider, which expect the sender address to be - // a local address (not an eth address) - { - from: localAddr.local.toString() - } - ); - }) - - // SnapshotTest generates a lot of txs and queries and send them to a contract almost at the same time. - // This test ensures that the snapshot does not have a concurrent read/write problem. - it('SnapshotTest', async () => { - for (var i = 0; i < 50; i++) { - contract.methods.set(7777).send().then() - contract.methods.get().call().then() - } - for (var i = 0; i < 50; i++) { - contract.methods.set(8888).send().then() - contract.methods.get().call().then() - } - await waitForXBlocks(nodeAddr, 5) - await contract.methods.set(9999).send().then() - assert.equal(await contract.methods.get().call(), 9999) - }); - -}); \ No newline at end of file diff --git a/evm/config.go b/evm/config.go index 1d8575d205..4239a74b11 100644 --- a/evm/config.go +++ b/evm/config.go @@ -12,12 +12,6 @@ type EvmStoreConfig struct { WriteBufferMegs int // NumCachedRoots defines a number of in-memory cached EVM roots NumCachedRoots int - // Specifies the number of Merkle tree versions that should be kept in memory before writing a - // new version to disk. - // If set to zero every version will be written to disk unless overridden via the on-chain config - // AppStore.IAVLFlushInterval setting. - // If set to -1 every version will always be written to disk, regardless of the on-chain config. - FlushInterval int64 } func DefaultEvmStoreConfig() *EvmStoreConfig { @@ -26,8 +20,7 @@ func DefaultEvmStoreConfig() *EvmStoreConfig { DBBackend: "goleveldb", CacheSizeMegs: 256, WriteBufferMegs: 4, - NumCachedRoots: 500, - FlushInterval: 0, + NumCachedRoots: 100, } } diff --git a/evm/evm_test.go b/evm/evm_test.go index f60778f472..217d2c3f5d 100644 --- a/evm/evm_test.go +++ b/evm/evm_test.go @@ -18,7 +18,6 @@ import ( ethvm "github.com/ethereum/go-ethereum/core/vm" "github.com/loomnetwork/go-loom" "github.com/loomnetwork/loomchain" - "github.com/loomnetwork/loomchain/db" "github.com/loomnetwork/loomchain/features" "github.com/loomnetwork/loomchain/store" lvm "github.com/loomnetwork/loomchain/vm" @@ -41,17 +40,7 @@ func mockState() loomchain.State { header := abci.Header{} header.Height = BlockHeight header.Time = blockTime - return loomchain.NewStoreState(context.Background(), store.NewMemStore(), header, nil, nil). 
- WithEVMState(mockEVMState()) -} - -func mockEVMState() *loomchain.EVMState { - memDb, _ := db.LoadMemDB() - evmState, err := loomchain.NewEVMState(store.NewEvmStore(memDb, 100, 0)) - if err != nil { - panic(err) - } - return evmState + return loomchain.NewStoreState(context.Background(), store.NewMemStore(), header, nil, nil) } func TestProcessDeployTx(t *testing.T) { @@ -220,11 +209,17 @@ func TestGlobals(t *testing.T) { vm, _ := manager.InitVM(lvm.VMType_EVM, state) abiGP, gPAddr := deploySolContract(t, caller, "GlobalProperties", vm) + vm, _ = manager.InitVM(lvm.VMType_EVM, state) testNow(t, abiGP, caller, gPAddr, vm) + vm, _ = manager.InitVM(lvm.VMType_EVM, state) testBlockTimeStamp(t, abiGP, caller, gPAddr, vm) + vm, _ = manager.InitVM(lvm.VMType_EVM, state) testBlockNumber(t, abiGP, caller, gPAddr, vm) + vm, _ = manager.InitVM(lvm.VMType_EVM, state) testTxOrigin(t, abiGP, caller, gPAddr, vm) + vm, _ = manager.InitVM(lvm.VMType_EVM, state) testMsgSender(t, abiGP, caller, gPAddr, vm) + vm, _ = manager.InitVM(lvm.VMType_EVM, state) testMsgValue(t, abiGP, caller, gPAddr, vm) } diff --git a/evm/loom_statedb.go b/evm/loom_statedb.go index 4ac88b4054..e1357fac67 100644 --- a/evm/loom_statedb.go +++ b/evm/loom_statedb.go @@ -16,7 +16,11 @@ type LoomStateDB struct { abm *evmAccountBalanceManager } -func newLoomStateDB(abm *evmAccountBalanceManager, sdb *state.StateDB) (*LoomStateDB, error) { +func newLoomStateDB(abm *evmAccountBalanceManager, root common.Hash, db state.Database) (*LoomStateDB, error) { + sdb, err := state.New(root, db) + if err != nil { + return nil, err + } return &LoomStateDB{ StateDB: sdb, abm: abm, diff --git a/store/loomethdb.go b/evm/loomethdb.go similarity index 51% rename from store/loomethdb.go rename to evm/loomethdb.go index 1e3ccb21a6..9d5024f333 100644 --- a/store/loomethdb.go +++ b/evm/loomethdb.go @@ -1,95 +1,89 @@ -package store +// +build evm + +package evm import ( "bytes" "log" "os" "sort" + "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" - loom "github.com/loomnetwork/go-loom" - "github.com/loomnetwork/go-loom/util" - "github.com/loomnetwork/loomchain/db" - dbm "github.com/tendermint/tendermint/libs/db" + "github.com/loomnetwork/loomchain" + "github.com/loomnetwork/loomchain/store" ) var ( - LogEthDBBatch = true + LogEthDbBatch = true logger log.Logger loggerStarted = false ) -// EthDBLogContext provides additional context when -type EthDBLogContext struct { - blockHeight int64 - contractAddr loom.Address - callerAddr loom.Address -} - -func NewEthDBLogContext(height int64, contractAddr loom.Address, callerAddr loom.Address) *EthDBLogContext { - return &EthDBLogContext{ - blockHeight: height, - contractAddr: contractAddr, - callerAddr: callerAddr, - } -} - -// LoomEthDB implements ethdb.Database -type LoomEthDB struct { - db db.DBWrapper +// implements ethdb.Database +type LoomEthdb struct { + state store.KVStore + lock sync.RWMutex + logContext *ethdbLogContext } -func NewLoomEthDB(db db.DBWrapper) *LoomEthDB { - return &LoomEthDB{ - db: db, - } +func NewLoomEthdb(_state loomchain.State, logContext *ethdbLogContext) *LoomEthdb { + p := new(LoomEthdb) + p.state = store.PrefixKVStore(vmPrefix, _state) + p.logContext = logContext + return p } -func (s *LoomEthDB) Put(key []byte, value []byte) error { - s.db.Set(util.PrefixKey(vmPrefix, key), value) +func (s *LoomEthdb) Put(key []byte, value []byte) error { + s.state.Set(key, value) return nil } -func (s *LoomEthDB) Get(key []byte) ([]byte, error) { - return 
s.db.Get(util.PrefixKey(vmPrefix, key)), nil +func (s *LoomEthdb) Get(key []byte) ([]byte, error) { + return s.state.Get(key), nil } -func (s *LoomEthDB) Has(key []byte) (bool, error) { - return s.db.Has(util.PrefixKey(vmPrefix, key)), nil +func (s *LoomEthdb) Has(key []byte) (bool, error) { + return s.state.Has(key), nil } -func (s *LoomEthDB) Delete(key []byte) error { - s.db.Delete(util.PrefixKey(vmPrefix, key)) +func (s *LoomEthdb) Delete(key []byte) error { + s.state.Delete(key) return nil } -func (s *LoomEthDB) Close() { +func (s *LoomEthdb) Close() { } -func (s *LoomEthDB) NewBatch() ethdb.Batch { - if LogEthDBBatch { - return s.NewLogBatch(nil) +func (s *LoomEthdb) NewBatch() ethdb.Batch { + if LogEthDbBatch { + return s.NewLogBatch(s.logContext) + } else { + newBatch := new(batch) + newBatch.parentStore = s + newBatch.Reset() + return newBatch } - return newBatch(s.db) } // implements ethdb.Batch -type batch struct { - dbBatch dbm.Batch - db db.DBWrapper - size int +type kvPair struct { + key []byte + value []byte } -func newBatch(db db.DBWrapper) *batch { - return &batch{ - dbBatch: db.NewBatch(), - db: db, - } +type batch struct { + cache []kvPair + parentStore *LoomEthdb + size int } func (b *batch) Put(key, value []byte) error { - b.dbBatch.Set(util.PrefixKey(vmPrefix, key), value) + b.cache = append(b.cache, kvPair{ + key: common.CopyBytes(key), + value: common.CopyBytes(value), + }) b.size += len(value) return nil } @@ -99,22 +93,46 @@ func (b *batch) ValueSize() int { } func (b *batch) Write() error { - b.dbBatch.Write() + b.parentStore.lock.Lock() + defer b.parentStore.lock.Unlock() + + sort.Slice(b.cache, func(j, k int) bool { + return bytes.Compare(b.cache[j].key, b.cache[k].key) < 0 + }) + + for _, kv := range b.cache { + if kv.value == nil { + b.parentStore.Delete(kv.key) + } else { + b.parentStore.Put(kv.key, kv.value) + } + } return nil } func (b *batch) Reset() { - b.dbBatch.Close() - b.dbBatch = b.db.NewBatch() + b.cache = make([]kvPair, 0) b.size = 0 } func (b *batch) Delete(key []byte) error { - b.dbBatch.Delete(util.PrefixKey(vmPrefix, key)) + b.cache = append(b.cache, kvPair{ + key: common.CopyBytes(key), + value: nil, + }) return nil } -type EthDBLogParams struct { +func (b *batch) Dump(logger *log.Logger) { + b.parentStore.lock.Lock() + defer b.parentStore.lock.Unlock() + logger.Print("\n---- BATCH DUMP ----\n") + for i, kv := range b.cache { + logger.Printf("IDX %d, KEY %s\n", i, kv.key) + } +} + +type LogParams struct { LogFilename string LogFlags int LogReset bool @@ -128,15 +146,9 @@ type EthDBLogParams struct { LogBeforeWriteDump bool } -type kvPair struct { - key []byte - value []byte -} type LogBatch struct { - db db.DBWrapper - size int - params EthDBLogParams - cache []kvPair + batch batch + params LogParams } const batchHeaderWithContext = ` @@ -158,28 +170,29 @@ const batchHeader = ` ` -func (s *LoomEthDB) NewLogBatch(logContext *EthDBLogContext) ethdb.Batch { - b := &LogBatch{ - db: s.db, - params: EthDBLogParams{ - LogFilename: "ethdb-batch.log", - LogFlags: 0, - LogReset: true, - LogDelete: true, - LogWrite: true, - LogValueSize: false, - LogPutKey: true, - LogPutValue: false, - LogPutDump: false, - LogWriteDump: true, - LogBeforeWriteDump: false, - }, +func (s *LoomEthdb) NewLogBatch(logContext *ethdbLogContext) ethdb.Batch { + b := new(LogBatch) + b.batch = *new(batch) + b.batch.parentStore = s + b.batch.Reset() + b.params = LogParams{ + LogFilename: "ethdb-batch.log", + LogFlags: 0, + LogReset: true, + LogDelete: true, + LogWrite: true, + 
LogValueSize: false, + LogPutKey: true, + LogPutValue: false, + LogPutDump: false, + LogWriteDump: true, + LogBeforeWriteDump: false, } if !loggerStarted { file, err := os.Create(b.params.LogFilename) if err != nil { - panic(err) + return &b.batch } logger = *log.New(file, "", b.params.LogFlags) logger.Println("Created ethdb batch logger") @@ -197,11 +210,7 @@ func (b *LogBatch) Delete(key []byte) error { if b.params.LogDelete { logger.Println("Delete key: ", string(key)) } - b.cache = append(b.cache, kvPair{ - key: common.CopyBytes(key), - value: nil, - }) - return nil + return b.batch.Delete(key) } func (b *LogBatch) Put(key, value []byte) error { @@ -211,22 +220,19 @@ func (b *LogBatch) Put(key, value []byte) error { if b.params.LogPutValue { logger.Println("Put value: ", string(value)) } - b.cache = append(b.cache, kvPair{ - key: common.CopyBytes(key), - value: common.CopyBytes(value), - }) - b.size += len(value) + err := b.batch.Put(key, value) if b.params.LogPutDump { - b.Dump(&logger) + b.batch.Dump(&logger) } - return nil + return err } func (b *LogBatch) ValueSize() int { + size := b.batch.ValueSize() if b.params.LogValueSize { - logger.Println("ValueSize : ", b.size) + logger.Println("ValueSize : ", size) } - return b.size + return size } func (b *LogBatch) Write() error { @@ -235,41 +241,39 @@ func (b *LogBatch) Write() error { } if b.params.LogBeforeWriteDump { logger.Println("Write, before : ") - b.Dump(&logger) + b.batch.Dump(&logger) } - - sort.Slice(b.cache, func(j, k int) bool { - return bytes.Compare(b.cache[j].key, b.cache[k].key) < 0 - }) - - dbBatch := b.db.NewBatch() - for _, kv := range b.cache { - if kv.value == nil { - dbBatch.Delete(util.PrefixKey(vmPrefix, kv.key)) - } else { - dbBatch.Set(util.PrefixKey(vmPrefix, kv.key), kv.value) - } - } - dbBatch.Write() - + err := b.batch.Write() if b.params.LogWriteDump { logger.Println("Write, after : ") - b.Dump(&logger) + b.batch.Dump(&logger) } - return nil + return err } func (b *LogBatch) Reset() { if b.params.LogReset { logger.Println("Reset batch") } - b.cache = make([]kvPair, 0) - b.size = 0 + b.batch.Reset() } -func (b *LogBatch) Dump(logger *log.Logger) { - logger.Print("\n---- BATCH DUMP ----\n") - for i, kv := range b.cache { - logger.Printf("IDX %d, KEY %s\n", i, kv.key) +// sortKeys sorts prefixed keys, it will sort the postfix of the key in ascending lexographical order +func sortKeys(prefix []byte, kvs []kvPair) []kvPair { + var unsorted, sorted []int + var tmpKv []kvPair + for i, kv := range kvs { + if 0 == bytes.Compare(prefix, kv.key[:len(prefix)]) { + unsorted = append(unsorted, i) + sorted = append(sorted, i) + } + tmpKv = append(tmpKv, kv) + } + sort.Slice(sorted, func(j, k int) bool { + return bytes.Compare(kvs[sorted[j]].key, kvs[sorted[k]].key) < 0 + }) + for index := 0; index < len(sorted); index++ { + kvs[unsorted[index]] = tmpKv[sorted[index]] } + return kvs } diff --git a/evm/loomethdb_test.go b/evm/loomethdb_test.go new file mode 100644 index 0000000000..2fbab564c8 --- /dev/null +++ b/evm/loomethdb_test.go @@ -0,0 +1,128 @@ +// +build evm + +package evm + +import ( + "bytes" + "sort" + "testing" + + "github.com/stretchr/testify/require" +) + +// This test only verifies running a sort twice gives same result +func TestSortKeys(t *testing.T) { + test1 := []kvPair{ + {[]byte("prefixFred"), []byte("data1")}, + {[]byte("noPrefixMary"), []byte("data2")}, + {[]byte("noPrefixJohn"), []byte("data3")}, + {[]byte("prefixSally"), []byte("data4")}, + {[]byte("noPrefixBob"), []byte("data5")}, + 
{[]byte("prefixAnne"), []byte("data6")}, + } + test1 = sortKeys([]byte("prefix"), test1) + + test2 := []kvPair{ + {[]byte("prefixSally"), []byte("data4")}, + {[]byte("noPrefixMary"), []byte("data2")}, + {[]byte("noPrefixJohn"), []byte("data3")}, + {[]byte("prefixAnne"), []byte("data6")}, + {[]byte("noPrefixBob"), []byte("data5")}, + {[]byte("prefixFred"), []byte("data1")}, + } + + test2 = sortKeys([]byte("prefix"), test2) + for i := 0; i < len(test1); i++ { + require.Equal(t, 0, bytes.Compare(test1[i].key, test2[i].key)) + } +} + +// This test verifies that prefixed items are sorted by ascending order +func TestSortKeys2(t *testing.T) { + test1 := []kvPair{ + {[]byte("prefixSally"), []byte("data4")}, + {[]byte("prefixFred"), []byte("data1")}, + {[]byte("noPrefixMary"), []byte("data2")}, + {[]byte("noPrefixJohn"), []byte("data3")}, + {[]byte("noPrefixBob"), []byte("data5")}, + {[]byte("prefixAnne"), []byte("data6")}, + } + test1 = sortKeys([]byte("prefix"), test1) + + test2 := []kvPair{ + {[]byte("prefixAnne"), []byte("data6")}, + {[]byte("prefixFred"), []byte("data1")}, + {[]byte("noPrefixMary"), []byte("data2")}, + {[]byte("noPrefixJohn"), []byte("data3")}, + {[]byte("noPrefixBob"), []byte("data5")}, + {[]byte("prefixSally"), []byte("data4")}, + } + + for i := 0; i < len(test1); i++ { + require.Equal(t, string(test2[i].key), string(test1[i].key)) + } +} + +// Real life example +func TestSortSecureKeys(t *testing.T) { + test1 := []kvPair{ + {[]byte("secure-key-q�����;� ��Z���'=��ks֝B"), []byte("data1")}, + {[]byte("secure-key-؀&*>�Y��F8I听Qia���SQ�6��f@"), []byte("data2")}, + {[]byte("secure-key-)\n��T�b��E��8o�K���H@�6/���c"), []byte("data3")}, + {[]byte("h����Ntԇ�ב��E��K]}�ɐW��a7��"), []byte("data4")}, + {[]byte("�牔!��FQ���e�8���M˫����ܤ�S"), []byte("data5")}, + {[]byte("�Ka����ͯ>/�� �\tߕ|���}j���<<�"), []byte("data6")}, + {[]byte("-�F�bt����S �A������;BT�b�gF"), []byte("data7")}, + } + test1 = sortKeys([]byte("secure-key-"), test1) + + test2 := []kvPair{ + {[]byte("secure-key-)\n��T�b��E��8o�K���H@�6/���c"), []byte("data3")}, + {[]byte("secure-key-q�����;� ��Z���'=��ks֝B"), []byte("data1")}, + {[]byte("secure-key-؀&*>�Y��F8I听Qia���SQ�6��f@"), []byte("data2")}, + {[]byte("h����Ntԇ�ב��E��K]}�ɐW��a7��"), []byte("data4")}, + {[]byte("�牔!��FQ���e�8���M˫����ܤ�S"), []byte("data5")}, + {[]byte("�Ka����ͯ>/�� �\tߕ|���}j���<<�"), []byte("data6")}, + {[]byte("-�F�bt����S �A������;BT�b�gF"), []byte("data7")}, + } + + test2 = sortKeys([]byte("secure-key-"), test2) + + for i := 0; i < len(test1); i++ { + require.Equal(t, 0, bytes.Compare(test1[i].key, test2[i].key)) + } +} + +func TestSortBarch(t *testing.T) { + test1 := []kvPair{ + {[]byte("secure-key-q�����;� ��Z���'=��ks֝B"), []byte("data1")}, + {[]byte("secure-key-؀&*>�Y��F8I听Qia���SQ�6��f@"), []byte("data2")}, + {[]byte("secure-key-)\n��T�b��E��8o�K���H@�6/���c"), []byte("data3")}, + {[]byte("h����Ntԇ�ב��E��K]}�ɐW��a7��"), []byte("data4")}, + {[]byte("�牔!��FQ���e�8���M˫����ܤ�S"), []byte("data5")}, + {[]byte("�Ka����ͯ>/�� �\tߕ|���}j���<<�"), []byte("data6")}, + {[]byte("-�F�bt����S �A������;BT�b�gF"), []byte("data7")}, + } + sort.Slice(test1, func(j, k int) bool { + return bytes.Compare(test1[j].key, test1[k].key) < 0 + }) + + test2 := []kvPair{ + {[]byte("secure-key-)\n��T�b��E��8o�K���H@�6/���c"), []byte("data3")}, + {[]byte("secure-key-q�����;� ��Z���'=��ks֝B"), []byte("data1")}, + {[]byte("secure-key-؀&*>�Y��F8I听Qia���SQ�6��f@"), []byte("data2")}, + {[]byte("h����Ntԇ�ב��E��K]}�ɐW��a7��"), []byte("data4")}, + {[]byte("�牔!��FQ���e�8���M˫����ܤ�S"), 
[]byte("data5")}, + {[]byte("�Ka����ͯ>/�� �\tߕ|���}j���<<�"), []byte("data6")}, + {[]byte("-�F�bt����S �A������;BT�b�gF"), []byte("data7")}, + } + + sort.Slice(test2, func(j, k int) bool { + return bytes.Compare(test2[j].key, test2[k].key) < 0 + }) + + for i := 0; i < len(test1); i++ { + require.Equal(t, 0, bytes.Compare(test1[i].key, test2[i].key)) + } + +} diff --git a/evm/loomevm.go b/evm/loomevm.go index 1e39d33f11..3d3ae3ff3f 100644 --- a/evm/loomevm.go +++ b/evm/loomevm.go @@ -22,7 +22,6 @@ import ( "github.com/loomnetwork/loomchain/features" "github.com/loomnetwork/loomchain/receipts" "github.com/loomnetwork/loomchain/receipts/handler" - "github.com/loomnetwork/loomchain/store" "github.com/loomnetwork/loomchain/vm" "github.com/pkg/errors" ) @@ -39,6 +38,12 @@ type StateDB interface { Commit(bool) (common.Hash, error) } +type ethdbLogContext struct { + blockHeight int64 + contractAddr loom.Address + callerAddr loom.Address +} + // TODO: this doesn't need to be exported, rename to loomEvmWithState type LoomEvm struct { *Evm @@ -49,24 +54,44 @@ type LoomEvm struct { // TODO: this doesn't need to be exported, rename to newLoomEvmWithState func NewLoomEvm( loomState loomchain.State, accountBalanceManager AccountBalanceManager, - logContext *store.EthDBLogContext, debug bool, + logContext *ethdbLogContext, debug bool, ) (*LoomEvm, error) { - p := &LoomEvm{} + p := new(LoomEvm) + p.db = NewLoomEthdb(loomState, logContext) + oldRoot, err := p.db.Get(rootKey) + if err != nil { + return nil, err + } + var abm *evmAccountBalanceManager - var err error if accountBalanceManager != nil { abm = newEVMAccountBalanceManager(accountBalanceManager, loomState.Block().ChainID) - p.sdb, err = newLoomStateDB(abm, loomState.EVMState().StateDB()) - if err != nil { - return nil, err - } + p.sdb, err = newLoomStateDB(abm, common.BytesToHash(oldRoot), state.NewDatabase(p.db)) } else { - p.sdb = loomState.EVMState().StateDB() + p.sdb, err = state.New(common.BytesToHash(oldRoot), state.NewDatabase(p.db)) + } + if err != nil { + return nil, err } + p.Evm = NewEvm(p.sdb, loomState, abm, debug) return p, nil } +func (levm LoomEvm) Commit() (common.Hash, error) { + root, err := levm.sdb.Commit(true) + if err != nil { + return root, err + } + if err := levm.sdb.Database().TrieDB().Commit(root, false); err != nil { + return root, err + } + if err := levm.db.Put(rootKey, root[:]); err != nil { + return root, err + } + return root, err +} + func (levm LoomEvm) RawDump() []byte { d := levm.sdb.RawDump() output, err := json.MarshalIndent(d, "", " ") @@ -121,31 +146,26 @@ func (lvm LoomVm) accountBalanceManager(readOnly bool) AccountBalanceManager { } func (lvm LoomVm) Create(caller loom.Address, code []byte, value *loom.BigUInt) ([]byte, loom.Address, error) { - logContext := store.NewEthDBLogContext(lvm.state.Block().Height, loom.Address{}, caller) + logContext := ðdbLogContext{ + blockHeight: lvm.state.Block().Height, + contractAddr: loom.Address{}, + callerAddr: caller, + } levm, err := NewLoomEvm(lvm.state, lvm.accountBalanceManager(false), logContext, lvm.debug) if err != nil { return nil, loom.Address{}, err } - stateDB := levm.sdb - lastLogsIndex := len(stateDB.Logs()) - // evm.Create changes Nonce even though tx fails - // To prevent any state change from error tx, create a snapshot and revert EVM state if tx fails - snapshot := stateDB.Snapshot() bytecode, addr, err := levm.Create(caller, code, value) - if err != nil { - stateDB.RevertToSnapshot(snapshot) + if err == nil { + _, err = levm.Commit() } var txHash 
[]byte if lvm.receiptHandler != nil { var events []*ptypes.EventData if err == nil { - addedLogs := stateDB.Logs() - if len(addedLogs) > 0 { - addedLogs = addedLogs[lastLogsIndex:] - } events = lvm.receiptHandler.GetEventsFromLogs( - addedLogs, lvm.state.Block().Height, caller, addr, code, + levm.sdb.Logs(), lvm.state.Block().Height, caller, addr, code, ) } @@ -189,30 +209,26 @@ func (lvm LoomVm) Create(caller loom.Address, code []byte, value *loom.BigUInt) } func (lvm LoomVm) Call(caller, addr loom.Address, input []byte, value *loom.BigUInt) ([]byte, error) { - logContext := store.NewEthDBLogContext(lvm.state.Block().Height, addr, caller) + logContext := ðdbLogContext{ + blockHeight: lvm.state.Block().Height, + contractAddr: addr, + callerAddr: caller, + } levm, err := NewLoomEvm(lvm.state, lvm.accountBalanceManager(false), logContext, lvm.debug) if err != nil { return nil, err } - stateDB := levm.sdb - lastLogsIndex := len(stateDB.Logs()) - // To prevent any state change from error tx, create a snapshot and revert EVM state if tx fails - snapshot := stateDB.Snapshot() _, err = levm.Call(caller, addr, input, value) - if err != nil { - stateDB.RevertToSnapshot(snapshot) + if err == nil { + _, err = levm.Commit() } var txHash []byte if lvm.receiptHandler != nil { var events []*ptypes.EventData if err == nil { - addedLogs := stateDB.Logs() - if len(addedLogs) > 0 { - addedLogs = addedLogs[lastLogsIndex:] - } events = lvm.receiptHandler.GetEventsFromLogs( - addedLogs, lvm.state.Block().Height, caller, addr, input, + levm.sdb.Logs(), lvm.state.Block().Height, caller, addr, input, ) } diff --git a/evm/test_cryptozombies.go b/evm/test_cryptozombies.go index d30080b2cd..d0bf1bb02c 100644 --- a/evm/test_cryptozombies.go +++ b/evm/test_cryptozombies.go @@ -82,9 +82,15 @@ func testCryptoZombiesUpdateState(t *testing.T, state loomchain.State, caller lo vm, _ := manager.InitVM(lvm.VMType_PLUGIN, state) kittyAddr := deployContract(t, vm, motherKat, kittyData.Bytecode, kittyData.RuntimeBytecode) + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) zOwnershipAddr := deployContract(t, vm, caller, zOwnershipData.Bytecode, zOwnershipData.RuntimeBytecode) + + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) checkKitty(t, vm, caller, kittyAddr, kittyData) + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) makeZombie(t, vm, caller, zOwnershipAddr, zOwnershipData, "EEK") + + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) greedyZombie := getZombies(t, vm, caller, zOwnershipAddr, zOwnershipData, 0) // greedy zombie should look like: //{ @@ -101,8 +107,12 @@ func testCryptoZombiesUpdateState(t *testing.T, state loomchain.State, caller lo t.Error("Wrong dna for greedy zombie") } + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) setKittyAddress(t, vm, caller, kittyAddr, zOwnershipAddr, zOwnershipData) + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) zombieFeed(t, vm, caller, zOwnershipAddr, zOwnershipData, 0, 67) + + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) newZombie := getZombies(t, vm, caller, zOwnershipAddr, zOwnershipData, 1) // New zombie should look like //{ diff --git a/evm_state.go b/evm_state.go deleted file mode 100644 index a5f143a569..0000000000 --- a/evm_state.go +++ /dev/null @@ -1,86 +0,0 @@ -package loomchain - -import ( - "bytes" - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/state" - "github.com/loomnetwork/loomchain/store" -) - -// EVMState contains the mutable EVM state. 
-type EVMState struct { - sdb *state.StateDB - evmStore *store.EvmStore -} - -// NewEVMState returns the EVM state corresponding to the current version of the given store. -func NewEVMState(evmStore *store.EvmStore) (*EVMState, error) { - evmRoot, _ := evmStore.Version() - sdb, err := state.New(common.BytesToHash(evmRoot), state.NewDatabaseWithTrieDB(evmStore.TrieDB())) - if err != nil { - return nil, err - } - return &EVMState{ - evmStore: evmStore, - sdb: sdb, - }, nil -} - -// Commit writes the state changes that occurred since the previous commit to the underlying store. -func (s *EVMState) Commit() error { - if s.evmStore == nil { - panic("EvmStore is nil") - } - evmStateRoot, err := s.sdb.Commit(true) - if err != nil { - return err - } - s.evmStore.SetCurrentRoot(evmStateRoot[:]) - // Clear out old state data such as logs and cache to free up memory - s.sdb.Reset(evmStateRoot) - return nil -} - -// GetSnapshot returns the EVMState instance containing the state as it was at the given version. -// The specified root is expected to match the root of the returned state, if the roots don't match -// an error will be returned. -// NOTE: Do not call Commit on the returned instance. -func (s *EVMState) GetSnapshot(version int64, root []byte) (*EVMState, error) { - r, v := s.evmStore.GetRootAt(version) - if !bytes.Equal(r, root) { - return nil, fmt.Errorf( - "EVM roots mismatch, expected (%d): %X, actual (%d): %X", - version, root, v, r, - ) - } - // The cachingDB instance created by state.NewDatabaseWithTrieDB() contains a codeSizeCache which - // probably shouldn't be shared between the EVMState instance used by the tx handlers and the - // snapshots instances used by the query server. Which is why NewDatabaseWithTrieDB() is used - // here instead of s.sdb.Database(). - sdb, err := state.New( - common.BytesToHash(r), - state.NewDatabaseWithTrieDB(s.evmStore.TrieDB()), - ) - if err != nil { - return nil, err - } - return &EVMState{ - evmStore: nil, // this will ensure that Commit() will panic - sdb: sdb, - }, nil -} - -// Clone returns a copy of the EVMState instance. -// NOTE: Do not call Commit on the returned instance. 
-func (s *EVMState) Clone() *EVMState { - return &EVMState{ - evmStore: nil, // this will ensure that Commit() will panic - sdb: s.sdb.Copy(), - } -} - -func (s *EVMState) StateDB() *state.StateDB { - return s.sdb -} diff --git a/plugin/fake_context.go b/plugin/fake_context.go index 2db97990ee..84ddb384fe 100644 --- a/plugin/fake_context.go +++ b/plugin/fake_context.go @@ -10,9 +10,7 @@ import ( "github.com/loomnetwork/go-loom/plugin" "github.com/loomnetwork/go-loom/types" "github.com/loomnetwork/loomchain" - cdb "github.com/loomnetwork/loomchain/db" levm "github.com/loomnetwork/loomchain/evm" - "github.com/loomnetwork/loomchain/store" abci "github.com/tendermint/tendermint/abci/types" ) @@ -37,18 +35,9 @@ func CreateFakeContextWithEVM(caller, address loom.Address) *FakeContextWithEVM }, ) state := loomchain.NewStoreState(context.Background(), ctx, block, nil, nil) - evmDB, err := cdb.LoadDB("memdb", "", "", 256, 4, false) - if err != nil { - panic(err) - } - evmStore := store.NewEvmStore(evmDB, 100, 0) - evmState, err := loomchain.NewEVMState(evmStore) - if err != nil { - panic(err) - } return &FakeContextWithEVM{ FakeContext: ctx, - State: state.WithEVMState(evmState), + State: state, } } diff --git a/plugin/vm_test.go b/plugin/vm_test.go index 151117ae61..f089fab0e3 100644 --- a/plugin/vm_test.go +++ b/plugin/vm_test.go @@ -19,7 +19,6 @@ import ( ptypes "github.com/loomnetwork/go-loom/plugin/types" "github.com/loomnetwork/go-loom/testdata" "github.com/loomnetwork/loomchain" - "github.com/loomnetwork/loomchain/db" "github.com/loomnetwork/loomchain/eth/subs" "github.com/loomnetwork/loomchain/events" levm "github.com/loomnetwork/loomchain/evm" @@ -123,15 +122,6 @@ func (c *VMTestContract) CheckQueryCaller(ctx contract.StaticContext, args *test return &testdata.StaticCallResult{}, nil } -func mockEVMState() *loomchain.EVMState { - memDb, _ := db.LoadMemDB() - evmState, err := loomchain.NewEVMState(store.NewEvmStore(memDb, 100, 0)) - if err != nil { - panic(err) - } - return evmState -} - func TestPluginVMContractContextCaller(t *testing.T) { fc1 := &VMTestContract{t: t, Name: "fakecontract1"} @@ -147,7 +137,7 @@ func TestPluginVMContractContextCaller(t *testing.T) { Height: int64(34), Time: time.Unix(123456789, 0), } - state := loomchain.NewStoreState(context.Background(), store.NewMemStore(), block, nil, nil).WithEVMState(mockEVMState()) + state := loomchain.NewStoreState(context.Background(), store.NewMemStore(), block, nil, nil) createRegistry, err := registry.NewRegistryFactory(registry.LatestRegistryVersion) require.NoError(t, err) diff --git a/store/config.go b/store/config.go index 06f906e056..3ac62f702a 100644 --- a/store/config.go +++ b/store/config.go @@ -12,7 +12,8 @@ type AppStoreConfig struct { PruneInterval int64 // Number of versions to prune at a time. PruneBatchSize int64 - // Obsolete and no longer used + // If true the app store will write EVM state to both IAVLStore and EvmStore + // This config works with AppStore Version 3 (MultiWriterAppStore) only SaveEVMStateToIAVL bool // Specifies the number of IAVL tree versions that should be kept in memory before writing a new // version to disk. 
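[Editor's note, not part of the patch] For orientation before the store/evmstore.go changes that follow: the SaveEVMStateToIAVL setting documented just above is passed into NewMultiWriterAppStore(), which decides whether "vm"-prefixed keys are mirrored into the IAVL store or written only to the reworked EvmStore. The sketch below shows how the stores are wired together; it is adapted from the mockMultiWriterStore test helper later in this patch and is illustrative only (the in-memory DBs, the 100-root cache size, and the wrapper package name are assumptions, not part of the patch).

package example // hypothetical wrapper package, not part of this patch

import (
	"github.com/loomnetwork/loomchain/db"
	"github.com/loomnetwork/loomchain/store"
)

// newAppStore wires an IAVLStore and an EvmStore into a MultiWriterAppStore,
// mirroring the mockMultiWriterStore helper used in the store tests below.
func newAppStore(flushInterval int64, saveEVMStateToIAVL bool) (*store.MultiWriterAppStore, error) {
	appDB, err := db.LoadMemDB() // a real node would open app.db instead
	if err != nil {
		return nil, err
	}
	iavlStore, err := store.NewIAVLStore(appDB, 0, 0, flushInterval)
	if err != nil {
		return nil, err
	}
	evmDB, err := db.LoadMemDB() // a real node would open evm.db instead
	if err != nil {
		return nil, err
	}
	evmStore := store.NewEvmStore(evmDB, 100) // 100 = number of EVM roots kept in the LRU root cache
	// saveEVMStateToIAVL corresponds to the AppStoreConfig.SaveEVMStateToIAVL setting above.
	return store.NewMultiWriterAppStore(iavlStore, evmStore, saveEVMStateToIAVL)
}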
diff --git a/store/evmstore.go b/store/evmstore.go index 9461d3ec75..cc01297bd3 100644 --- a/store/evmstore.go +++ b/store/evmstore.go @@ -3,13 +3,13 @@ package store import ( "bytes" "encoding/binary" + "sort" "time" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/trie" "github.com/go-kit/kit/metrics" kitprometheus "github.com/go-kit/kit/metrics/prometheus" lru "github.com/hashicorp/golang-lru" + "github.com/loomnetwork/go-loom/plugin" "github.com/loomnetwork/go-loom/util" "github.com/loomnetwork/loomchain/db" "github.com/pkg/errors" @@ -19,8 +19,6 @@ import ( var ( defaultRoot = []byte{1} rootHashKey = util.PrefixKey(vmPrefix, rootKey) - // Prefix for versioned Patricia roots - evmRootPrefix = []byte("evmroot") commitDuration metrics.Histogram ) @@ -55,36 +53,137 @@ func getVersionFromEvmRootKey(key []byte) (int64, error) { // EvmStore persists EVM state to a DB. type EvmStore struct { evmDB db.DBWrapper + cache map[string]cacheItem rootHash []byte lastSavedRoot []byte rootCache *lru.Cache version int64 - trieDB *trie.Database - flushInterval int64 } // NewEvmStore returns a new instance of the store backed by the given DB. -func NewEvmStore(evmDB db.DBWrapper, numCachedRoots int, flushInterval int64) *EvmStore { +func NewEvmStore(evmDB db.DBWrapper, numCachedRoots int) *EvmStore { rootCache, err := lru.New(numCachedRoots) if err != nil { panic(err) } evmStore := &EvmStore{ - evmDB: evmDB, - rootCache: rootCache, - flushInterval: flushInterval, + evmDB: evmDB, + cache: make(map[string]cacheItem), + rootCache: rootCache, } - ethDB := NewLoomEthDB(evmDB) - evmStore.trieDB = trie.NewDatabase(ethDB) return evmStore } -// Commit may persist the changes made to the store since the last commit to the underlying DB. -// The specified version is associated with the current root, which is returned by this function. -// Whether or not changes are actually flushed to the DB depends on the flush interval, which can -// be specified when calling NewEvmStore(), and overriden via the flushIntervalOverride parameter -// when calling Commit() iff the store was created with flushInterval == 0. -func (s *EvmStore) Commit(version, flushIntervalOverride int64) []byte { +func (s *EvmStore) setCache(key, val []byte, deleted bool) { + s.cache[string(key)] = cacheItem{ + Value: val, + Deleted: deleted, + } +} + +// Range iterates in-order over the keys in the store prefixed by the given prefix. +// TODO (VM): This needs a proper review, other than tests there is no code that really makes use of +// this function, only place it's called is from MultiWriterAppStore.Range but only when +// iterating over the "vm" prefix - which no code currently does. 
+// NOTE: This version of EvmStore supports Range(nil) +func (s *EvmStore) Range(prefix []byte) plugin.RangeData { + rangeCacheKeys := []string{} + rangeCache := make(map[string][]byte) + + // Add records from evm.db to range cache + iter := s.evmDB.Iterator(prefix, nil) + defer iter.Close() + for ; iter.Valid(); iter.Next() { + key := string(iter.Key()) + value := iter.Value() + if util.HasPrefix([]byte(key), prefix) || len(prefix) == 0 { + rangeCache[key] = value + rangeCacheKeys = append(rangeCacheKeys, key) + } + } + + // Update range cache with data in cache + for key, c := range s.cache { + if util.HasPrefix([]byte(key), prefix) || len(prefix) == 0 { + if c.Deleted { + rangeCacheKeys = remove(rangeCacheKeys, key) + rangeCache[key] = nil + continue + } + if _, ok := rangeCache[key]; !ok { + rangeCacheKeys = append(rangeCacheKeys, string(key)) + } + rangeCache[key] = c.Value + } + } + + // Make Range return root hash (vmvmroot) from EvmStore.rootHash + if _, exist := rangeCache[string(rootHashKey)]; exist { + rangeCache[string(rootHashKey)] = s.rootHash + } + + ret := make(plugin.RangeData, 0) + // Sorting makes RangeData deterministic + sort.Strings(rangeCacheKeys) + for _, key := range rangeCacheKeys { + var unprefixedKey []byte + var err error + if len(prefix) > 0 { + unprefixedKey, err = util.UnprefixKey([]byte(key), prefix) + if err != nil { + continue + } + } else { + unprefixedKey = []byte(key) + } + re := &plugin.RangeEntry{ + Key: unprefixedKey, + Value: rangeCache[key], + } + ret = append(ret, re) + } + return ret +} + +func (s *EvmStore) Has(key []byte) bool { + // EvmStore always has Patricia root + if bytes.Equal(key, rootHashKey) { + return true + } + if item, ok := s.cache[string(key)]; ok { + return !item.Deleted + } + return s.evmDB.Has(key) +} + +func (s *EvmStore) Get(key []byte) []byte { + if bytes.Equal(key, rootHashKey) { + return s.rootHash + } + + if item, ok := s.cache[string(key)]; ok { + return item.Value + } + return s.evmDB.Get(key) +} + +func (s *EvmStore) Delete(key []byte) { + if bytes.Equal(key, rootHashKey) { + s.rootHash = nil + } else { + s.setCache(key, nil, true) + } +} + +func (s *EvmStore) Set(key, val []byte) { + if bytes.Equal(key, rootHashKey) { + s.rootHash = val + } else { + s.setCache(key, val, false) + } +} + +func (s *EvmStore) Commit(version int64) []byte { defer func(begin time.Time) { commitDuration.Observe(time.Since(begin).Seconds()) }(time.Now()) @@ -95,41 +194,30 @@ func (s *EvmStore) Commit(version, flushIntervalOverride int64) []byte { if bytes.Equal(currentRoot, []byte{}) { currentRoot = defaultRoot } - - flushInterval := s.flushInterval - if flushInterval == 0 { - flushInterval = flushIntervalOverride - } else if flushInterval == -1 { - flushInterval = 0 + // save Patricia root of EVM state only if it changes + if !bytes.Equal(currentRoot, s.lastSavedRoot) { + s.Set(evmRootKey(version), currentRoot) } - // Only commit Patricia tree every N blocks - // TODO: What happens to all the roots that don't get committed? Are they just going to accumulate - // in the trie.Database.nodes cache forever? - if flushInterval == 0 || version%flushInterval == 0 { - // If the root hasn't changed since the last call to Commit that means no new state changes - // occurred in the trie DB since then, so we can skip committing. 
- if !bytes.Equal(defaultRoot, currentRoot) && !bytes.Equal(currentRoot, s.lastSavedRoot) { - // trie.Database.Commit will call NewBatch (indirectly) to batch writes to evmDB - if err := s.trieDB.Commit(common.BytesToHash(currentRoot), false); err != nil { - panic(err) - } - } + s.rootCache.Add(version, currentRoot) - // We don't commit empty root but we need to save default root ([]byte{1}) as a placeholder of empty root - // So the node won't get EVM root mismatch during the EVM root checking - if !bytes.Equal(currentRoot, s.lastSavedRoot) { - s.evmDB.Set(evmRootKey(version), currentRoot) - s.lastSavedRoot = currentRoot + batch := s.evmDB.NewBatch() + for key, item := range s.cache { + if !item.Deleted { + batch.Set([]byte(key), item.Value) + } else { + batch.Delete([]byte(key)) } } - - s.rootCache.Add(version, currentRoot) + batch.Write() + s.cache = make(map[string]cacheItem) + s.lastSavedRoot = currentRoot s.version = version return currentRoot } func (s *EvmStore) LoadVersion(targetVersion int64) error { + s.cache = make(map[string]cacheItem) // find the last saved root root, version := s.getLastSavedRoot(targetVersion) if bytes.Equal(root, defaultRoot) { @@ -152,20 +240,6 @@ func (s *EvmStore) Version() ([]byte, int64) { return s.rootHash, s.version } -func (s *EvmStore) TrieDB() *trie.Database { - return s.trieDB -} - -// SetCurrentRoot sets the current EVM state root, this root must exist in the current trie DB. -// NOTE: This function must be called prior to each call to Commit. -// TODO: This is clunky, the root should just be passed into Commit! -func (s *EvmStore) SetCurrentRoot(root []byte) { - s.rootHash = root -} - -// getLastSavedRoot retrieves the EVM state root from disk that best matches the given version. -// The roots are not written to disk for every version, they only get written out when they change -// between versions, and even then depending on the flush interval some roots won't be written to disk. func (s *EvmStore) getLastSavedRoot(targetVersion int64) ([]byte, int64) { start := util.PrefixKey(vmPrefix, evmRootPrefix) end := prefixRangeEnd(evmRootKey(targetVersion)) @@ -183,20 +257,51 @@ func (s *EvmStore) getLastSavedRoot(targetVersion int64) ([]byte, int64) { return nil, 0 } -// GetRootAt returns the EVM state root corresponding to the given version. -// The second return value is version of the EVM state that corresponds to the returned root, -// it may be less than the version requested due to the reasons mentioned in getLastSavedRoot. -func (s *EvmStore) GetRootAt(version int64) ([]byte, int64) { - // Expect cache to be almost 100% hit since cache miss yields extremely poor performance. - // There's an assumption here that the cache will almost always contain all the in-mem-only - // roots that haven't been flushed to disk yet, in the rare case where such a root is evicted - // from the cache the last root persisted to disk will be returned instead. This means it's - // possible (though highly unlikely) for queries to return stale state (since they rely on - // snapshots corresponding to specific versions). This could be fixed by storing the in-mem-only - // roots in another map instead of, or in addition to the cache. 
+func (s *EvmStore) GetSnapshot(version int64) db.Snapshot { + var targetRoot []byte + // Expect cache to be almost 100% hit since cache miss yields extremely poor performance val, exist := s.rootCache.Get(version) if exist { - return val.([]byte), version + targetRoot = val.([]byte) + } else { + targetRoot, _ = s.getLastSavedRoot(version) + } + return NewEvmStoreSnapshot(s.evmDB.GetSnapshot(), targetRoot) +} + +func NewEvmStoreSnapshot(snapshot db.Snapshot, rootHash []byte) *EvmStoreSnapshot { + return &EvmStoreSnapshot{ + Snapshot: snapshot, + rootHash: rootHash, + } +} + +type EvmStoreSnapshot struct { + db.Snapshot + rootHash []byte +} + +func (s *EvmStoreSnapshot) Get(key []byte) []byte { + if bytes.Equal(key, rootHashKey) { + return s.rootHash + } + return s.Snapshot.Get(key) +} + +func (s *EvmStoreSnapshot) Has(key []byte) bool { + // snapshot always has a root hash + // nil or empty root hash is considered valid root hash + if bytes.Equal(key, rootHashKey) { + return true + } + return s.Snapshot.Has(key) +} + +func remove(keys []string, key string) []string { + for i, value := range keys { + if value == key { + return append(keys[:i], keys[i+1:]...) + } } - return s.getLastSavedRoot(version) + return keys } diff --git a/store/evmstore_test.go b/store/evmstore_test.go index b54df2db88..b141051195 100644 --- a/store/evmstore_test.go +++ b/store/evmstore_test.go @@ -2,8 +2,10 @@ package store import ( "bytes" + "fmt" "testing" + "github.com/loomnetwork/go-loom/util" "github.com/loomnetwork/loomchain/db" "github.com/stretchr/testify/suite" ) @@ -18,6 +20,118 @@ func (t *EvmStoreTestSuite) SetupTest() { func TestEvmStoreTestSuite(t *testing.T) { suite.Run(t, new(EvmStoreTestSuite)) } +func (t *EvmStoreTestSuite) TestEvmStoreRangeAndCommit() { + require := t.Require() + evmDb, err := db.LoadMemDB() + require.NoError(err) + evmStore := NewEvmStore(evmDb, 100) + for i := 0; i <= 100; i++ { + key := []byte(fmt.Sprintf("Key%d", i)) + evmStore.Set(key, key) + } + evmStore.Set([]byte("hellovm"), []byte("world")) + evmStore.Set([]byte("hellovm"), []byte("world3")) + evmStore.Set([]byte("hello1"), []byte("world1")) + evmStore.Set([]byte("hello2"), []byte("world2")) + evmStore.Set([]byte("hello3"), []byte("world3")) + evmStore.Set([]byte("hello3"), []byte("world3")) + evmStore.Delete([]byte("hello2")) + dataRange := evmStore.Range(nil) + require.Equal(104, len(dataRange)) + require.Equal(false, evmStore.Has([]byte("hello2"))) + evmStore.Commit(1) + evmStore.Set([]byte("SSSSS"), []byte("SSSSS")) + evmStore.Set([]byte("vvvvv"), []byte("vvv")) + dataRange = evmStore.Range(nil) + require.Equal(107, len(dataRange)) + evmStore.Commit(2) + evmStore.Set([]byte("SSSSS"), []byte("S1")) + ret := evmStore.Get([]byte("SSSSS")) + require.Equal(0, bytes.Compare(ret, []byte("S1"))) + evmStore.Delete([]byte("SSSSS")) + evmStore.Delete([]byte("hello1")) + dataRange = evmStore.Range(nil) + require.Equal(105, len(dataRange)) + evmStore.Commit(3) + evmStore.Delete([]byte("SSSSS")) + evmStore.Delete([]byte("hello1")) + dataRange = evmStore.Range(nil) + require.Equal(105, len(dataRange)) +} + +func (t *EvmStoreTestSuite) TestEvmStoreBasicMethods() { + require := t.Require() + // Test Get|Set|Has|Delete methods + evmDb, err := db.LoadMemDB() + require.NoError(err) + evmStore := NewEvmStore(evmDb, 100) + key1 := []byte("hello") + key2 := []byte("hello2") + value1 := []byte("world") + value2 := []byte("world2") + value3 := []byte("This is a new value") + evmStore.Set(key1, value1) + evmStore.Set(key2, value2) + result := 
evmStore.Get(key1) + require.Equal(0, bytes.Compare(value1, result)) + evmStore.Set(key1, value3) + result = evmStore.Get(key1) + require.Equal(0, bytes.Compare(value3, result)) + has := evmStore.Has(key1) + require.Equal(true, has) + evmStore.Delete(key1) + has = evmStore.Has(key1) + require.Equal(false, has) + result = evmStore.Get(key1) + fmt.Println(result) + require.Equal(0, len(result)) +} + +func (t *EvmStoreTestSuite) TestEvmStoreRangePrefix() { + require := t.Require() + // Test Range Prefix + evmDb, err := db.LoadMemDB() + require.NoError(err) + evmStore := NewEvmStore(evmDb, 100) + for i := 0; i <= 100; i++ { + key := []byte(fmt.Sprintf("Key%d", i)) + evmStore.Set(key, key) + } + for i := 0; i <= 100; i++ { + key := []byte(fmt.Sprintf("vv%dKey", i)) + evmStore.Set(key, key) + } + dataRange := evmStore.Range(nil) + require.Equal(202, len(dataRange)) + + dataRange = evmStore.Range([]byte("Key")) + require.Equal(0, len(dataRange)) + + for i := 0; i <= 100; i++ { + key := util.PrefixKey([]byte("Key"), []byte(fmt.Sprintf("%d", i))) + evmStore.Set(key, key) + key = util.PrefixKey([]byte("vv"), []byte(fmt.Sprintf("%d", i))) + evmStore.Set(key, key) + } + + dataRange = evmStore.Range([]byte("Key")) + require.Equal(101, len(dataRange)) + + dataRange = evmStore.Range([]byte("vv")) + require.Equal(101, len(dataRange)) + + evmStore.Commit(1) + dataRange = evmStore.Range([]byte("Key")) + require.Equal(101, len(dataRange)) + + dataRange = evmStore.Range([]byte("vv")) + require.Equal(101, len(dataRange)) + + evmStore.Commit(2) + evmStore.Delete(util.PrefixKey([]byte("vv"), []byte(fmt.Sprintf("%d", 10)))) + dataRange = evmStore.Range([]byte("vv")) + require.Equal(100, len(dataRange)) +} func (t *EvmStoreTestSuite) TestLoadVersionEvmStore() { require := t.Require() @@ -29,7 +143,7 @@ func (t *EvmStoreTestSuite) TestLoadVersionEvmStore() { evmDb.Set(evmRootKey(100), []byte{100}) evmDb.Set(evmRootKey(200), []byte{200}) - evmStore := NewEvmStore(evmDb, 100, 0) + evmStore := NewEvmStore(evmDb, 100) err = evmStore.LoadVersion(500) require.NoError(err) root, version := evmStore.Version() diff --git a/store/iavlstore.go b/store/iavlstore.go index 535546ee16..253d6f5a00 100755 --- a/store/iavlstore.go +++ b/store/iavlstore.go @@ -140,7 +140,7 @@ func (s *IAVLStore) Version() int64 { return s.tree.Version() } -func (s *IAVLStore) SaveVersion(opts *VersionedKVStoreSaveOptions) ([]byte, int64, error) { +func (s *IAVLStore) SaveVersion() ([]byte, int64, error) { var err error defer func(begin time.Time) { iavlSaveVersionDuration.Observe(time.Since(begin).Seconds()) @@ -149,8 +149,16 @@ func (s *IAVLStore) SaveVersion(opts *VersionedKVStoreSaveOptions) ([]byte, int6 oldVersion := s.Version() flushInterval := s.flushInterval - if flushInterval == 0 && opts != nil { - flushInterval = opts.FlushInterval + // TODO: Rather than loading the on-chain config here the flush interval override should be passed + // in as a parameter to SaveVersion(). 
+ if flushInterval == 0 { + cfg, err := LoadOnChainConfig(s) + if err != nil { + return nil, 0, errors.Wrap(err, "failed to load on-chain config") + } + if cfg.GetAppStore().GetIAVLFlushInterval() != 0 { + flushInterval = int64(cfg.GetAppStore().GetIAVLFlushInterval()) + } } else if flushInterval == -1 { flushInterval = 0 } @@ -201,8 +209,11 @@ func (s *IAVLStore) Prune() error { return nil } -func (s *IAVLStore) GetSnapshotAt(version int64) (Snapshot, error) { - panic("not implemented") +func (s *IAVLStore) GetSnapshot() Snapshot { + // This isn't an actual snapshot obviously, and never will be, but lets pretend... + return &iavlStoreSnapshot{ + IAVLStore: s, + } } // NewIAVLStore creates a new IAVLStore. diff --git a/store/iavlstore_test.go b/store/iavlstore_test.go index 7970e33e91..aaee17a3df 100644 --- a/store/iavlstore_test.go +++ b/store/iavlstore_test.go @@ -44,12 +44,12 @@ func testOrphans(t *testing.T, store *IAVLStore, diskDb db.DB, flushInterval int store.Set([]byte("k1"), []byte("Fred")) store.Set([]byte("k2"), []byte("John")) for i := 0; i < int(flushInterval-1); i++ { - _, _, err := store.SaveVersion(nil) + _, _, err := store.SaveVersion() require.NoError(t, err) } store.Set([]byte("k2"), []byte("Bob")) store.Set([]byte("k3"), []byte("Harry")) - _, _, err := store.SaveVersion(nil) // save to disk + _, _, err := store.SaveVersion() // save to disk require.NoError(t, err) @@ -57,13 +57,13 @@ func testOrphans(t *testing.T, store *IAVLStore, diskDb db.DB, flushInterval int store.Set([]byte("k2"), []byte("Sally")) store.Delete([]byte("k3")) for i := 0; i < int(flushInterval)-1; i++ { - _, _, err := store.SaveVersion(nil) + _, _, err := store.SaveVersion() require.NoError(t, err) } store.Set([]byte("k2"), []byte("Jim")) for i := 0; i < 2*int(flushInterval); i++ { - _, _, err := store.SaveVersion(nil) // save to disk + _, _, err := store.SaveVersion() // save to disk require.NoError(t, err) } lastVersion := 3 * flushInterval @@ -192,7 +192,7 @@ func testMaxVersions(t *testing.T) { func executeBlocks(t require.TestingT, blocks []*iavl.Program, store IAVLStore) { for _, block := range blocks { require.NoError(t, block.Execute(store.tree)) - _, _, err := store.SaveVersion(nil) + _, _, err := store.SaveVersion() require.NoError(t, err) require.NoError(t, store.Prune()) } diff --git a/store/logstore.go b/store/logstore.go index ea8a576709..2e14a6b981 100644 --- a/store/logstore.go +++ b/store/logstore.go @@ -114,8 +114,8 @@ func (s *LogStore) Version() int64 { return version } -func (s *LogStore) SaveVersion(opts *VersionedKVStoreSaveOptions) ([]byte, int64, error) { - vByte, vInt, err := s.store.SaveVersion(opts) +func (s *LogStore) SaveVersion() ([]byte, int64, error) { + vByte, vInt, err := s.store.SaveVersion() if s.params.LogSaveVersion { s.logger.Println("SaveVersion", string(vByte), " int ", vInt, " err ", err) } @@ -126,6 +126,6 @@ func (s *LogStore) Prune() error { return s.store.Prune() } -func (s *LogStore) GetSnapshotAt(version int64) (Snapshot, error) { - return s.store.GetSnapshotAt(version) +func (s *LogStore) GetSnapshot() Snapshot { + return s.store.GetSnapshot() } diff --git a/store/loomethdb_test.go b/store/loomethdb_test.go deleted file mode 100644 index f0ff09cdeb..0000000000 --- a/store/loomethdb_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package store - -import ( - "bytes" - "sort" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSortBarch(t *testing.T) { - test1 := []kvPair{ - {[]byte("secure-key-q�����;� ��Z���'=��ks֝B"), []byte("data1")}, - 
{[]byte("secure-key-؀&*>�Y��F8I听Qia���SQ�6��f@"), []byte("data2")}, - {[]byte("secure-key-)\n��T�b��E��8o�K���H@�6/���c"), []byte("data3")}, - {[]byte("h����Ntԇ�ב��E��K]}�ɐW��a7��"), []byte("data4")}, - {[]byte("�牔!��FQ���e�8���M˫����ܤ�S"), []byte("data5")}, - {[]byte("�Ka����ͯ>/�� �\tߕ|���}j���<<�"), []byte("data6")}, - {[]byte("-�F�bt����S �A������;BT�b�gF"), []byte("data7")}, - } - sort.Slice(test1, func(j, k int) bool { - return bytes.Compare(test1[j].key, test1[k].key) < 0 - }) - - test2 := []kvPair{ - {[]byte("secure-key-)\n��T�b��E��8o�K���H@�6/���c"), []byte("data3")}, - {[]byte("secure-key-q�����;� ��Z���'=��ks֝B"), []byte("data1")}, - {[]byte("secure-key-؀&*>�Y��F8I听Qia���SQ�6��f@"), []byte("data2")}, - {[]byte("h����Ntԇ�ב��E��K]}�ɐW��a7��"), []byte("data4")}, - {[]byte("�牔!��FQ���e�8���M˫����ܤ�S"), []byte("data5")}, - {[]byte("�Ka����ͯ>/�� �\tߕ|���}j���<<�"), []byte("data6")}, - {[]byte("-�F�bt����S �A������;BT�b�gF"), []byte("data7")}, - } - - sort.Slice(test2, func(j, k int) bool { - return bytes.Compare(test2[j].key, test2[k].key) < 0 - }) - - for i := 0; i < len(test1); i++ { - require.Equal(t, 0, bytes.Compare(test1[i].key, test2[i].key)) - } - -} diff --git a/store/memstore.go b/store/memstore.go index ab466e2e9a..badf02b59f 100644 --- a/store/memstore.go +++ b/store/memstore.go @@ -68,7 +68,7 @@ func (m *MemStore) Version() int64 { return 1 } -func (m *MemStore) SaveVersion(opts *VersionedKVStoreSaveOptions) ([]byte, int64, error) { +func (m *MemStore) SaveVersion() ([]byte, int64, error) { return m.Hash(), m.Version(), nil } @@ -76,6 +76,6 @@ func (m *MemStore) Prune() error { return nil } -func (m *MemStore) GetSnapshotAt(version int64) (Snapshot, error) { +func (m *MemStore) GetSnapshot() Snapshot { panic("not implemented") } diff --git a/store/multi_writer_app_store.go b/store/multi_writer_app_store.go index f3e7ef9e6c..598da8a296 100644 --- a/store/multi_writer_app_store.go +++ b/store/multi_writer_app_store.go @@ -11,6 +11,8 @@ import ( kitprometheus "github.com/go-kit/kit/metrics/prometheus" "github.com/loomnetwork/go-loom/plugin" "github.com/loomnetwork/go-loom/util" + "github.com/loomnetwork/loomchain/db" + "github.com/loomnetwork/loomchain/features" "github.com/loomnetwork/loomchain/log" "github.com/pkg/errors" stdprometheus "github.com/prometheus/client_golang/prometheus" @@ -23,6 +25,14 @@ var ( vmPrefix = []byte("vm") // This is the same key as rootKey in evm/loomevm.go rootKey = []byte("vmroot") + // This is the same key as featurePrefix in app.go + featurePrefix = []byte("feature") + // Using the same featurePrefix as in app.go, and the same EvmDBFeature name as in features.go + evmDBFeatureKey = util.PrefixKey(featurePrefix, []byte(features.EvmDBFeature)) + // Using the same featurePrefix as in app.go, and the same AppStoreVersion3_1 name as in features.go + appStoreVersion3_1 = util.PrefixKey(featurePrefix, []byte(features.AppStoreVersion3_1)) + // This is the prefix of versioning Patricia roots + evmRootPrefix = []byte("evmroot") saveVersionDuration metrics.Histogram getSnapshotDuration metrics.Histogram @@ -46,7 +56,7 @@ func init() { Namespace: "loomchain", Subsystem: "multi_writer_appstore", Name: "get_snapshot", - Help: "How long MultiWriterAppStore.GetSnapshotAt() took to execute (in seconds)", + Help: "How long MultiWriterAppStore.GetSnapshot() took to execute (in seconds)", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }, []string{}, ) @@ -71,32 +81,23 @@ func init() { ) } -// GetEVMRootFromAppStore retrieves the current EVM 
root from the given app store. -func GetEVMRootFromAppStore(s KVReader) []byte { - evmRoot := s.Get(rootKey) - if evmRoot == nil { - return defaultRoot - } - return evmRoot -} - -// MultiWriterAppStore keeps the EVM Patricia tree and IAVL tree roots in sync for each version so -// that all on-chain state can be consistently persisted & loaded at any height. -// -// A previous version of this store used to handle EVM state keys (denoted by the "vm" prefix) but -// the current version is only capable of pruning old EVM state keys from the IAVLStore, the EVM -// state keys are now handled by the EVMState. +// MultiWriterAppStore reads & writes keys that have the "vm" prefix via both the IAVLStore and the EvmStore, +// or just the EvmStore, depending on the evmStoreEnabled flag. type MultiWriterAppStore struct { - appStore *IAVLStore - evmStore *EvmStore - lastSavedTree unsafe.Pointer // *iavl.ImmutableTree + appStore *IAVLStore + evmStore *EvmStore + lastSavedTree unsafe.Pointer // *iavl.ImmutableTree + onlySaveEvmStateToEvmStore bool } // NewMultiWriterAppStore creates a new MultiWriterAppStore. -func NewMultiWriterAppStore(appStore *IAVLStore, evmStore *EvmStore) (*MultiWriterAppStore, error) { +func NewMultiWriterAppStore( + appStore *IAVLStore, evmStore *EvmStore, saveEVMStateToIAVL bool, +) (*MultiWriterAppStore, error) { store := &MultiWriterAppStore{ - appStore: appStore, - evmStore: evmStore, + appStore: appStore, + evmStore: evmStore, + onlySaveEvmStateToEvmStore: !saveEVMStateToIAVL, } appStoreEvmRoot := store.appStore.Get(rootKey) // if root is nil, this is the first run after migration, so get evmroot from vmvmroot @@ -107,31 +108,57 @@ func NewMultiWriterAppStore(appStore *IAVLStore, evmStore *EvmStore) (*MultiWrit appStoreEvmRoot = defaultRoot } } - evmStoreEvmRoot, version := store.evmStore.GetRootAt(store.appStore.Version()) + evmStoreEvmRoot, version := store.evmStore.getLastSavedRoot(store.appStore.Version()) if !bytes.Equal(appStoreEvmRoot, evmStoreEvmRoot) { return nil, fmt.Errorf("EVM roots mismatch, evm.db(%d): %X, app.db(%d): %X", version, evmStoreEvmRoot, appStore.Version(), appStoreEvmRoot) } - if err := store.setLastSavedTreeToVersion(appStore.Version()); err != nil { - return nil, err + // feature flag overrides SaveEVMStateToIAVL + if !store.onlySaveEvmStateToEvmStore { + store.onlySaveEvmStateToEvmStore = bytes.Equal(store.appStore.Get(evmDBFeatureKey), []byte{1}) } + + store.setLastSavedTreeToVersion(appStore.Version()) return store, nil } func (s *MultiWriterAppStore) Delete(key []byte) { - s.appStore.Delete(key) + if util.HasPrefix(key, vmPrefix) { + s.evmStore.Delete(key) + if !s.onlySaveEvmStateToEvmStore { + s.appStore.Delete(key) + } + } else { + s.appStore.Delete(key) + } } func (s *MultiWriterAppStore) Set(key, val []byte) { - s.appStore.Set(key, val) + if !s.onlySaveEvmStateToEvmStore && bytes.Equal(key, evmDBFeatureKey) { + s.onlySaveEvmStateToEvmStore = bytes.Equal(val, []byte{1}) + } + if util.HasPrefix(key, vmPrefix) { + s.evmStore.Set(key, val) + if !s.onlySaveEvmStateToEvmStore { + s.appStore.Set(key, val) + } + } else { + s.appStore.Set(key, val) + } } func (s *MultiWriterAppStore) Has(key []byte) bool { + if util.HasPrefix(key, vmPrefix) { + return s.evmStore.Has(key) + } return s.appStore.Has(key) } func (s *MultiWriterAppStore) Get(key []byte) []byte { + if util.HasPrefix(key, vmPrefix) { + return s.evmStore.Get(key) + } return s.appStore.Get(key) } @@ -141,6 +168,9 @@ func (s *MultiWriterAppStore) Range(prefix []byte) plugin.RangeData { 
panic(errors.New("Range over nil prefix not implemented")) } + if bytes.Equal(prefix, vmPrefix) || util.HasPrefix(prefix, vmPrefix) { + return s.evmStore.Range(prefix) + } return s.appStore.Range(prefix) } @@ -152,31 +182,32 @@ func (s *MultiWriterAppStore) Version() int64 { return s.appStore.Version() } -func (s *MultiWriterAppStore) SaveVersion(opts *VersionedKVStoreSaveOptions) ([]byte, int64, error) { +func (s *MultiWriterAppStore) SaveVersion() ([]byte, int64, error) { var err error defer func(begin time.Time) { saveVersionDuration.Observe(time.Since(begin).Seconds()) }(time.Now()) - var flushInterval int64 - if opts != nil { - flushInterval = opts.FlushInterval - } - currentRoot := s.evmStore.Commit(s.Version()+1, flushInterval) - // Store the root of the EVM Patricia tree in the IAVL tree. - // Only write the EVM root to the IAVL store if it changes, this was previously only done - // once the AppStoreVersion3_1 feature flag was enabled, but it's now assumed the flag is - // always enabled so the feature check is omitted. - oldRoot := s.appStore.Get(rootKey) - if !bytes.Equal(oldRoot, currentRoot) { - s.appStore.Set(rootKey, currentRoot) - } + currentRoot := s.evmStore.Commit(s.Version() + 1) + if s.onlySaveEvmStateToEvmStore { + // Tie up Patricia tree with IAVL tree. + // Do this after the feature flag is enabled so that we can detect + // inconsistency in evm.db across the cluster + // AppStore 3.1 writes EVM root to app.db only if it changes + if bytes.Equal(s.appStore.Get(appStoreVersion3_1), []byte{1}) { + oldRoot := s.appStore.Get(rootKey) + if !bytes.Equal(oldRoot, currentRoot) { + s.appStore.Set(rootKey, currentRoot) + } + } else { + s.appStore.Set(rootKey, currentRoot) + } - if err := s.pruneOldEVMKeys(); err != nil { - return nil, 0, err + if err := s.pruneOldEVMKeys(); err != nil { + return nil, 0, err + } } - - hash, version, err := s.appStore.SaveVersion(opts) + hash, version, err := s.appStore.SaveVersion() s.setLastSavedTreeToVersion(version) return hash, version, err } @@ -230,43 +261,43 @@ func (s *MultiWriterAppStore) Prune() error { return s.appStore.Prune() } -func (s *MultiWriterAppStore) GetSnapshotAt(version int64) (Snapshot, error) { +func (s *MultiWriterAppStore) GetSnapshot() Snapshot { defer func(begin time.Time) { getSnapshotDuration.Observe(time.Since(begin).Seconds()) }(time.Now()) - - var err error - var appStoreTree *iavl.ImmutableTree - if version == 0 { - appStoreTree = (*iavl.ImmutableTree)(atomic.LoadPointer(&s.lastSavedTree)) - } else { - appStoreTree, err = s.appStore.tree.GetImmutable(version) - if err != nil { - return nil, errors.Wrapf(err, "failed to load immutable tree for version %v", version) - } - } - return newMultiWriterStoreSnapshot(appStoreTree), nil + appStoreTree := (*iavl.ImmutableTree)(atomic.LoadPointer(&s.lastSavedTree)) + evmDbSnapshot := s.evmStore.GetSnapshot(appStoreTree.Version()) + return newMultiWriterStoreSnapshot(evmDbSnapshot, appStoreTree) } type multiWriterStoreSnapshot struct { - appStoreTree *iavl.ImmutableTree + evmDbSnapshot db.Snapshot + appStoreTree *iavl.ImmutableTree } -func newMultiWriterStoreSnapshot(appStoreTree *iavl.ImmutableTree) *multiWriterStoreSnapshot { +func newMultiWriterStoreSnapshot(evmDbSnapshot db.Snapshot, appStoreTree *iavl.ImmutableTree) *multiWriterStoreSnapshot { return &multiWriterStoreSnapshot{ - appStoreTree: appStoreTree, + evmDbSnapshot: evmDbSnapshot, + appStoreTree: appStoreTree, } } func (s *multiWriterStoreSnapshot) Release() { + s.evmDbSnapshot.Release() s.appStoreTree = nil } 
func (s *multiWriterStoreSnapshot) Has(key []byte) bool { + if util.HasPrefix(key, vmPrefix) { + return s.evmDbSnapshot.Has(key) + } return s.appStoreTree.Has(key) } func (s *multiWriterStoreSnapshot) Get(key []byte) []byte { + if util.HasPrefix(key, vmPrefix) { + return s.evmDbSnapshot.Get(key) + } _, val := s.appStoreTree.Get(key) return val } @@ -278,6 +309,30 @@ func (s *multiWriterStoreSnapshot) Range(prefix []byte) plugin.RangeData { } ret := make(plugin.RangeData, 0) + + if bytes.Equal(prefix, vmPrefix) || util.HasPrefix(prefix, vmPrefix) { + it := s.evmDbSnapshot.NewIterator(prefix, prefixRangeEnd(prefix)) + defer it.Close() + + for ; it.Valid(); it.Next() { + key := it.Key() + if util.HasPrefix(key, prefix) { + var err error + key, err = util.UnprefixKey(key, prefix) + if err != nil { + panic(err) + } + + ret = append(ret, &plugin.RangeEntry{ + Key: key, + Value: it.Value(), + }) + } + } + return ret + } + + // Otherwise iterate over the IAVL tree keys, values, _, err := s.appStoreTree.GetRangeWithProof(prefix, prefixRangeEnd(prefix), 0) if err != nil { log.Error("failed to get range", "prefix", string(prefix), "err", err) diff --git a/store/multi_writer_app_store_test.go b/store/multi_writer_app_store_test.go index 39f4fa04ad..f08d244716 100644 --- a/store/multi_writer_app_store_test.go +++ b/store/multi_writer_app_store_test.go @@ -1,6 +1,7 @@ package store import ( + "bytes" "testing" "github.com/gogo/protobuf/proto" @@ -8,7 +9,6 @@ import ( "github.com/loomnetwork/go-loom/util" "github.com/loomnetwork/loomchain/db" "github.com/loomnetwork/loomchain/log" - "github.com/pkg/errors" "github.com/stretchr/testify/suite" ) @@ -24,16 +24,118 @@ func TestMultiWriterAppStoreTestSuite(t *testing.T) { suite.Run(t, new(MultiWriterAppStoreTestSuite)) } -func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSnapshotFlushInterval() { +func (m *MultiWriterAppStoreTestSuite) TestEnableDisableMultiWriterAppStore() { + require := m.Require() + store, err := mockMultiWriterStore(10) + require.NoError(err) + + // vm keys should be written to both the IAVL & EVM store + store.Set(evmDBFeatureKey, []byte{}) + store.Set(vmPrefixKey("abcd"), []byte("hello")) + store.Set(vmPrefixKey("abcde"), []byte("world")) + store.Set(vmPrefixKey("evmStore"), []byte("yes")) + store.Set(vmPrefixKey("aaaa"), []byte("yes")) + store.Set([]byte("abcd"), []byte("NewData")) + + rangeData := store.Range(vmPrefix) + require.Equal(4, len(rangeData)) + require.True(store.Has([]byte("abcd"))) + + // vm keys should now only be written to the EVM store + store.Set(evmDBFeatureKey, []byte{1}) + store.Set(vmPrefixKey("gg"), []byte("world")) + store.Set(vmPrefixKey("dd"), []byte("yes")) + store.Set(vmPrefixKey("vv"), []byte("yes")) + store.Set([]byte("dcba"), []byte("MoreData")) + + rangeData = store.Range(vmPrefix) + require.Equal(7, len(rangeData)) + require.True(store.Has([]byte("abcd"))) + require.True(store.Has([]byte("dcba"))) +} + +func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreDelete() { + require := m.Require() + store, err := mockMultiWriterStore(10) + require.NoError(err) + + // vm keys should be written to both the IAVL & EVM store + store.Set(evmDBFeatureKey, []byte{}) + store.Set(vmPrefixKey("abcd"), []byte("hello")) + store.Set(vmPrefixKey("abcde"), []byte("world")) + store.Set(vmPrefixKey("evmStore"), []byte("yes")) + store.Set(vmPrefixKey("aaaa"), []byte("yes")) + store.Set([]byte("vmroot"), []byte("SSSSSSSSSSSSS")) + store.Set([]byte("abcd"), []byte("NewData")) + + 
store.Delete(vmPrefixKey("abcd")) + require.False(store.Has(vmPrefixKey("abcd"))) + + rangeData := store.Range(vmPrefix) + require.Equal(3, len(rangeData)) + require.True(store.Has([]byte("vmroot"))) + require.True(store.Has([]byte("abcd"))) + + // vm keys should be written to the EVM store + store.Set(evmDBFeatureKey, []byte{1}) + rangeData = store.Range(vmPrefix) + require.Equal(3, len(rangeData)) + require.Equal([]byte("SSSSSSSSSSSSS"), store.Get([]byte("vmroot"))) + + store.Set(vmPrefixKey("gg"), []byte("world")) + store.Set(vmPrefixKey("dd"), []byte("yes")) + store.Set(vmPrefixKey("vv"), []byte("yes")) + store.Delete(vmPrefixKey("vv")) + require.False(store.Has(vmPrefixKey("vv"))) + + rangeData = store.Range(vmPrefix) + require.Equal(5, len(rangeData)) + require.True(store.Has([]byte("abcd"))) +} + +func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSnapShot() { + require := m.Require() + store, err := mockMultiWriterStore(10) + require.NoError(err) + + store.Set(evmDBFeatureKey, []byte{1}) + store.Set(vmPrefixKey("abcd"), []byte("hello")) + store.Set(vmPrefixKey("abcde"), []byte("world")) + store.Set(vmPrefixKey("evmStore"), []byte("yes")) + store.Set(vmPrefixKey("aaaa"), []byte("yes")) + store.Set([]byte("ssssvvv"), []byte("SSSSSSSSSSSSS")) + store.Set([]byte("abcd"), []byte("NewData")) + _, _, err = store.SaveVersion() + require.NoError(err) + + store.Set(vmPrefixKey("abcd"), []byte("hellooooooo")) + store.Set(vmPrefixKey("abcde"), []byte("vvvvvvvvv")) + store.Set([]byte("abcd"), []byte("asdfasdf")) + + snapshot := store.GetSnapshot() + require.Equal([]byte("hello"), snapshot.Get(vmPrefixKey("abcd"))) + require.Equal([]byte("NewData"), snapshot.Get([]byte("abcd"))) + require.Equal([]byte("world"), snapshot.Get(vmPrefixKey("abcde"))) + + _, _, err = store.SaveVersion() + require.NoError(err) + + snapshot = store.GetSnapshot() + require.Equal([]byte("asdfasdf"), snapshot.Get([]byte("abcd"))) + require.Equal([]byte("hellooooooo"), snapshot.Get(vmPrefixKey("abcd"))) + require.Equal([]byte("vvvvvvvvv"), snapshot.Get(vmPrefixKey("abcde"))) +} + +func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSnapShotFlushInterval() { require := m.Require() // flush data to disk every 2 blocks - store, err := mockMultiWriterStore(2, 2) + store, err := mockMultiWriterStore(2) require.NoError(err) // the first version go to memory store.Set([]byte("test1"), []byte("test1")) store.Set([]byte("test2"), []byte("test2")) - _, version, err := store.SaveVersion(nil) + _, version, err := store.SaveVersion() require.NoError(err) require.Equal(int64(1), version) @@ -41,18 +143,16 @@ func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSnapshotFlushInter store.Set([]byte("test2"), []byte("test2v2")) // this snapshot is from memory - snapshotv1, err := store.GetSnapshotAt(0) - require.NoError(err) + snapshotv1 := store.GetSnapshot() require.Equal([]byte("test1"), snapshotv1.Get([]byte("test1"))) require.Equal([]byte("test2"), snapshotv1.Get([]byte("test2"))) // this flushes all data to disk - _, _, err = store.SaveVersion(nil) + _, _, err = store.SaveVersion() require.NoError(err) // get snapshotv2 - snapshotv2, err := store.GetSnapshotAt(0) - require.NoError(err) + snapshotv2 := store.GetSnapshot() require.Equal([]byte("test1v2"), snapshotv2.Get([]byte("test1"))) require.Equal([]byte("test2v2"), snapshotv2.Get([]byte("test2"))) @@ -61,12 +161,67 @@ func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSnapshotFlushInter require.Equal([]byte("test2"), 
snapshotv1.Get([]byte("test2"))) } +func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSnapShotRange() { + require := m.Require() + store, err := mockMultiWriterStore(10) + require.NoError(err) + + store.Set(evmDBFeatureKey, []byte{1}) + store.Set(vmPrefixKey("abcd"), []byte("hello")) + store.Set(vmPrefixKey("abcde"), []byte("world")) + store.Set(vmPrefixKey("evmStore"), []byte("yes")) + store.Set(vmPrefixKey("aaaa"), []byte("yes")) + store.Set([]byte("ssssvvv"), []byte("SSSSSSSSSSSSS")) + store.Set([]byte("abcd"), []byte("NewData")) + store.Set([]byte("uuuu"), []byte("SSSSSSSSSSSSS")) + store.Set([]byte("sssss"), []byte("NewData")) + + snapshot := store.GetSnapshot() + rangeData := snapshot.Range(vmPrefix) + require.Equal(0, len(rangeData)) + _, _, err = store.SaveVersion() + require.NoError(err) + + snapshot = store.GetSnapshot() + rangeData = snapshot.Range(vmPrefix) + require.Equal(4+1, len(rangeData)) // +1 for evm root stored by EVM store + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("abcd")), []byte("hello"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("abcde")), []byte("world"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("evmStore")), []byte("yes"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("aaaa")), []byte("yes"))) + + // Modifications shouldn't be visible in the snapshot until the next SaveVersion() + store.Delete(vmPrefixKey("abcd")) + store.Delete([]byte("ssssvvv")) + + snapshot = store.GetSnapshot() + rangeData = snapshot.Range(vmPrefix) + require.Equal(4+1, len(rangeData)) // +1 for evm root stored by EVM store + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("abcd")), []byte("hello"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("abcde")), []byte("world"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("evmStore")), []byte("yes"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("aaaa")), []byte("yes"))) + + _, _, err = store.SaveVersion() + require.NoError(err) + + snapshot = store.GetSnapshot() + rangeData = snapshot.Range(vmPrefix) + require.Equal(3+1, len(rangeData)) // +1 for evm root stored by EVM store + require.Equal(0, len(snapshot.Get(vmPrefixKey("abcd")))) // has been deleted + require.Equal(0, len(snapshot.Get([]byte("ssssvvv")))) // has been deleted + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("abcde")), []byte("world"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("evmStore")), []byte("yes"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("aaaa")), []byte("yes"))) +} + func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSaveVersion() { require := m.Require() - store, err := mockMultiWriterStore(10, -1) + store, err := mockMultiWriterStore(10) require.NoError(err) - // all keys (including vm keys) should be written to the IAVL store + // vm keys should be written to the EVM store + store.Set(evmDBFeatureKey, []byte{1}) store.Set(vmPrefixKey("abcd"), []byte("hello")) store.Set(vmPrefixKey("abcde"), []byte("world")) store.Set(vmPrefixKey("evmStore"), []byte("yes")) @@ -77,7 +232,7 @@ func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSaveVersion() { store.Set(vmPrefixKey("dd"), []byte("yes")) store.Set(vmPrefixKey("vv"), []byte("yes")) - _, version, err := store.SaveVersion(nil) + _, version, err := store.SaveVersion() require.Equal(int64(1), version) require.NoError(err) @@ -87,9 +242,9 @@ func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSaveVersion() { 
store.Delete(vmPrefixKey("gg")) dataRange := store.Range(vmPrefix) - require.Equal(6, len(dataRange)) + require.Equal(6+1, len(dataRange)) // +1 is for the evm root that written by the EVM store itself - _, version, err = store.SaveVersion(nil) + _, version, err = store.SaveVersion() require.Equal(int64(2), version) require.NoError(err) @@ -100,7 +255,7 @@ func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSaveVersion() { func (m *MultiWriterAppStoreTestSuite) TestPruningEvmKeys() { require := m.Require() - store, err := mockMultiWriterStore(10, 10) + store, err := mockMultiWriterStore(10) require.NoError(err) // write some vm keys to iavl store @@ -112,14 +267,11 @@ func (m *MultiWriterAppStoreTestSuite) TestPruningEvmKeys() { iavlStore.Set(vmPrefixKey("gg"), []byte("world")) iavlStore.Set(vmPrefixKey("dd"), []byte("yes")) iavlStore.Set(vmPrefixKey("vv"), []byte("yes")) - _, version, err := store.SaveVersion(nil) - require.NoError(err) + _, version, err := store.SaveVersion() require.Equal(int64(1), version) - require.Equal(version, iavlStore.Version()) - _, evmStoreVer := store.evmStore.Version() - require.Equal(version, evmStoreVer) + require.NoError(err) - newStore, err := NewMultiWriterAppStore(iavlStore, store.evmStore) + newStore, err := NewMultiWriterAppStore(iavlStore, store.evmStore, false) require.NoError(err) rangeData := iavlStore.Range([]byte("vm")) @@ -134,7 +286,7 @@ func (m *MultiWriterAppStoreTestSuite) TestPruningEvmKeys() { // prune VM keys // NOTE: only 3 vm keys will actually get pruned due to the quirkiness of RangeWithLimit - _, version, err = newStore.SaveVersion(nil) + _, version, err = newStore.SaveVersion() require.Equal(int64(2), version) require.NoError(err) @@ -144,7 +296,7 @@ func (m *MultiWriterAppStoreTestSuite) TestPruningEvmKeys() { // prune VM keys // NOTE: once again only 3 vm keys will get pruned - _, version, err = newStore.SaveVersion(nil) + _, version, err = newStore.SaveVersion() require.Equal(int64(3), version) require.NoError(err) @@ -153,7 +305,7 @@ func (m *MultiWriterAppStoreTestSuite) TestPruningEvmKeys() { require.Equal(1, len(rangeData)) // prune VM keys - _, version, err = newStore.SaveVersion(nil) + _, version, err = newStore.SaveVersion() require.Equal(int64(4), version) require.NoError(err) @@ -164,7 +316,7 @@ func (m *MultiWriterAppStoreTestSuite) TestPruningEvmKeys() { func (m *MultiWriterAppStoreTestSuite) TestIAVLRangeWithlimit() { require := m.Require() - store, err := mockMultiWriterStore(10, 10) + store, err := mockMultiWriterStore(10) require.NoError(err) // write some vm keys to iavl store @@ -176,7 +328,7 @@ func (m *MultiWriterAppStoreTestSuite) TestIAVLRangeWithlimit() { iavlStore.Set(vmPrefixKey("gg"), []byte("world")) iavlStore.Set(vmPrefixKey("dd"), []byte("yes")) iavlStore.Set(vmPrefixKey("vv"), []byte("yes")) - _, _, err = store.SaveVersion(nil) + _, _, err = store.SaveVersion() require.NoError(err) // only 4 VM keys will be returned due to the quirkiness of RangeWithLimit @@ -184,56 +336,15 @@ func (m *MultiWriterAppStoreTestSuite) TestIAVLRangeWithlimit() { require.Equal(4, len(rangeData)) } -func (m *MultiWriterAppStoreTestSuite) TestStoreRange() { - require := m.Require() - mws, err := mockMultiWriterStore(0, 0) - require.NoError(err) - prefixes, entries := populateStore(mws) - verifyRange(require, "MultiWriterAppStore", mws, prefixes, entries) - _, _, err = mws.SaveVersion(nil) - require.NoError(err) - verifyRange(require, "MultiWriterAppStore", mws, prefixes, entries) -} - -func (m 
*MultiWriterAppStoreTestSuite) TestSnapshotRange() { - require := m.Require() - mws, err := mockMultiWriterStore(0, 0) - require.NoError(err) - prefixes, entries := populateStore(mws) - verifyRange(require, "MultiWriterAppStore", mws, prefixes, entries) - mws.SaveVersion(nil) - - // snapshot should see all the data that was saved to disk - func() { - snap, err := mws.GetSnapshotAt(0) - require.NoError(err) - defer snap.Release() - - verifyRange(require, "MultiWriterAppStoreSnapshot", snap, prefixes, entries) - }() -} - -func (m *MultiWriterAppStoreTestSuite) TestConcurrentSnapshots() { - require := m.Require() - mws, err := mockMultiWriterStore(0, 0) - require.NoError(err) - verifyConcurrentSnapshots(require, mws) -} - -func mockMultiWriterStore(appStoreFlushInterval, evmStoreFlushInterval int64) (*MultiWriterAppStore, error) { - // Using different flush intervals for the app & evm stores is not supported. - if appStoreFlushInterval > 0 && evmStoreFlushInterval > 0 && appStoreFlushInterval != evmStoreFlushInterval { - return nil, errors.New("positive flush intervals must be consistent") - } - +func mockMultiWriterStore(flushInterval int64) (*MultiWriterAppStore, error) { memDb, _ := db.LoadMemDB() - iavlStore, err := NewIAVLStore(memDb, 0, 0, appStoreFlushInterval) + iavlStore, err := NewIAVLStore(memDb, 0, 0, flushInterval) if err != nil { return nil, err } memDb, _ = db.LoadMemDB() - evmStore := NewEvmStore(memDb, 100, evmStoreFlushInterval) - multiWriterStore, err := NewMultiWriterAppStore(iavlStore, evmStore) + evmStore := NewEvmStore(memDb, 100) + multiWriterStore, err := NewMultiWriterAppStore(iavlStore, evmStore, false) if err != nil { return nil, err } diff --git a/store/pruning_iavlstore.go b/store/pruning_iavlstore.go new file mode 100644 index 0000000000..4e73cbad31 --- /dev/null +++ b/store/pruning_iavlstore.go @@ -0,0 +1,271 @@ +package store + +import ( + "fmt" + "runtime" + "sync" + "time" + + "github.com/go-kit/kit/metrics" + kitprometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/loomnetwork/go-loom" + "github.com/loomnetwork/go-loom/plugin" + "github.com/loomnetwork/loomchain/log" + "github.com/pkg/errors" + stdprometheus "github.com/prometheus/client_golang/prometheus" + dbm "github.com/tendermint/tendermint/libs/db" +) + +var ( + pruneDuration metrics.Histogram + deleteVersionDuration metrics.Histogram +) + +func init() { + const namespace = "loomchain" + const subsystem = "pruning_iavl_store" + + pruneDuration = kitprometheus.NewSummaryFrom( + stdprometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "prune_duration", + Help: "How long PruningIAVLStore.prune() took to execute (in seconds)", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, []string{"error"}) + deleteVersionDuration = kitprometheus.NewSummaryFrom( + stdprometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "delete_version_duration", + Help: "How long it took to delete a single version from the IAVL store (in seconds)", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, []string{"error"}) +} + +type PruningIAVLStoreConfig struct { + MaxVersions int64 // maximum number of versions to keep when pruning + BatchSize int64 // maximum number of versions to delete in each cycle + FlushInterval int64 // number of versions before flushing to disk + Interval time.Duration + Logger *loom.Logger +} + +// PruningIAVLStore is a specialized IAVLStore that has a background thread that periodically prunes +// 
old versions. It should only be used to prune old clusters, on new clusters nodes will delete +// a version each time they save a new one, so the background thread, and all the extra locking +// is unnecessary. +type PruningIAVLStore struct { + store *IAVLStore + mutex *sync.RWMutex + oldestVer int64 + maxVersions int64 + batchSize int64 + batchCount uint64 + logger *loom.Logger +} + +// NewPruningIAVLStore creates a new PruningIAVLStore. +// maxVersions can be used to specify how many versions should be retained, if set to zero then +// old versions will never been deleted. +func NewPruningIAVLStore(db dbm.DB, cfg PruningIAVLStoreConfig) (*PruningIAVLStore, error) { + // always keep at least 2 of the latest versions + maxVersions := cfg.MaxVersions + if (maxVersions != 0) && (maxVersions < 2) { + maxVersions = 2 + } + + store, err := NewIAVLStore(db, maxVersions, 0, cfg.FlushInterval) + if err != nil { + return nil, err + } + + s := &PruningIAVLStore{ + store: store, + mutex: &sync.RWMutex{}, + maxVersions: maxVersions, + batchSize: cfg.BatchSize, + logger: cfg.Logger, + } + + if s.logger == nil { + s.logger = log.Default + } + + if maxVersions != 0 { + latestVer := store.Version() + + oldestVer := int64(0) + if cfg.BatchSize > 1 { + for i := int64(1); i <= latestVer; i++ { + if store.tree.VersionExists(i) { + oldestVer = i + break + } + } + } + s.oldestVer = oldestVer + + go s.loopWithInterval(s.prune, cfg.Interval) + } + + return s, nil +} + +func (s *PruningIAVLStore) Delete(key []byte) { + s.mutex.Lock() + defer s.mutex.Unlock() + + s.store.Delete(key) +} + +func (s *PruningIAVLStore) Set(key, val []byte) { + s.mutex.Lock() + defer s.mutex.Unlock() + + s.store.Set(key, val) +} + +func (s *PruningIAVLStore) Has(key []byte) bool { + s.mutex.RLock() + defer s.mutex.RUnlock() + + return s.store.Has(key) +} + +func (s *PruningIAVLStore) Get(key []byte) []byte { + s.mutex.RLock() + defer s.mutex.RUnlock() + + return s.store.Get(key) +} + +func (s *PruningIAVLStore) Range(prefix []byte) plugin.RangeData { + s.mutex.RLock() + defer s.mutex.RUnlock() + + return s.store.Range(prefix) +} + +func (s *PruningIAVLStore) Hash() []byte { + s.mutex.Lock() + defer s.mutex.Unlock() + + return s.store.Hash() +} + +func (s *PruningIAVLStore) Version() int64 { + s.mutex.RLock() + defer s.mutex.RUnlock() + + return s.store.Version() +} + +func (s *PruningIAVLStore) SaveVersion() ([]byte, int64, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + + hash, ver, err := s.store.SaveVersion() + if err == nil && s.oldestVer == 0 { + s.oldestVer = ver + } + return hash, ver, err +} + +func (s *PruningIAVLStore) Prune() error { + // pruning is done in the goroutine, so do nothing here + return nil +} + +func (s *PruningIAVLStore) GetSnapshot() Snapshot { + // This isn't an actual snapshot obviously, and never will be, but lets pretend... 
+ return &pruningIAVLStoreSnapshot{ + PruningIAVLStore: s, + } +} + +func (s *PruningIAVLStore) prune() error { + s.mutex.Lock() + defer s.mutex.Unlock() + + var err error + defer func(begin time.Time) { + lvs := []string{"error", fmt.Sprint(err != nil)} + pruneDuration.With(lvs...).Observe(time.Since(begin).Seconds()) + }(time.Now()) + + latestVer := s.store.Version() + endVer := latestVer - s.maxVersions + + if (s.oldestVer == 0) || (s.oldestVer > endVer) { + return nil // nothing to prune yet + } + + if (endVer - s.oldestVer) > s.batchSize { + endVer = s.oldestVer + s.batchSize + } + + if endVer > (latestVer - 2) { + endVer = latestVer - 2 + } + + for i := s.oldestVer; i <= endVer; i++ { + if s.store.tree.VersionExists(i) { + if err = s.deleteVersion(i); err != nil { + return errors.Wrapf(err, "failed to delete tree version %d", i) + } + } + s.oldestVer++ + } + + s.batchCount++ + return nil +} + +func (s *PruningIAVLStore) deleteVersion(ver int64) error { + var err error + defer func(begin time.Time) { + lvs := []string{"error", fmt.Sprint(err != nil)} + deleteVersionDuration.With(lvs...).Observe(time.Since(begin).Seconds()) + }(time.Now()) + + err = s.store.tree.DeleteVersion(ver) + return err +} + +// runWithRecovery should run in a goroutine, it will ensure the given function keeps on running in +// a goroutine as long as it doesn't panic due to a runtime error. +//[MGC] I believe this function shouldn't be used as we should just fail fast if this breaks +func (s *PruningIAVLStore) runWithRecovery(run func()) { + defer func() { + if r := recover(); r != nil { + s.logger.Error("Recovered from panic in PruningIAVLStore goroutine", "r", r) + // Unless it's a runtime error restart the goroutine + if _, ok := r.(runtime.Error); !ok { + time.Sleep(30 * time.Second) + s.logger.Info("Restarting PruningIAVLStore goroutine...\n") + go s.runWithRecovery(run) + } + } + }() + run() +} + +// loopWithInterval will execute the step function in an endless loop, sleeping for the specified +// interval at the end of each loop iteration. +func (s *PruningIAVLStore) loopWithInterval(step func() error, interval time.Duration) { + for { + if err := step(); err != nil { + s.logger.Error("PruneIAVLStore encountered an error", "err", err) + } + time.Sleep(interval) + } +} + +type pruningIAVLStoreSnapshot struct { + *PruningIAVLStore +} + +func (s *pruningIAVLStoreSnapshot) Release() { + // noop +} diff --git a/store/store.go b/store/store.go index de08f6d7d2..f2aaae4320 100644 --- a/store/store.go +++ b/store/store.go @@ -46,21 +46,14 @@ type Snapshot interface { Release() } -// VersionedKVStoreSaveOptions contains options that can be passed into VersionedKVStore.SaveVersion() -type VersionedKVStoreSaveOptions struct { - // Overrides the interval at which versions are flushed to disk, not all stores may support this. 
- FlushInterval int64 -} - type VersionedKVStore interface { KVStore Hash() []byte Version() int64 - // Saves changes made since the last version, the options may be nil - SaveVersion(options *VersionedKVStoreSaveOptions) ([]byte, int64, error) + SaveVersion() ([]byte, int64, error) // Delete old version of the store Prune() error - GetSnapshotAt(version int64) (Snapshot, error) + GetSnapshot() Snapshot } type cacheItem struct { diff --git a/store/store_test.go b/store/store_test.go index 9495b6ca45..a2c3841ef5 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -5,6 +5,7 @@ import ( "fmt" "sync" "testing" + "time" "github.com/loomnetwork/go-loom/plugin" "github.com/loomnetwork/go-loom/util" @@ -184,6 +185,8 @@ type StoreTestSuite struct { suite.Suite store VersionedKVStore StoreName string + //nolint:unused,structcheck + supportsSnapshots bool } func populateStore(s KVWriter) ([][]byte, []*plugin.RangeEntry) { @@ -217,9 +220,8 @@ func populateStore(s KVWriter) ([][]byte, []*plugin.RangeEntry) { return prefixes, entries } -func verifyRange( - require *require.Assertions, storeName string, s KVReader, prefixes [][]byte, entries []*plugin.RangeEntry, -) { +func (ts *StoreTestSuite) VerifyRange(s KVReader, prefixes [][]byte, entries []*plugin.RangeEntry) { + require := ts.Require() // TODO: This passed before the last Tendermint upgrade, doesn't anymore, figure out why. /* expected := []*plugin.RangeEntry{ @@ -236,8 +238,8 @@ func verifyRange( } */ require.Len(s.Range([]byte("abc123")), 1) - require.EqualValues([]byte{}, s.Range([]byte("abc123"))[0].Key, storeName) - require.EqualValues(entries[1].Value, s.Range([]byte("abc123"))[0].Value, storeName) + require.EqualValues([]byte{}, s.Range([]byte("abc123"))[0].Key, ts.StoreName) + require.EqualValues(entries[1].Value, s.Range([]byte("abc123"))[0].Value, ts.StoreName) key2, err := util.UnprefixKey(entries[2].Key, prefixes[0]) require.NoError(err) @@ -252,10 +254,10 @@ func verifyRange( {key4, entries[4].Value}, } actual := s.Range(prefixes[0]) - require.Len(actual, len(expected), storeName) - if storeName != "MemStore" { + require.Len(actual, len(expected), ts.StoreName) + if ts.StoreName != "MemStore" { for i := range expected { - require.EqualValues(expected[i], actual[i], storeName) + require.EqualValues(expected[i], actual[i], ts.StoreName) } } @@ -274,12 +276,12 @@ func verifyRange( {key8, entries[8].Value}, } actual = s.Range(prefixes[1]) - require.Len(actual, len(expected), storeName) + require.Len(actual, len(expected), ts.StoreName) // TODO: MemStore keys should be iterated in ascending order - if storeName != "MemStore" { + if ts.StoreName != "MemStore" { for i := range expected { - require.EqualValues(expected[i], actual[i], storeName) + require.EqualValues(expected[i], actual[i], ts.StoreName) } } @@ -294,10 +296,10 @@ func verifyRange( {key10, entries[10].Value}, } actual = s.Range(prefixes[2]) - require.Len(actual, len(expected), storeName) - if storeName != "MemStore" { + require.Len(actual, len(expected), ts.StoreName) + if ts.StoreName != "MemStore" { for i := range expected { - require.EqualValues(expected[i], actual[i], storeName) + require.EqualValues(expected[i], actual[i], ts.StoreName) } } } @@ -305,13 +307,14 @@ func verifyRange( func (ts *StoreTestSuite) TestStoreRange() { require := ts.Require() prefixes, entries := populateStore(ts.store) - verifyRange(require, ts.StoreName, ts.store, prefixes, entries) - _, _, err := ts.store.SaveVersion(nil) + ts.VerifyRange(ts.store, prefixes, entries) + _, _, err := 
ts.store.SaveVersion() require.NoError(err) - verifyRange(require, ts.StoreName, ts.store, prefixes, entries) + ts.VerifyRange(ts.store, prefixes, entries) } -func verifyConcurrentSnapshots(require *require.Assertions, s VersionedKVStore) { +func (ts *StoreTestSuite) VerifyConcurrentSnapshots() { + require := ts.Require() // start one writer go-routine and a bunch of reader go-routines var wg sync.WaitGroup numOps := 10000 @@ -322,13 +325,13 @@ func verifyConcurrentSnapshots(require *require.Assertions, s VersionedKVStore) defer wg.Done() for i := 0; i < numOps; i++ { - s.Set([]byte(fmt.Sprintf("key/%d", i)), []byte(fmt.Sprintf("value/%d", i))) + ts.store.Set([]byte(fmt.Sprintf("key/%d", i)), []byte(fmt.Sprintf("value/%d", i))) if i%10 == 0 { - _, _, err := s.SaveVersion(nil) + _, _, err := ts.store.SaveVersion() require.NoError(err) } } - _, _, err := s.SaveVersion(nil) + _, _, err := ts.store.SaveVersion() require.NoError(err) }() wg.Wait() @@ -345,9 +348,7 @@ func verifyConcurrentSnapshots(require *require.Assertions, s VersionedKVStore) if snap != nil { snap.Release() } - var err error - snap, err = s.GetSnapshotAt(0) - require.NoError(err) + snap = ts.store.GetSnapshot() } snap.Get([]byte(fmt.Sprintf("key/%d", i))) } @@ -374,6 +375,7 @@ type IAVLStoreTestSuite struct { func (ts *IAVLStoreTestSuite) SetupSuite() { ts.StoreName = "IAVLStore" + ts.supportsSnapshots = true } // runs before each test in this suite @@ -385,6 +387,35 @@ func (ts *IAVLStoreTestSuite) SetupTest() { require.NoError(err) } +func (ts *IAVLStoreTestSuite) TestSnapshotRange() { + prefixes, entries := populateStore(ts.store) + ts.VerifyRange(ts.store, prefixes, entries) + + // snapshot shouldn't see data that hasn't been saved to disk, + // but this store doesn't have real snapshots so the snapshot is expected to contain the same + // unsaved state as the store itself... + func() { + snap := ts.store.GetSnapshot() + defer snap.Release() + + ts.VerifyRange(snap, prefixes, entries) + }() + + ts.store.SaveVersion() + + // snapshot should see all the data that was saved to disk + func() { + snap := ts.store.GetSnapshot() + defer snap.Release() + + ts.VerifyRange(snap, prefixes, entries) + }() +} + +func (ts *IAVLStoreTestSuite) TestConcurrentSnapshots() { + ts.VerifyConcurrentSnapshots() +} + // // MemStore - broken in various ways, dunno why we even have this. 
// @@ -404,6 +435,141 @@ func (ts *MemStoreTestSuite) SetupTest() { func (ts *MemStoreTestSuite) SetupSuite() { ts.StoreName = "MemStore" + ts.supportsSnapshots = false +} + +// +// PruningIAVLStore +// + +func TestPruningIAVLStoreBatching(t *testing.T) { + db := dbm.NewMemDB() + cfg := PruningIAVLStoreConfig{ + MaxVersions: 5, + BatchSize: 5, + Interval: 1 * time.Second, + } + store, err := NewPruningIAVLStore(db, cfg) + require.NoError(t, err) + + require.Equal(t, int64(0), store.oldestVer) + + values := []struct { + key []byte + val []byte + }{ + {key: key1, val: val1}, + {key: key2, val: val2}, + {key: key3, val: val3}, + {key: key1, val: val3}, + {key: key2, val: val1}, + {key: key3, val: val2}, + {key: key1, val: val1}, + {key: key2, val: val2}, + {key: key3, val: val3}, + {key: key1, val: val3}, + {key: key2, val: val1}, + {key: key3, val: val2}, + } // 12 items + + curVer := int64(1) + for _, kv := range values { + store.Set(kv.key, kv.val) + _, ver, err := store.SaveVersion() + require.NoError(t, err) + require.Equal(t, curVer, ver) + curVer++ + } + + time.Sleep(5 * time.Second) + + require.True(t, store.Version() > cfg.MaxVersions) + require.Equal(t, store.Version(), store.oldestVer+cfg.MaxVersions-1, "correct number of versions has been kept") + require.Equal(t, uint64(2), store.batchCount, "correct number of batches has been pruned") + + prevOldestVer := store.oldestVer + + store, err = NewPruningIAVLStore(db, cfg) + require.NoError(t, err) + + // the oldest version shouldn't change when the IAVL store is reloaded + require.Equal(t, prevOldestVer, store.oldestVer) +} + +func TestPruningIAVLStoreKeepsAtLeastTwoVersions(t *testing.T) { + cfg := PruningIAVLStoreConfig{ + MaxVersions: 1, + BatchSize: 5, + Interval: 1 * time.Second, + } + store, err := NewPruningIAVLStore(dbm.NewMemDB(), cfg) + require.NoError(t, err) + require.Equal(t, int64(0), store.Version()) + + values := []struct { + key []byte + val []byte + }{ + {key: key1, val: val1}, + {key: key2, val: val2}, + } + + for i, kv := range values { + if i == 2 { + break + } + + store.Set(kv.key, kv.val) + _, _, err := store.SaveVersion() + require.NoError(t, err) + } + + time.Sleep(5 * time.Second) + + require.Equal(t, int64(2), store.Version()) + require.Equal(t, int64(1), store.oldestVer) + require.Equal(t, uint64(0), store.batchCount) +} + +func TestPruningIAVLStoreKeepsAllVersionsIfMaxVersionsIsZero(t *testing.T) { + cfg := PruningIAVLStoreConfig{ + MaxVersions: 0, + BatchSize: 5, + Interval: 1 * time.Second, + } + store, err := NewPruningIAVLStore(dbm.NewMemDB(), cfg) + require.NoError(t, err) + require.Equal(t, int64(0), store.Version()) + require.Equal(t, int64(0), store.maxVersions) + + values := []struct { + key []byte + val []byte + }{ + {key: key1, val: val1}, + {key: key2, val: val2}, + {key: key3, val: val3}, + {key: key1, val: val3}, + {key: key2, val: val1}, + {key: key3, val: val2}, + {key: key1, val: val1}, + {key: key2, val: val2}, + {key: key3, val: val3}, + {key: key1, val: val3}, + {key: key2, val: val1}, + {key: key3, val: val2}, + } // 12 items + + for _, kv := range values { + store.Set(kv.key, kv.val) + _, _, err := store.SaveVersion() + require.NoError(t, err) + } + + time.Sleep(4 * time.Second) + + require.Equal(t, int64(12), store.Version()) + require.Equal(t, uint64(0), store.batchCount) } func TestIAVLStoreKeepsAllVersionsIfMaxVersionsIsZero(t *testing.T) { @@ -432,9 +598,62 @@ func TestIAVLStoreKeepsAllVersionsIfMaxVersionsIsZero(t *testing.T) { for _, kv := range values { store.Set(kv.key, 
kv.val) - _, _, err := store.SaveVersion(nil) + _, _, err := store.SaveVersion() require.NoError(t, err) } require.Equal(t, int64(12), store.Version()) } + +func TestSwitchFromIAVLStoreToPruningIAVLStore(t *testing.T) { + memDB := dbm.NewMemDB() + store1, err := NewIAVLStore(memDB, 0, 0, 0) + require.NoError(t, err) + + values := []struct { + key []byte + val []byte + }{ + {key: key1, val: val1}, + {key: key2, val: val2}, + {key: key3, val: val3}, + {key: key1, val: val3}, + {key: key2, val: val1}, + {key: key3, val: val2}, + {key: key1, val: val1}, + {key: key2, val: val2}, + {key: key3, val: val3}, + {key: key1, val: val3}, + {key: key2, val: val1}, + {key: key3, val: val2}, + } // 12 items + + for _, kv := range values { + store1.Set(kv.key, kv.val) + _, _, err := store1.SaveVersion() + require.NoError(t, err) + } + + require.Equal(t, int64(12), store1.Version()) + + store2, err := NewIAVLStore(memDB, 11, 0, 0) + require.NoError(t, err) + // force the store to prune an old version + store2.Set(key1, val1) + _, _, err = store2.SaveVersion() + require.NoError(t, err) + + require.Equal(t, int64(13), store2.Version()) + + cfg := PruningIAVLStoreConfig{ + MaxVersions: 5, + BatchSize: 5, + Interval: 1 * time.Second, + } + store3, err := NewPruningIAVLStore(memDB, cfg) + require.NoError(t, err) + + time.Sleep(4 * time.Second) + + require.Equal(t, (store3.Version()-cfg.MaxVersions)+1, store3.oldestVer) +} diff --git a/store/versioned_cachingstore.go b/store/versioned_cachingstore.go index 2f63ce6121..3d0d914a01 100644 --- a/store/versioned_cachingstore.go +++ b/store/versioned_cachingstore.go @@ -6,13 +6,13 @@ import ( "strconv" "strings" "sync" - "sync/atomic" "time" "github.com/allegro/bigcache" "github.com/go-kit/kit/metrics" kitprometheus "github.com/go-kit/kit/metrics/prometheus" loom "github.com/loomnetwork/go-loom" + "github.com/pkg/errors" stdprometheus "github.com/prometheus/client_golang/prometheus" ) @@ -360,38 +360,25 @@ func (c *versionedCachingStore) Set(key, val []byte) { c.VersionedKVStore.Set(key, val) } -func (c *versionedCachingStore) SaveVersion(opts *VersionedKVStoreSaveOptions) ([]byte, int64, error) { - hash, version, err := c.VersionedKVStore.SaveVersion(opts) +func (c *versionedCachingStore) SaveVersion() ([]byte, int64, error) { + hash, version, err := c.VersionedKVStore.SaveVersion() if err == nil { - if err = c.cache.Set(rootKey, GetEVMRootFromAppStore(c.VersionedKVStore), version); err != nil { - // Only log error and dont error out - cacheErrors.With("cache_operation", "set").Add(1) - c.logger.Error("[VersionedCachingStore] error while caching EVM root", "err", err) - } // Cache version is always 1 block ahead of KV store version, that way when - // GetSnapshotAt(0) is called it won't return the current unpersisted state of the cache, + // GetSnapshot() is called it won't return the current unpersisted state of the cache, // but rather the last persisted version. - // GetSnapshotAt may be called concurrently so the version must be updated atomically. 
- atomic.StoreInt64(&c.version, version+1) + c.version = version + 1 } return hash, version, err } -func (c *versionedCachingStore) GetSnapshotAt(version int64) (Snapshot, error) { - if version == 0 { - version = atomic.LoadInt64(&c.version) - 1 - } - - snapshot, err := c.VersionedKVStore.GetSnapshotAt(version) - if err != nil { - return nil, err - } - return newVersionedCachingStoreSnapshot(snapshot, c.cache, version, c.logger), nil +func (c *versionedCachingStore) GetSnapshot() Snapshot { + return newVersionedCachingStoreSnapshot( + c.VersionedKVStore.GetSnapshot(), + c.cache, c.version-1, c.logger, + ) } -// versionedCachingStoreSnapshot is a read-only CachingStore with specified version. -// NOTE: versionedCachingStoreSnapshot.Range is not implemented, so the underlying snapshot's Range -// implementation will be used instead. +// CachingStoreSnapshot is a read-only CachingStore with specified version type versionedCachingStoreSnapshot struct { Snapshot cache *versionedBigCache @@ -399,9 +386,8 @@ type versionedCachingStoreSnapshot struct { logger *loom.Logger } -func newVersionedCachingStoreSnapshot( - snapshot Snapshot, cache *versionedBigCache, version int64, logger *loom.Logger, -) *versionedCachingStoreSnapshot { +func newVersionedCachingStoreSnapshot(snapshot Snapshot, cache *versionedBigCache, + version int64, logger *loom.Logger) *versionedCachingStoreSnapshot { return &versionedCachingStoreSnapshot{ Snapshot: snapshot, cache: cache, @@ -410,6 +396,14 @@ func newVersionedCachingStoreSnapshot( } } +func (c *versionedCachingStoreSnapshot) Delete(key []byte) { + panic("[versionedCachingStoreSnapshot] Delete() not implemented") +} + +func (c *versionedCachingStoreSnapshot) Set(key, val []byte) { + panic("[versionedCachingStoreSnapshot] Set() not implemented") +} + func (c *versionedCachingStoreSnapshot) Has(key []byte) bool { var err error @@ -495,6 +489,14 @@ func (c *versionedCachingStoreSnapshot) Get(key []byte) []byte { return data } +func (c *versionedCachingStoreSnapshot) SaveVersion() ([]byte, int64, error) { + return nil, 0, errors.New("[VersionedCachingStoreSnapshot] SaveVersion() not implemented") +} + +func (c *versionedCachingStoreSnapshot) Prune() error { + return errors.New("[VersionedCachingStoreSnapshot] Prune() not implemented") +} + func (c *versionedCachingStoreSnapshot) Release() { c.Snapshot.Release() } diff --git a/store/versioned_cachingstore_test.go b/store/versioned_cachingstore_test.go index cd760d049c..5345b77e5d 100644 --- a/store/versioned_cachingstore_test.go +++ b/store/versioned_cachingstore_test.go @@ -3,83 +3,150 @@ package store import ( "testing" + "github.com/loomnetwork/go-loom/plugin" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +type MockStore struct { + storage map[string][]byte + version int64 +} + +func NewMockStore() *MockStore { + return &MockStore{ + storage: make(map[string][]byte), + version: 0, + } +} + +func (m *MockStore) Get(key []byte) []byte { + return m.storage[string(key)] +} + +func (m *MockStore) Has(key []byte) bool { + return m.storage[string(key)] != nil +} + +func (m *MockStore) Set(key []byte, value []byte) { + m.storage[string(key)] = value +} + +func (m *MockStore) Delete(key []byte) { + delete(m.storage, string(key)) +} + +func (m *MockStore) Range(prefix []byte) plugin.RangeData { + return nil +} + +func (m *MockStore) Hash() []byte { + return nil +} + +func (m *MockStore) Version() int64 { + return m.version +} + +func (m *MockStore) SaveVersion() ([]byte, int64, error) { + 
m.version = m.version + 1 + return nil, m.version, nil +} + +func (m *MockStore) Prune() error { + return nil +} + +func (m *MockStore) GetSnapshot() Snapshot { + snapshotStore := make(map[string][]byte) + for k, v := range m.storage { + snapshotStore[k] = v + } + mstore := &MockStore{ + storage: snapshotStore, + } + return &mockStoreSnapshot{ + MockStore: mstore, + } +} + +type mockStoreSnapshot struct { + *MockStore +} + +func (s *mockStoreSnapshot) Release() { + // noop +} + func TestCachingStoreVersion(t *testing.T) { defaultConfig := DefaultCachingStoreConfig() defaultConfig.CachingEnabled = true - mockStore, err := mockMultiWriterStore(0, 0) + mockStore := NewMockStore() + + versionedStore, err := NewVersionedCachingStore(mockStore, defaultConfig, mockStore.Version()) + cachingStore := versionedStore.(*versionedCachingStore) + require.NoError(t, err) key1 := []byte("key1") key2 := []byte("key2") key3 := []byte("key3") - versionedStore, err := NewVersionedCachingStore(mockStore, defaultConfig, mockStore.Version()) - require.NoError(t, err) - - versionedStore.Set(key1, []byte("value1")) - versionedStore.Set(key2, []byte("value2")) - versionedStore.Set(key3, []byte("value3")) + mockStore.Set(key1, []byte("value1")) + mockStore.Set(key2, []byte("value2")) + mockStore.Set(key3, []byte("value3")) - snapshotv0, err := versionedStore.GetSnapshotAt(0) - require.NoError(t, err) + snapshotv0 := cachingStore.GetSnapshot() - // snapshot should be empty because values haven't been persisted to the underlying store + // cachingStoreSnapshot will cache key1 in memory as version 0 cachedValue := snapshotv0.Get(key1) - assert.Equal(t, "", string(cachedValue), "snapshot should be empty") + assert.Equal(t, "value1", string(cachedValue), "cachingstore read needs to be consistent with underlying store") + // Set data directly without updating the cache, the caching store should return the old data + mockStore.Set(key2, []byte("value2")) + cachedValue = snapshotv0.Get([]byte("key1")) + assert.Equal(t, "value1", string(cachedValue), "cachingstore needs to fetch the key directly from the backing store") - _, version, _ := versionedStore.SaveVersion(nil) + // save to bump up version + _, version, _ := cachingStore.SaveVersion() assert.Equal(t, int64(1), version, "version must be updated to 1") + // save data into version 1 + cachingStore.Set(key2, []byte("newvalue2")) + cachingStore.Set(key3, []byte("newvalue3")) + snapshotv1 := cachingStore.GetSnapshot() + cachedValue = snapshotv1.Get(key2) + assert.Equal(t, "newvalue2", string(cachedValue), "snapshotv1 should get correct value") + cachedValue = snapshotv1.Get(key1) + assert.Equal(t, "value1", string(cachedValue), "snapshotv1 should get correct value") - // previously obtained snapshot should still be empty since it shouldn't be affected by changes - // to the underlying store + // snapshotv0 should not get updated cachedValue = snapshotv0.Get(key1) - assert.Equal(t, "", string(cachedValue), "snapshot should be empty") + assert.Equal(t, "value1", string(cachedValue), "snapshotv0 should get correct value") + cachedValue = snapshotv0.Get(key2) + assert.Equal(t, "value2", string(cachedValue), "snapshotv0 should get correct value") + cachedValue = snapshotv0.Get(key3) + assert.Equal(t, "value3", string(cachedValue), "snapshotv0 should get correct value") - snapshotv1, err := versionedStore.GetSnapshotAt(0) - require.NoError(t, err) - - // new snapshot should contain the previously persisted values - cachedValue = snapshotv1.Get(key1) - assert.Equal(t, "value1", string(cachedValue), "value should match the one persisted to the underlying store") - // existing snapshot should be unaffected by unpersisted changes to the store - versionedStore.Set(key1, []byte("newvalue1")) - cachedValue = snapshotv1.Get(key1) - assert.Equal(t, "value1", string(cachedValue), "snapshot should not be affected by changes to the underlying store") + cacheSnapshot := snapshotv0.(*versionedCachingStoreSnapshot) + cacheSnapshot.cache.Delete(key1, 1) // evict a key + cachedValue = snapshotv0.Get(key1) // read back the evicted key + assert.Equal(t, "value1", string(cachedValue), "snapshotv0 should get the correct value by fetching from the underlying snapshot") // save to bump up version - _, version, _ = versionedStore.SaveVersion(nil) + _, version, _ = cachingStore.SaveVersion() assert.Equal(t, int64(2), version, "version must be updated to 2") - - // existing snapshot should be unaffected by persisted changes to the store - cachedValue = snapshotv1.Get(key1) - assert.Equal(t, "value1", string(cachedValue), "snapshot should not be affected by changes to the uderlying store") - - // save data into version 3 - versionedStore.Set(key2, []byte("newvalue2")) - versionedStore.Set(key3, []byte("newvalue3")) - - _, version, _ = versionedStore.SaveVersion(nil) - assert.Equal(t, int64(3), version, "version must be updated to 3") - - snapshotv2, err := versionedStore.GetSnapshotAt(0) - require.NoError(t, err) + snapshotv2 := cachingStore.GetSnapshot() cachedValue = snapshotv2.Get(key1) - assert.Equal(t, "newvalue1", string(cachedValue)) + assert.Equal(t, "value1", string(cachedValue), "snapshotv2 should get the value from cache") cachedValue = snapshotv2.Get(key2) - assert.Equal(t, "newvalue2", string(cachedValue)) + assert.Equal(t, "newvalue2", string(cachedValue), "snapshotv2 should get the value from cache") cachedValue = snapshotv2.Get(key3) - assert.Equal(t, "newvalue3", string(cachedValue)) + assert.Equal(t, "newvalue3", string(cachedValue), "snapshotv2 should get the value from cache") - // snapshotv1 should remain unchanged - cachedValue = snapshotv1.Get(key1) - assert.Equal(t, "value1", string(cachedValue)) - cachedValue = snapshotv1.Get(key2) - assert.Equal(t, "value2", string(cachedValue)) - cachedValue = snapshotv1.Get(key3) - assert.Equal(t, "value3", string(cachedValue)) + // evict data from the key table + cacheSnapshot = snapshotv1.(*versionedCachingStoreSnapshot) + cacheSnapshot.cache.cache.Delete(string(key1)) // evict key1 from the key table + cachedValue = snapshotv2.Get(key1) + assert.Equal(t, "value1", string(cachedValue), "snapshotv2 should get the value from cache")
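Note on the pruning batch arithmetic: the window that PruningIAVLStore.prune() deletes on each pass is easy to lose track of in the diff above. The standalone sketch below reproduces only that calculation with plain int64 arguments; pruneWindow and the demo in main are illustrative names (not part of the loomchain code), and the numbers mirror TestPruningIAVLStoreBatching (12 saved versions, MaxVersions=5, BatchSize=5, ending with version 8 as the oldest retained version after two passes).

package main

import "fmt"

// pruneWindow mirrors the clamping done in PruningIAVLStore.prune(): given the
// oldest retained version, the latest saved version, the retention target and
// the per-pass batch size, it returns the inclusive range of versions a single
// pruning pass would delete, or ok=false when there is nothing old enough yet.
func pruneWindow(oldestVer, latestVer, maxVersions, batchSize int64) (start, end int64, ok bool) {
	endVer := latestVer - maxVersions
	if oldestVer == 0 || oldestVer > endVer {
		return 0, 0, false // not enough history accumulated yet
	}
	if endVer-oldestVer > batchSize {
		endVer = oldestVer + batchSize // bound the work done in one pass
	}
	if endVer > latestVer-2 {
		endVer = latestVer - 2 // always keep at least the two most recent versions
	}
	return oldestVer, endVer, true
}

func main() {
	oldest, latest := int64(1), int64(12)
	for pass := 1; ; pass++ {
		start, end, ok := pruneWindow(oldest, latest, 5, 5)
		if !ok {
			break
		}
		// pass 1 deletes versions 1..6, pass 2 deletes version 7
		fmt.Printf("pass %d deletes versions %d..%d\n", pass, start, end)
		oldest = end + 1
	}
	fmt.Println("oldest retained version:", oldest) // 8, i.e. latest-maxVersions+1
}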
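Note on the background pruning goroutine: NewPruningIAVLStore starts loopWithInterval directly, while runWithRecovery is defined alongside it (and questioned in the [MGC] comment). The sketch below combines the two patterns to show the intended behaviour in isolation; keepRunning is an illustrative name and the delays are shortened for the demo, so this is a sketch of the approach rather than the actual loomchain code.

package main

import (
	"fmt"
	"runtime"
	"time"
)

// keepRunning executes step in an endless loop with a fixed sleep between
// iterations. If the loop panics with anything other than a runtime error,
// it logs the panic and restarts itself in a new goroutine after a delay;
// runtime errors are treated as programming bugs and the goroutine is allowed
// to die, matching the fail-fast intent of the original comments.
func keepRunning(step func() error, interval time.Duration) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered from panic:", r)
			if _, isRuntimeErr := r.(runtime.Error); !isRuntimeErr {
				time.Sleep(time.Second) // the diff sleeps 30s before restarting
				go keepRunning(step, interval)
			}
		}
	}()
	for {
		if err := step(); err != nil {
			fmt.Println("step returned an error:", err)
		}
		time.Sleep(interval)
	}
}

func main() {
	go keepRunning(func() error {
		fmt.Println("pruning pass")
		return nil
	}, 50*time.Millisecond)
	time.Sleep(200 * time.Millisecond) // let a few passes run before exiting
}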
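Note on the version bookkeeping in versionedCachingStore.SaveVersion(): the comment in the diff says the cache version is kept one ahead of the last persisted version so that GetSnapshot() (which pins snapshots to c.version-1) serves the last persisted version rather than unsaved writes. The minimal sketch below isolates just that offset; cachingStore and backingVersion here are illustrative stand-ins, not loomchain types, and the concurrency aspects (the removed atomic access) are deliberately out of scope.

package main

import "fmt"

type cachingStore struct {
	backingVersion int64 // version most recently persisted by the backing store
	version        int64 // cache version, kept one ahead of backingVersion
}

// SaveVersion pretends the backing store persisted a new version and then
// moves the cache version one ahead of it, as in the diff.
func (c *cachingStore) SaveVersion() int64 {
	c.backingVersion++
	c.version = c.backingVersion + 1
	return c.backingVersion
}

// snapshotVersion is the version a GetSnapshot() call would pin a snapshot to.
func (c *cachingStore) snapshotVersion() int64 { return c.version - 1 }

func main() {
	c := &cachingStore{version: 1}      // starts one ahead of persisted version 0
	fmt.Println(c.snapshotVersion())    // 0: nothing persisted yet
	c.SaveVersion()
	fmt.Println(c.snapshotVersion())    // 1: snapshots see the persisted version, not in-flight writes
}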
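Note on the read-only snapshot methods added to versionedCachingStoreSnapshot: the snapshot has to satisfy a read/write store interface even though it is immutable, so Set/Delete panic and SaveVersion/Prune return errors. The self-contained sketch below shows the same pattern; kvStore and roSnapshot are illustrative names, not the loomchain interfaces.

package main

import (
	"errors"
	"fmt"
)

// kvStore is a minimal stand-in for a read/write, versioned store interface.
type kvStore interface {
	Get(key []byte) []byte
	Has(key []byte) bool
	Set(key, val []byte)
	Delete(key []byte)
	SaveVersion() ([]byte, int64, error)
	Prune() error
}

// roSnapshot is an immutable view that still implements kvStore; the write and
// versioning methods fail loudly instead of silently mutating shared state.
type roSnapshot struct {
	data map[string][]byte
}

var _ kvStore = (*roSnapshot)(nil)

func (s *roSnapshot) Get(key []byte) []byte { return s.data[string(key)] }
func (s *roSnapshot) Has(key []byte) bool   { _, ok := s.data[string(key)]; return ok }
func (s *roSnapshot) Set(key, val []byte)   { panic("roSnapshot: Set() not supported") }
func (s *roSnapshot) Delete(key []byte)     { panic("roSnapshot: Delete() not supported") }
func (s *roSnapshot) SaveVersion() ([]byte, int64, error) {
	return nil, 0, errors.New("roSnapshot: SaveVersion() not supported")
}
func (s *roSnapshot) Prune() error { return errors.New("roSnapshot: Prune() not supported") }
func (s *roSnapshot) Release()     {} // nothing to free in this in-memory sketch

func main() {
	snap := &roSnapshot{data: map[string][]byte{"key1": []byte("value1")}}
	fmt.Println(string(snap.Get([]byte("key1")))) // value1
	_, _, err := snap.SaveVersion()
	fmt.Println(err) // writes and version saves are rejected
}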
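Note on the MockStore snapshot used by the caching-store test: GetSnapshot() copies the whole backing map, so writes made after the snapshot was taken are not visible through it. The sketch below shows the same copy-on-snapshot idea in isolation; memStore is an illustrative type, and the shallow copy of the []byte values is safe only because, as in the test, the values are never mutated in place.

package main

import "fmt"

type memStore struct {
	data map[string][]byte
}

func newMemStore() *memStore { return &memStore{data: make(map[string][]byte)} }

func (m *memStore) Set(key, val []byte) { m.data[string(key)] = val }
func (m *memStore) Get(key []byte) []byte { return m.data[string(key)] }

// GetSnapshot returns an isolated copy of the current contents, so later
// writes to the store do not leak into previously taken snapshots.
func (m *memStore) GetSnapshot() *memStore {
	copied := make(map[string][]byte, len(m.data))
	for k, v := range m.data {
		copied[k] = v
	}
	return &memStore{data: copied}
}

func main() {
	s := newMemStore()
	s.Set([]byte("key1"), []byte("value1"))
	snap := s.GetSnapshot()
	s.Set([]byte("key1"), []byte("value2"))       // write after the snapshot
	fmt.Println(string(snap.Get([]byte("key1")))) // still "value1"
	fmt.Println(string(s.Get([]byte("key1"))))    // "value2"
}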