From 9b9559a12ada68000d359587c1bdb6ea0c5a3f5d Mon Sep 17 00:00:00 2001 From: marioevz Date: Wed, 17 Aug 2022 21:39:22 +0000 Subject: [PATCH 1/8] simulators/eth2/engine: Refactor to allow client shutdown, delayed start --- simulators/eth2/engine/engineapi.go | 2 +- simulators/eth2/engine/helper.go | 263 +++++++---- simulators/eth2/engine/nodes.go | 494 +++++++++++++++++++-- simulators/eth2/engine/prepared_testnet.go | 195 ++++---- simulators/eth2/engine/running_testnet.go | 299 +++++++++---- simulators/eth2/engine/scenarios.go | 349 +++++++++------ simulators/eth2/engine/verification.go | 66 ++- 7 files changed, 1171 insertions(+), 497 deletions(-) diff --git a/simulators/eth2/engine/engineapi.go b/simulators/eth2/engine/engineapi.go index 0c7e32ca8b..0de0ef5bc1 100644 --- a/simulators/eth2/engine/engineapi.go +++ b/simulators/eth2/engine/engineapi.go @@ -50,7 +50,7 @@ type EngineClient struct { } // NewClient creates a engine client that uses the given RPC client. -func NewEngineClient(t *hivesim.T, n *Eth1Node, ttd *big.Int) *EngineClient { +func NewEngineClient(t *hivesim.T, n *ExecutionClient, ttd *big.Int) *EngineClient { engineRPCAddress, err := n.EngineRPCAddress() if err != nil { panic(err) diff --git a/simulators/eth2/engine/helper.go b/simulators/eth2/engine/helper.go index 855590c534..35286c4b00 100644 --- a/simulators/eth2/engine/helper.go +++ b/simulators/eth2/engine/helper.go @@ -42,6 +42,7 @@ type node struct { ExecutionClientTTD *big.Int BeaconNodeTTD *big.Int TestVerificationNode bool + DisableStartup bool ChainGenerator ChainGenerator Chain []*types.Block } @@ -562,106 +563,205 @@ func forkchoiceResponseSpoof(method string, status PayloadStatusV1, payloadID *P }, nil } -// List of Hashes that can be accessed concurrently -type SyncHashes struct { - Hashes []common.Hash - Lock *sync.Mutex +type EngineResponse struct { + Status PayloadStatus + LatestValidHash *common.Hash } -func NewSyncHashes(hashes ...common.Hash) *SyncHashes { - newSyncHashes := 
&SyncHashes{ - Hashes: make([]common.Hash, 0), - Lock: &sync.Mutex{}, +type EngineResponseHash struct { + Response *EngineResponse + Hash common.Hash +} + +// +type EngineResponseMocker struct { + Lock sync.Mutex + DefaultResponse *EngineResponse + HashToResponse map[common.Hash]*EngineResponse + HashPassthrough map[common.Hash]bool + NewPayloadCalled chan EngineResponseHash + ForkchoiceUpdatedCalled chan EngineResponseHash + Mocking bool +} + +func NewEngineResponseMocker(defaultResponse *EngineResponse, perHashResponses ...*EngineResponseHash) *EngineResponseMocker { + e := &EngineResponseMocker{ + DefaultResponse: defaultResponse, + HashToResponse: make(map[common.Hash]*EngineResponse), + HashPassthrough: make(map[common.Hash]bool), + NewPayloadCalled: make(chan EngineResponseHash), + ForkchoiceUpdatedCalled: make(chan EngineResponseHash), + Mocking: true, } - for _, h := range hashes { - newSyncHashes.Hashes = append(newSyncHashes.Hashes, h) + for _, r := range perHashResponses { + e.AddResponse(r.Hash, r.Response) } - return newSyncHashes + return e } -func (syncHashes *SyncHashes) Contains(hash common.Hash) bool { - syncHashes.Lock.Lock() - defer syncHashes.Lock.Unlock() - if syncHashes.Hashes == nil { - return false +func (e *EngineResponseMocker) AddResponse(h common.Hash, r *EngineResponse) { + e.Lock.Lock() + defer e.Lock.Unlock() + if e.HashToResponse == nil { + e.HashToResponse = make(map[common.Hash]*EngineResponse) } - for _, h := range syncHashes.Hashes { - if h == hash { - return true - } + e.HashToResponse[h] = r +} + +func (e *EngineResponseMocker) AddPassthrough(h common.Hash, pass bool) { + e.Lock.Lock() + defer e.Lock.Unlock() + e.HashPassthrough[h] = pass +} + +func (e *EngineResponseMocker) CanPassthrough(h common.Hash) bool { + e.Lock.Lock() + defer e.Lock.Unlock() + if pass, ok := e.HashPassthrough[h]; ok && pass { + return true } return false } -func (syncHashes *SyncHashes) Add(hash common.Hash) { - syncHashes.Lock.Lock() - defer 
syncHashes.Lock.Unlock() - syncHashes.Hashes = append(syncHashes.Hashes, hash) +func (e *EngineResponseMocker) GetResponse(h common.Hash) *EngineResponse { + e.Lock.Lock() + defer e.Lock.Unlock() + if e.HashToResponse != nil { + if r, ok := e.HashToResponse[h]; ok { + return r + } + } + return e.DefaultResponse } -// Generate a callback that invalidates either a call to `engine_forkchoiceUpdatedV1` or `engine_newPayloadV1` -// for all hashes with given exceptions, and a given LatestValidHash. -func InvalidateExecutionPayloads(method string, exceptions *SyncHashes, latestValidHash *common.Hash, invalidated chan<- common.Hash) func([]byte, []byte) *proxy.Spoof { - if method == EngineForkchoiceUpdatedV1 { - return func(res []byte, req []byte) *proxy.Spoof { - var ( - fcState ForkchoiceStateV1 - pAttr PayloadAttributesV1 - spoof *proxy.Spoof - err error - ) - err = UnmarshalFromJsonRPCRequest(req, &fcState, &pAttr) +func (e *EngineResponseMocker) SetDefaultResponse(r *EngineResponse) { + e.Lock.Lock() + defer e.Lock.Unlock() + e.DefaultResponse = r +} + +func (e *EngineResponseMocker) AddGetPayloadPassthroughToProxy(p *Proxy) { + p.AddResponseCallback(EngineGetPayloadV1, func(res []byte, req []byte) *proxy.Spoof { + // Hash of the payload built is being added to the passthrough list + var ( + payload ExecutableDataV1 + ) + err := UnmarshalFromJsonRPCResponse(res, &payload) + if err != nil { + panic(err) + } + e.AddPassthrough(payload.BlockHash, true) + return nil + }) +} + +func (e *EngineResponseMocker) AddNewPayloadCallbackToProxy(p *Proxy) { + p.AddResponseCallback(EngineNewPayloadV1, func(res []byte, req []byte) *proxy.Spoof { + var ( + payload ExecutableDataV1 + status PayloadStatusV1 + spoof *proxy.Spoof + err error + ) + err = UnmarshalFromJsonRPCRequest(req, &payload) + if err != nil { + panic(err) + } + err = UnmarshalFromJsonRPCResponse(res, &status) + if err != nil { + panic(err) + } + if r := e.GetResponse(payload.BlockHash); e.Mocking && 
!e.CanPassthrough(payload.BlockHash) && r != nil { + // We are mocking this specific response, either with a hash specific response, or the default response + spoof, err = payloadStatusSpoof(EngineNewPayloadV1, &PayloadStatusV1{ + Status: r.Status, + LatestValidHash: r.LatestValidHash, + ValidationError: nil, + }) if err != nil { panic(err) } - if !exceptions.Contains(fcState.HeadBlockHash) { - spoof, err = forkchoiceResponseSpoof(EngineForkchoiceUpdatedV1, PayloadStatusV1{ - Status: Invalid, - LatestValidHash: latestValidHash, - ValidationError: nil, - }, nil) - if err != nil { - panic(err) - } - select { - case invalidated <- fcState.HeadBlockHash: - default: - } - return spoof + select { + case e.NewPayloadCalled <- EngineResponseHash{ + Response: r, + Hash: payload.BlockHash, + }: + default: + } + return spoof + } else { + select { + case e.NewPayloadCalled <- EngineResponseHash{ + Response: &EngineResponse{ + Status: status.Status, + LatestValidHash: status.LatestValidHash, + }, + Hash: payload.BlockHash, + }: + default: } - return nil } - } - if method == EngineNewPayloadV1 { - return func(res []byte, req []byte) *proxy.Spoof { - var ( - payload ExecutableDataV1 - spoof *proxy.Spoof - err error - ) - err = UnmarshalFromJsonRPCRequest(req, &payload) + return nil + }) +} + +func (e *EngineResponseMocker) AddForkchoiceUpdatedCallbackToProxy(p *Proxy) { + p.AddResponseCallback(EngineForkchoiceUpdatedV1, func(res []byte, req []byte) *proxy.Spoof { + var ( + fcState ForkchoiceStateV1 + pAttr PayloadAttributesV1 + fResp ForkChoiceResponse + spoof *proxy.Spoof + err error + ) + err = UnmarshalFromJsonRPCRequest(req, &fcState, &pAttr) + if err != nil { + panic(err) + } + err = UnmarshalFromJsonRPCResponse(res, &fResp) + if err != nil { + panic(err) + } + + if r := e.GetResponse(fcState.HeadBlockHash); e.Mocking && !e.CanPassthrough(fcState.HeadBlockHash) && r != nil { + // We are mocking this specific response, either with a hash specific response, or the default 
response + spoof, err = forkchoiceResponseSpoof(EngineForkchoiceUpdatedV1, PayloadStatusV1{ + Status: r.Status, + LatestValidHash: r.LatestValidHash, + ValidationError: nil, + }, nil) if err != nil { panic(err) } - if !exceptions.Contains(payload.BlockHash) { - spoof, err = payloadStatusSpoof(EngineNewPayloadV1, &PayloadStatusV1{ - Status: Invalid, - LatestValidHash: latestValidHash, - ValidationError: nil, - }) - if err != nil { - panic(err) - } - select { - case invalidated <- payload.BlockHash: - default: - } - return spoof + + select { + case e.ForkchoiceUpdatedCalled <- EngineResponseHash{ + Response: r, + Hash: fcState.HeadBlockHash, + }: + default: + } + return spoof + } else { + // Let the original response pass through + select { + case e.ForkchoiceUpdatedCalled <- EngineResponseHash{ + Response: &EngineResponse{ + Status: fResp.PayloadStatus.Status, + LatestValidHash: fResp.PayloadStatus.LatestValidHash, + }, + Hash: fcState.HeadBlockHash, + }: + default: } - return nil } - } - panic(fmt.Errorf("ERROR: Invalid method to generate callback: %s", method)) + return nil + }) +} + +func (e *EngineResponseMocker) AddCallbacksToProxy(p *Proxy) { + e.AddForkchoiceUpdatedCallbackToProxy(p) + e.AddNewPayloadCallbackToProxy(p) } // Generates a callback that detects when a ForkchoiceUpdated with Payload Attributes fails. @@ -716,6 +816,11 @@ func combine(a, b *proxy.Spoof) *proxy.Spoof { return a } +func ContextWithSlotsTimeout(parent context.Context, t *Testnet, slots beacon.Slot) (context.Context, context.CancelFunc) { + timeout := time.Duration(uint64(slots)*uint64(t.spec.SECONDS_PER_SLOT)) * time.Second + return context.WithTimeout(parent, timeout) +} + // Try to approximate how much time until the merge based on current time, bellatrix fork epoch, // TTD, execution clients' consensus mechanism, current total difficulty. // This function is used to calculate timeouts, so it will always return a pessimistic value. 
@@ -723,7 +828,7 @@ func SlotsUntilMerge(t *Testnet, c *Config) beacon.Slot { l := make([]beacon.Slot, 0) l = append(l, SlotsUntilBellatrix(t.genesisTime, t.spec)) - for i, e := range t.eth1 { + for i, e := range t.ExecutionClients().Running() { l = append(l, beacon.Slot(TimeUntilTerminalBlock(e, c.Eth1Consensus, c.TerminalTotalDifficulty, c.Nodes[i])/uint64(t.spec.SECONDS_PER_SLOT))) } @@ -755,7 +860,7 @@ func SlotsUntilBellatrix(genesisTime beacon.Timestamp, spec *beacon.Spec) beacon return s } -func TimeUntilTerminalBlock(e *Eth1Node, c setup.Eth1Consensus, defaultTTD *big.Int, n node) uint64 { +func TimeUntilTerminalBlock(e *ExecutionClient, c setup.Eth1Consensus, defaultTTD *big.Int, n node) uint64 { var ttd = defaultTTD if n.ExecutionClientTTD != nil { ttd = n.ExecutionClientTTD diff --git a/simulators/eth2/engine/nodes.go b/simulators/eth2/engine/nodes.go index 7fcf615a1d..4fd389ac81 100644 --- a/simulators/eth2/engine/nodes.go +++ b/simulators/eth2/engine/nodes.go @@ -3,8 +3,10 @@ package main import ( "bytes" "context" + "encoding/hex" "errors" "fmt" + "net" "net/http" "strings" "time" @@ -33,53 +35,188 @@ const ( // TODO: we assume the clients were configured with default ports. 
// Would be cleaner to run a script in the client to get the address without assumptions -type Eth1Node struct { - *hivesim.Client +type ExecutionClient struct { + T *hivesim.T + HiveClient *hivesim.Client + ClientType string + OptionsGenerator func() ([]hivesim.StartOption, error) + proxy **Proxy + proxyPort int } -func (en *Eth1Node) UserRPCAddress() (string, error) { - return fmt.Sprintf("http://%v:%d", en.IP, PortUserRPC), nil +func NewExecutionClient(t *hivesim.T, eth1Def *hivesim.ClientDefinition, optionsGenerator func() ([]hivesim.StartOption, error), proxyPort int) *ExecutionClient { + return &ExecutionClient{ + T: t, + ClientType: eth1Def.Name, + OptionsGenerator: optionsGenerator, + proxyPort: proxyPort, + proxy: new(*Proxy), + } +} + +func (en *ExecutionClient) UserRPCAddress() (string, error) { + return fmt.Sprintf("http://%v:%d", en.HiveClient.IP, PortUserRPC), nil } -func (en *Eth1Node) EngineRPCAddress() (string, error) { +func (en *ExecutionClient) EngineRPCAddress() (string, error) { // TODO what will the default port be? - return fmt.Sprintf("http://%v:%d", en.IP, PortEngineRPC), nil + return fmt.Sprintf("http://%v:%d", en.HiveClient.IP, PortEngineRPC), nil } -func (en *Eth1Node) MustGetEnode() string { - addr, err := en.EnodeURL() +func (en *ExecutionClient) MustGetEnode() string { + addr, err := en.HiveClient.EnodeURL() if err != nil { panic(err) } return addr } -type BeaconNode struct { - *hivesim.Client - API *eth2api.Eth2HttpClient - genesisTime common.Timestamp - spec *common.Spec - index int +func (en *ExecutionClient) Start() error { + if en.HiveClient != nil { + return fmt.Errorf("Client already started") + } + en.T.Logf("Starting client %s", en.ClientType) + opts, err := en.OptionsGenerator() + if err != nil { + return fmt.Errorf("Unable to get start options: %v", err) + } + en.HiveClient = en.T.StartClient(en.ClientType, opts...) 
+ + // Prepare proxy + dest, _ := en.EngineRPCAddress() + + secret, err := hex.DecodeString("7365637265747365637265747365637265747365637265747365637265747365") + if err != nil { + panic(err) + } + simIP, err := en.T.Sim.ContainerNetworkIP(en.T.SuiteID, "bridge", "simulation") + if err != nil { + panic(err) + } + + *en.proxy = NewProxy(net.ParseIP(simIP), en.proxyPort, dest, secret) + return nil +} + +func (en *ExecutionClient) Shutdown() error { + _, err := en.HiveClient.Exec("shutdown.sh") + if err != nil { + return err + } + en.HiveClient = nil + return nil } -type BeaconNodes []*BeaconNode +func (en *ExecutionClient) IsRunning() bool { + return en.HiveClient != nil +} -func NewBeaconNode(cl *hivesim.Client, genesisTime common.Timestamp, spec *common.Spec, index int) *BeaconNode { - return &BeaconNode{ - Client: cl, - API: ð2api.Eth2HttpClient{ - Addr: fmt.Sprintf("http://%s:%d", cl.IP, PortBeaconAPI), - Cli: &http.Client{}, - Codec: eth2api.JSONCodec{}, - }, - genesisTime: genesisTime, - spec: spec, - index: index, +func (en *ExecutionClient) Proxy() *Proxy { + if en.proxy != nil && *en.proxy != nil { + return *en.proxy } + return nil } -func (bn *BeaconNode) ENR() (string, error) { - ctx, _ := context.WithTimeout(context.Background(), time.Second*10) +type ExecutionClients []*ExecutionClient + +// Return subset of clients that are currently running +func (all ExecutionClients) Running() ExecutionClients { + res := make(ExecutionClients, 0) + for _, ec := range all { + if ec.IsRunning() { + res = append(res, ec) + } + } + return res +} + +// Returns comma-separated Bootnodes of all running execution nodes +func (ens ExecutionClients) Enodes() (string, error) { + if len(ens) == 0 { + return "", nil + } + enodes := make([]string, 0) + for _, en := range ens { + if en.IsRunning() { + enode, err := en.HiveClient.EnodeURL() + if err != nil { + return "", err + } + enodes = append(enodes, enode) + } + } + return strings.Join(enodes, ","), nil +} + +type Proxies 
[]**Proxy + +func (all Proxies) Running() []*Proxy { + res := make([]*Proxy, 0) + for _, p := range all { + if p != nil && *p != nil { + res = append(res, *p) + } + } + return res +} + +type BeaconClient struct { + T *hivesim.T + HiveClient *hivesim.Client + ClientType string + OptionsGenerator func() ([]hivesim.StartOption, error) + API *eth2api.Eth2HttpClient + genesisTime common.Timestamp + spec *common.Spec + index int +} + +func NewBeaconClient(t *hivesim.T, beaconDef *hivesim.ClientDefinition, optionsGenerator func() ([]hivesim.StartOption, error), genesisTime common.Timestamp, spec *common.Spec, index int) *BeaconClient { + return &BeaconClient{ + T: t, + ClientType: beaconDef.Name, + OptionsGenerator: optionsGenerator, + genesisTime: genesisTime, + spec: spec, + index: index, + } +} + +func (bn *BeaconClient) Start() error { + if bn.HiveClient != nil { + return fmt.Errorf("Client already started") + } + bn.T.Logf("Starting client %s", bn.ClientType) + opts, err := bn.OptionsGenerator() + if err != nil { + return fmt.Errorf("Unable to get start options: %v", err) + } + bn.HiveClient = bn.T.StartClient(bn.ClientType, opts...) 
+ bn.API = ð2api.Eth2HttpClient{ + Addr: fmt.Sprintf("http://%s:%d", bn.HiveClient.IP, PortBeaconAPI), + Cli: &http.Client{}, + Codec: eth2api.JSONCodec{}, + } + return nil +} + +func (bn *BeaconClient) Shutdown() error { + _, err := bn.HiveClient.Exec("shutdown.sh") + if err != nil { + return err + } + bn.HiveClient = nil + return nil +} + +func (bn *BeaconClient) IsRunning() bool { + return bn.HiveClient != nil +} + +func (bn *BeaconClient) ENR() (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() var out eth2api.NetworkIdentity if err := nodeapi.Identity(ctx, bn.API, &out); err != nil { return "", err @@ -89,27 +226,64 @@ func (bn *BeaconNode) ENR() (string, error) { return out.ENR, nil } -func (bn *BeaconNode) EnodeURL() (string, error) { +func (bn *BeaconClient) EnodeURL() (string, error) { return "", errors.New("beacon node does not have an discv4 Enode URL, use ENR or multi-address instead") } -// Returns comma-separated ENRs of all beacon nodes -func (beacons BeaconNodes) ENRs() (string, error) { +type BeaconClients []*BeaconClient + +// Return subset of clients that are currently running +func (all BeaconClients) Running() BeaconClients { + res := make(BeaconClients, 0) + for _, bc := range all { + if bc.IsRunning() { + res = append(res, bc) + } + } + return res +} + +// Returns comma-separated ENRs of all running beacon nodes +func (beacons BeaconClients) ENRs() (string, error) { if len(beacons) == 0 { return "", nil } enrs := make([]string, 0) for _, bn := range beacons { - enr, err := bn.ENR() - if err != nil { - return "", err + if bn.IsRunning() { + enr, err := bn.ENR() + if err != nil { + return "", err + } + enrs = append(enrs, enr) } - enrs = append(enrs, enr) } return strings.Join(enrs, ","), nil } -func (b *BeaconNode) WaitForExecutionPayload(ctx context.Context, timeoutSlots common.Slot) (ethcommon.Hash, error) { +func (b *BeaconClient) PrintAllBeaconBlocks(ctx context.Context) error { + 
var headInfo eth2api.BeaconBlockHeaderAndInfo + if exists, err := beaconapi.BlockHeader(ctx, b.API, eth2api.BlockHead, &headInfo); err != nil { + return fmt.Errorf("PrintAllBeaconBlocks: failed to poll head: %v", err) + } else if !exists { + return fmt.Errorf("PrintAllBeaconBlocks: failed to poll head: !exists") + } + fmt.Printf("PrintAllBeaconBlocks: Head, slot %d, root %v\n", headInfo.Header.Message.Slot, headInfo.Root) + for i := 1; i <= int(headInfo.Header.Message.Slot); i++ { + var bHeader eth2api.BeaconBlockHeaderAndInfo + if exists, err := beaconapi.BlockHeader(ctx, b.API, eth2api.BlockIdSlot(i), &bHeader); err != nil { + fmt.Printf("PrintAllBeaconBlocks: Slot %d, not found\n", i) + continue + } else if !exists { + fmt.Printf("PrintAllBeaconBlocks: Slot %d, not found\n", i) + continue + } + fmt.Printf("PrintAllBeaconBlocks: Slot %d, root %v\n", i, bHeader.Root) + } + return nil +} + +func (b *BeaconClient) WaitForExecutionPayload(ctx context.Context, timeoutSlots common.Slot) (ethcommon.Hash, error) { fmt.Printf("Waiting for execution payload on beacon %d\n", b.index) slotDuration := time.Duration(b.spec.SECONDS_PER_SLOT) * time.Second timer := time.NewTicker(slotDuration) @@ -155,8 +329,65 @@ func (b *BeaconNode) WaitForExecutionPayload(ctx context.Context, timeoutSlots c } } +type BlockV2OptimisticResponse struct { + Version string `json:"version"` + ExecutionOptimistic bool `json:"execution_optimistic"` +} + +func (b *BeaconClient) CheckBlockIsOptimistic(ctx context.Context, blockID eth2api.BlockId) (bool, error) { + var headOptStatus BlockV2OptimisticResponse + if exists, err := eth2api.SimpleRequest(ctx, b.API, eth2api.FmtGET("/eth/v2/beacon/blocks/%s", blockID.BlockId()), &headOptStatus); err != nil { + return false, err + } else if !exists { + // Block still not synced + return false, fmt.Errorf("Block not found (!exists)") + } + return headOptStatus.ExecutionOptimistic, nil +} + +func (b *BeaconClient) WaitForOptimisticState(ctx context.Context, 
timeoutSlots common.Slot, blockID eth2api.BlockId, optimistic bool) (*eth2api.BeaconBlockHeaderAndInfo, error) { + fmt.Printf("Waiting for optimistic sync on beacon %d\n", b.index) + slotDuration := time.Duration(b.spec.SECONDS_PER_SLOT) * time.Second + timer := time.NewTicker(slotDuration) + var timeout <-chan time.Time + if timeoutSlots > 0 { + timeout = time.After(time.Second * time.Duration(uint64(timeoutSlots)*uint64(b.spec.SECONDS_PER_SLOT))) + } else { + timeout = make(<-chan time.Time) + } + + for { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("context done") + case <-timeout: + return nil, fmt.Errorf("Timeout") + case <-timer.C: + var headOptStatus BlockV2OptimisticResponse + if exists, err := eth2api.SimpleRequest(ctx, b.API, eth2api.FmtGET("/eth/v2/beacon/blocks/%s", blockID.BlockId()), &headOptStatus); err != nil { + // Block still not synced + continue + } else if !exists { + // Block still not synced + continue + } + if headOptStatus.ExecutionOptimistic != optimistic { + continue + } + // Return the block + var blockInfo eth2api.BeaconBlockHeaderAndInfo + if exists, err := beaconapi.BlockHeader(ctx, b.API, blockID, &blockInfo); err != nil { + return nil, fmt.Errorf("WaitForExecutionPayload: failed to poll block: %v", err) + } else if !exists { + return nil, fmt.Errorf("WaitForExecutionPayload: failed to poll block: !exists") + } + return &blockInfo, nil + } + } +} + // -func (bn *BeaconNode) GetLatestExecutionBeaconBlock(ctx context.Context) (*bellatrix.SignedBeaconBlock, error) { +func (bn *BeaconClient) GetLatestExecutionBeaconBlock(ctx context.Context) (*bellatrix.SignedBeaconBlock, error) { var headInfo eth2api.BeaconBlockHeaderAndInfo if exists, err := beaconapi.BlockHeader(ctx, bn.API, eth2api.BlockHead, &headInfo); err != nil { return nil, fmt.Errorf("failed to poll head: %v", err) @@ -182,7 +413,7 @@ func (bn *BeaconNode) GetLatestExecutionBeaconBlock(ctx context.Context) (*bella return nil, nil } -func (bn *BeaconNode) 
GetFirstExecutionBeaconBlock(ctx context.Context) (*bellatrix.SignedBeaconBlock, error) { +func (bn *BeaconClient) GetFirstExecutionBeaconBlock(ctx context.Context) (*bellatrix.SignedBeaconBlock, error) { lastSlot := bn.spec.TimeToSlot(common.Timestamp(time.Now().Unix()), bn.genesisTime) for slot := common.Slot(0); slot <= lastSlot; slot++ { var versionedBlock eth2api.VersionedSignedBeaconBlock @@ -203,7 +434,7 @@ func (bn *BeaconNode) GetFirstExecutionBeaconBlock(ctx context.Context) (*bellat return nil, nil } -func (bn *BeaconNode) GetBeaconBlockByExecutionHash(ctx context.Context, hash ethcommon.Hash) (*bellatrix.SignedBeaconBlock, error) { +func (bn *BeaconClient) GetBeaconBlockByExecutionHash(ctx context.Context, hash ethcommon.Hash) (*bellatrix.SignedBeaconBlock, error) { var headInfo eth2api.BeaconBlockHeaderAndInfo if exists, err := beaconapi.BlockHeader(ctx, bn.API, eth2api.BlockHead, &headInfo); err != nil { return nil, fmt.Errorf("failed to poll head: %v", err) @@ -231,7 +462,7 @@ func (bn *BeaconNode) GetBeaconBlockByExecutionHash(ctx context.Context, hash et return nil, nil } -func (b BeaconNodes) GetBeaconBlockByExecutionHash(ctx context.Context, hash ethcommon.Hash) (*bellatrix.SignedBeaconBlock, error) { +func (b BeaconClients) GetBeaconBlockByExecutionHash(ctx context.Context, hash ethcommon.Hash) (*bellatrix.SignedBeaconBlock, error) { for _, bn := range b { block, err := bn.GetBeaconBlockByExecutionHash(ctx, hash) if err != nil || block != nil { @@ -242,15 +473,192 @@ func (b BeaconNodes) GetBeaconBlockByExecutionHash(ctx context.Context, hash eth } type ValidatorClient struct { - *hivesim.Client - keys []*setup.KeyDetails + T *hivesim.T + HiveClient *hivesim.Client + ClientType string + OptionsGenerator func([]*setup.KeyDetails) ([]hivesim.StartOption, error) + Keys []*setup.KeyDetails +} + +func NewValidatorClient(t *hivesim.T, validatorDef *hivesim.ClientDefinition, optionsGenerator func([]*setup.KeyDetails) ([]hivesim.StartOption, error), 
keys []*setup.KeyDetails) *ValidatorClient { + return &ValidatorClient{ + T: t, + ClientType: validatorDef.Name, + OptionsGenerator: optionsGenerator, + Keys: keys, + } +} + +func (vc *ValidatorClient) Start() error { + if vc.HiveClient != nil { + return fmt.Errorf("Client already started") + } + if len(vc.Keys) == 0 { + vc.T.Logf("Skipping validator because it has 0 validator keys") + return nil + } + vc.T.Logf("Starting client %s", vc.ClientType) + opts, err := vc.OptionsGenerator(vc.Keys) + if err != nil { + return fmt.Errorf("Unable to get start options: %v", err) + } + vc.HiveClient = vc.T.StartClient(vc.ClientType, opts...) + return nil +} + +func (vc *ValidatorClient) IsRunning() bool { + return vc.HiveClient != nil } func (v *ValidatorClient) ContainsKey(pk [48]byte) bool { - for _, k := range v.keys { + for _, k := range v.Keys { if k.ValidatorPubkey == pk { return true } } return false } + +type ValidatorClients []*ValidatorClient + +// Return subset of clients that are currently running +func (all ValidatorClients) Running() ValidatorClients { + res := make(ValidatorClients, 0) + for _, vc := range all { + if vc.IsRunning() { + res = append(res, vc) + } + } + return res +} + +// A validator client bundle consists of: +// - Execution client +// - Beacon client +// - Validator client +type NodeClientBundle struct { + T *hivesim.T + Index int + ExecutionClient *ExecutionClient + BeaconClient *BeaconClient + ValidatorClient *ValidatorClient + Verification bool +} + +// Starts all clients included in the bundle +func (cb *NodeClientBundle) Start() error { + cb.T.Logf("Starting validator client bundle %d", cb.Index) + if cb.ExecutionClient != nil { + if err := cb.ExecutionClient.Start(); err != nil { + return err + } + } else { + cb.T.Logf("No execution client started") + } + if cb.BeaconClient != nil { + if err := cb.BeaconClient.Start(); err != nil { + return err + } + } else { + cb.T.Logf("No beacon client started") + } + if cb.ValidatorClient != nil { + if 
err := cb.ValidatorClient.Start(); err != nil { + return err + } + } else { + cb.T.Logf("No validator client started") + } + return nil +} + +type NodeClientBundles []NodeClientBundle + +// Return all execution clients, even the ones not currently running +func (cbs NodeClientBundles) ExecutionClients() ExecutionClients { + en := make(ExecutionClients, 0) + for _, cb := range cbs { + if cb.ExecutionClient != nil { + en = append(en, cb.ExecutionClient) + } + } + return en +} + +// Return all proxy pointers, even the ones not currently running +func (cbs NodeClientBundles) Proxies() Proxies { + ps := make(Proxies, 0) + for _, cb := range cbs { + if cb.ExecutionClient != nil { + ps = append(ps, cb.ExecutionClient.proxy) + } + } + return ps +} + +// Return all beacon clients, even the ones not currently running +func (cbs NodeClientBundles) BeaconClients() BeaconClients { + bn := make(BeaconClients, 0) + for _, cb := range cbs { + if cb.BeaconClient != nil { + bn = append(bn, cb.BeaconClient) + } + } + return bn +} + +// Return all validator clients, even the ones not currently running +func (cbs NodeClientBundles) ValidatorClients() ValidatorClients { + vc := make(ValidatorClients, 0) + for _, cb := range cbs { + if cb.ValidatorClient != nil { + vc = append(vc, cb.ValidatorClient) + } + } + return vc +} + +// Return subset of nodes which are marked as verification nodes +func (all NodeClientBundles) VerificationNodes() NodeClientBundles { + // If none is set as verification, then all are verification nodes + var any bool + for _, cb := range all { + if cb.Verification { + any = true + break + } + } + if !any { + return all + } + + res := make(NodeClientBundles, 0) + for _, cb := range all { + if cb.Verification { + res = append(res, cb) + } + } + return res +} + +func (cbs NodeClientBundles) RemoveNodeAsVerifier(id int) error { + if id >= len(cbs) { + return fmt.Errorf("Node %d does not exist", id) + } + var any bool + for _, cb := range cbs { + if cb.Verification { + 
any = true + break + } + } + if any { + cbs[id].Verification = false + } else { + // If no node is set as verifier, we will set all other nodes as verifiers then + for i := range cbs { + cbs[i].Verification = (i != id) + } + } + return nil +} diff --git a/simulators/eth2/engine/prepared_testnet.go b/simulators/eth2/engine/prepared_testnet.go index 29240a524d..aa506e0200 100644 --- a/simulators/eth2/engine/prepared_testnet.go +++ b/simulators/eth2/engine/prepared_testnet.go @@ -1,11 +1,9 @@ package main import ( - "encoding/hex" "encoding/json" "fmt" "math/big" - "net" "os" "strings" "time" @@ -201,7 +199,7 @@ func (p *PreparedTestnet) createTestnet(t *hivesim.T) *Testnet { genesisTime, _ := p.eth2Genesis.GenesisTime() genesisValidatorsRoot, _ := p.eth2Genesis.GenesisValidatorsRoot() return &Testnet{ - t: t, + T: t, genesisTime: genesisTime, genesisValidatorsRoot: genesisValidatorsRoot, spec: p.spec, @@ -209,121 +207,126 @@ func (p *PreparedTestnet) createTestnet(t *hivesim.T) *Testnet { } } -func (p *PreparedTestnet) startEth1Node(testnet *Testnet, eth1Def *hivesim.ClientDefinition, consensus setup.Eth1Consensus, ttd *big.Int, chain []*types.Block) { - testnet.t.Logf("Starting eth1 node: %s (%s)", eth1Def.Name, eth1Def.Version) +// Prepares an execution client object with all the necessary information +// to start +func (p *PreparedTestnet) prepareExecutionNode(testnet *Testnet, eth1Def *hivesim.ClientDefinition, consensus setup.Eth1Consensus, ttd *big.Int, executionIndex int, chain []*types.Block) *ExecutionClient { + testnet.Logf("Preparing execution node: %s (%s)", eth1Def.Name, eth1Def.Version) + + // This method will return the options used to run the client. + // Requires a method that returns the rest of the currently running + // execution clients on the network at startup. 
+ optionsGenerator := func() ([]hivesim.StartOption, error) { + opts := []hivesim.StartOption{p.executionOpts} + opts = append(opts, consensus.HiveParams(executionIndex)) + + currentlyRunningEcs := testnet.ExecutionClients().Running() + if len(currentlyRunningEcs) > 0 { + bootnode, err := currentlyRunningEcs.Enodes() + if err != nil { + return nil, err + } - opts := []hivesim.StartOption{p.executionOpts} - opts = append(opts, consensus.HiveParams(len(testnet.eth1))) - if len(testnet.eth1) > 0 { - bootnode, err := testnet.eth1[0].EnodeURL() - if err != nil { - testnet.t.Fatalf("failed to get eth1 bootnode URL: %v", err) + // Make the client connect to the first eth1 node, as a bootnode for the eth1 net + opts = append(opts, hivesim.Params{"HIVE_BOOTNODE": bootnode}) } - - // Make the client connect to the first eth1 node, as a bootnode for the eth1 net - opts = append(opts, hivesim.Params{"HIVE_BOOTNODE": bootnode}) - } - if ttd != nil { - opts = append(opts, hivesim.Params{"HIVE_TERMINAL_TOTAL_DIFFICULTY": fmt.Sprintf("%d", ttd.Int64())}) - } - if chain != nil && len(chain) > 0 { - // Bundle the chain into the container - chainParam, err := setup.ChainBundle(chain) - if err != nil { - panic(err) + if ttd != nil { + opts = append(opts, hivesim.Params{"HIVE_TERMINAL_TOTAL_DIFFICULTY": fmt.Sprintf("%d", ttd.Int64())}) } - opts = append(opts, chainParam) - } - en := &Eth1Node{testnet.t.StartClient(eth1Def.Name, opts...)} - dest, _ := en.EngineRPCAddress() - testnet.eth1 = append(testnet.eth1, en) - simIP, err := testnet.t.Sim.ContainerNetworkIP(testnet.t.SuiteID, "bridge", "simulation") - if err != nil { - panic(err) - } - secret, err := hex.DecodeString("7365637265747365637265747365637265747365637265747365637265747365") - if err != nil { - panic(err) + if chain != nil && len(chain) > 0 { + // Bundle the chain into the container + chainParam, err := setup.ChainBundle(chain) + if err != nil { + return nil, err + } + opts = append(opts, chainParam) + } + return opts, nil 
} - testnet.proxies = append(testnet.proxies, NewProxy(net.ParseIP(simIP), PortEngineRPC+len(testnet.eth1), dest, secret)) + return NewExecutionClient(testnet.T, eth1Def, optionsGenerator, PortEngineRPC+executionIndex) } -func (p *PreparedTestnet) startBeaconNode(testnet *Testnet, beaconDef *hivesim.ClientDefinition, ttd *big.Int, eth1Endpoints []int) { - testnet.t.Logf("Starting beacon node: %s (%s)", beaconDef.Name, beaconDef.Version) +// Prepares a beacon client object with all the necessary information +// to start +func (p *PreparedTestnet) prepareBeaconNode(testnet *Testnet, beaconDef *hivesim.ClientDefinition, ttd *big.Int, beaconIndex int, eth1Endpoints ...*ExecutionClient) *BeaconClient { + testnet.Logf("Preparing beacon node: %s (%s)", beaconDef.Name, beaconDef.Version) + + // This method will return the options used to run the client. + // Requires a method that returns the rest of the currently running + // beacon clients on the network at startup. + optionsGenerator := func() ([]hivesim.StartOption, error) { - opts := []hivesim.StartOption{p.beaconOpts} - // Hook up beacon node to (maybe multiple) eth1 nodes - for _, index := range eth1Endpoints { - if index < 0 || index >= len(testnet.eth1) { - testnet.t.Fatalf("only have %d eth1 nodes, cannot find index %d for BN", len(testnet.eth1), index) + opts := []hivesim.StartOption{p.beaconOpts} + + // Hook up beacon node to (maybe multiple) eth1 nodes + var addrs []string + var engineAddrs []string + for _, en := range eth1Endpoints { + if !en.IsRunning() || en.Proxy() == nil { + return nil, fmt.Errorf("Attempted to start beacon node when the execution client is not yet running") + } + execNode := en.Proxy() + userRPC, err := execNode.UserRPCAddress() + if err != nil { + return nil, fmt.Errorf("eth1 node used for beacon without available RPC: %v", err) + } + addrs = append(addrs, userRPC) + engineRPC, err := execNode.EngineRPCAddress() + if err != nil { + return nil, fmt.Errorf("eth1 node used for beacon 
without available RPC: %v", err) + } + engineAddrs = append(engineAddrs, engineRPC) } - } + opts = append(opts, hivesim.Params{ + "HIVE_ETH2_ETH1_RPC_ADDRS": strings.Join(addrs, ","), + "HIVE_ETH2_ETH1_ENGINE_RPC_ADDRS": strings.Join(engineAddrs, ","), + "HIVE_ETH2_BEACON_NODE_INDEX": fmt.Sprintf("%d", beaconIndex), + }) - var addrs []string - var engineAddrs []string - for _, index := range eth1Endpoints { - eth1Node := testnet.proxies[index] - userRPC, err := eth1Node.UserRPCAddress() + bootnodeENRs, err := testnet.BeaconClients().Running().ENRs() if err != nil { - testnet.t.Fatalf("eth1 node used for beacon without available RPC: %v", err) + return nil, fmt.Errorf("failed to get ENR as bootnode for every beacon node: %v", err) } - addrs = append(addrs, userRPC) - engineRPC, err := eth1Node.EngineRPCAddress() - if err != nil { - testnet.t.Fatalf("eth1 node used for beacon without available RPC: %v", err) + if bootnodeENRs != "" { + opts = append(opts, hivesim.Params{"HIVE_ETH2_BOOTNODE_ENRS": bootnodeENRs}) } - engineAddrs = append(engineAddrs, engineRPC) - } - opts = append(opts, hivesim.Params{ - "HIVE_ETH2_ETH1_RPC_ADDRS": strings.Join(addrs, ","), - "HIVE_ETH2_ETH1_ENGINE_RPC_ADDRS": strings.Join(engineAddrs, ","), - }) - if len(testnet.beacons) > 0 { - bootnodeENRs, err := testnet.beacons.ENRs() - if err != nil { - testnet.t.Fatalf("failed to get ENR as bootnode for every beacon node: %v", err) + if ttd != nil { + opts = append(opts, hivesim.Params{"HIVE_TERMINAL_TOTAL_DIFFICULTY": fmt.Sprintf("%d", ttd)}) } - opts = append(opts, hivesim.Params{"HIVE_ETH2_BOOTNODE_ENRS": bootnodeENRs}) - } - - if ttd != nil { - opts = append(opts, hivesim.Params{"HIVE_TERMINAL_TOTAL_DIFFICULTY": fmt.Sprintf("%d", ttd)}) + return opts, nil } // TODO //if p.configName != "mainnet" && hasBuildTarget(beaconDef, p.configName) { // opts = append(opts, hivesim.WithBuildTarget(p.configName)) //} - bn := NewBeaconNode(testnet.t.StartClient(beaconDef.Name, opts...), 
testnet.genesisTime, testnet.spec, len(testnet.beacons)) - testnet.beacons = append(testnet.beacons, bn) + return NewBeaconClient(testnet.T, beaconDef, optionsGenerator, testnet.genesisTime, testnet.spec, beaconIndex) } -func (p *PreparedTestnet) startValidatorClient(testnet *Testnet, validatorDef *hivesim.ClientDefinition, bnIndex int, keyIndex int) { - if len(p.keyTranches[keyIndex]) == 0 { - testnet.validators = append(testnet.validators, &ValidatorClient{nil, make([]*setup.KeyDetails, 0)}) - testnet.t.Logf("Skipping validator %d because it has 0 validator keys", keyIndex) - return - } - testnet.t.Logf("Starting validator client: %s (%s)", validatorDef.Name, validatorDef.Version) - - if bnIndex >= len(testnet.beacons) { - testnet.t.Fatalf("only have %d beacon nodes, cannot find index %d for VC", len(testnet.beacons), bnIndex) - } - bn := testnet.beacons[bnIndex] - // Hook up validator to beacon node - bnAPIOpt := hivesim.Params{ - "HIVE_ETH2_BN_API_IP": bn.IP.String(), - } +// Prepares a validator client object with all the necessary information +// to eventually start the client. 
+func (p *PreparedTestnet) prepareValidatorClient(testnet *Testnet, validatorDef *hivesim.ClientDefinition, bn *BeaconClient, keyIndex int) *ValidatorClient { + testnet.Logf("Preparing validator client: %s (%s)", validatorDef.Name, validatorDef.Version) if keyIndex >= len(p.keyTranches) { - testnet.t.Fatalf("only have %d key tranches, cannot find index %d for VC", len(p.keyTranches), keyIndex) + testnet.Fatalf("only have %d key tranches, cannot find index %d for VC", len(p.keyTranches), keyIndex) } - keysOpt := setup.KeysBundle(p.keyTranches[keyIndex]) - opts := []hivesim.StartOption{p.validatorOpts, keysOpt, bnAPIOpt} - // TODO - //if p.configName != "mainnet" && hasBuildTarget(validatorDef, p.configName) { - // opts = append(opts, hivesim.WithBuildTarget(p.configName)) - //} - vc := &ValidatorClient{testnet.t.StartClient(validatorDef.Name, opts...), p.keyTranches[keyIndex]} - testnet.validators = append(testnet.validators, vc) + // This method will return the options used to run the client. + // Requires the beacon client object to which to connect. 
+ optionsGenerator := func(validatorKeys []*setup.KeyDetails) ([]hivesim.StartOption, error) { + if !bn.IsRunning() { + return nil, fmt.Errorf("Attempted to start a validator when the beacon node is not running") + } + // Hook up validator to beacon node + bnAPIOpt := hivesim.Params{ + "HIVE_ETH2_BN_API_IP": bn.HiveClient.IP.String(), + } + keysOpt := setup.KeysBundle(validatorKeys) + opts := []hivesim.StartOption{p.validatorOpts, keysOpt, bnAPIOpt} + // TODO + //if p.configName != "mainnet" && hasBuildTarget(validatorDef, p.configName) { + // opts = append(opts, hivesim.WithBuildTarget(p.configName)) + //} + return opts, nil + } + return NewValidatorClient(testnet.T, validatorDef, optionsGenerator, p.keyTranches[keyIndex]) } diff --git a/simulators/eth2/engine/running_testnet.go b/simulators/eth2/engine/running_testnet.go index dde00dcb0b..b9e595aa3f 100644 --- a/simulators/eth2/engine/running_testnet.go +++ b/simulators/eth2/engine/running_testnet.go @@ -26,7 +26,8 @@ import ( var MAX_PARTICIPATION_SCORE = 7 type Testnet struct { - t *hivesim.T + *hivesim.T + NodeClientBundles genesisTime common.Timestamp genesisValidatorsRoot common.Root @@ -35,98 +36,65 @@ type Testnet struct { spec *common.Spec // Execution chain configuration and genesis info eth1Genesis *setup.Eth1Genesis - - beacons BeaconNodes - validators []*ValidatorClient - eth1 []*Eth1Node - proxies []*Proxy - verification []bool -} - -func (t *Testnet) verificationExecution() []*Eth1Node { - verificationExecution := make([]*Eth1Node, 0) - for i, v := range t.verification { - if v { - verificationExecution = append(verificationExecution, t.eth1[i]) - } - } - if len(verificationExecution) == 0 { - return t.eth1 - } - return verificationExecution -} - -func (t *Testnet) verificationBeacons() []*BeaconNode { - verificationBeacons := make([]*BeaconNode, 0) - for i, v := range t.verification { - if v { - verificationBeacons = append(verificationBeacons, t.beacons[i]) - } - } - if len(verificationBeacons) == 
0 { - return t.beacons - } - return verificationBeacons -} - -func (t *Testnet) verificationProxies() []*Proxy { - verificationProxies := make([]*Proxy, 0) - for i, v := range t.verification { - if v { - verificationProxies = append(verificationProxies, t.proxies[i]) - } - } - if len(verificationProxies) == 0 { - return t.proxies - } - return verificationProxies -} - -func (t *Testnet) removeNodeAsVerifier(id int) error { - if id >= len(t.verification) { - return fmt.Errorf("Node %d does not exist", id) - } - var any bool - for _, v := range t.verification { - if v { - any = true - break - } - } - if any { - t.verification[id] = false - } else { - // If no node is set as verifier, we will set all other nodes as verifiers then - for i := range t.verification { - t.verification[i] = (i != id) - } - } - return nil } func startTestnet(t *hivesim.T, env *testEnv, config *Config) *Testnet { prep := prepareTestnet(t, env, config) testnet := prep.createTestnet(t) - genesisTime := testnet.GenesisTime() countdown := genesisTime.Sub(time.Now()) t.Logf("Created new testnet, genesis at %s (%s from now)", genesisTime, countdown) - // for each key partition, we start a validator client with its own beacon node and eth1 node - for i, node := range config.Nodes { - prep.startEth1Node(testnet, env.Clients.ClientByNameAndRole(node.ExecutionClient, "eth1"), config.Eth1Consensus, node.ExecutionClientTTD, node.Chain) + testnet.NodeClientBundles = make(NodeClientBundles, len(config.Nodes)) + + // For each key partition, we start a client bundle that consists of: + // - 1 execution client + // - 1 beacon client + // - 1 validator client, + for nodeIndex, node := range config.Nodes { + // Prepare clients for this node + var ( + ec *ExecutionClient + bc *BeaconClient + vc *ValidatorClient + + executionDef = env.Clients.ClientByNameAndRole(node.ExecutionClient, "eth1") + beaconDef = env.Clients.ClientByNameAndRole(fmt.Sprintf("%s-bn", node.ConsensusClient), "beacon") + validatorDef = 
env.Clients.ClientByNameAndRole(fmt.Sprintf("%s-vc", node.ConsensusClient), "validator") + ) + + if executionDef == nil || beaconDef == nil || validatorDef == nil { + t.Fatalf("FAIL: Unable to get client") + } + + // Prepare the client objects with all the information necessary to eventually start + ec = prep.prepareExecutionNode(testnet, executionDef, config.Eth1Consensus, node.ExecutionClientTTD, nodeIndex, node.Chain) if node.ConsensusClient != "" { - prep.startBeaconNode(testnet, env.Clients.ClientByNameAndRole(fmt.Sprintf("%s-bn", node.ConsensusClient), "beacon"), node.BeaconNodeTTD, []int{i}) - prep.startValidatorClient(testnet, env.Clients.ClientByNameAndRole(fmt.Sprintf("%s-vc", node.ConsensusClient), "validator"), i, i) + bc = prep.prepareBeaconNode(testnet, beaconDef, node.BeaconNodeTTD, nodeIndex, ec) + vc = prep.prepareValidatorClient(testnet, validatorDef, bc, nodeIndex) + } + + // Bundle all the clients together + testnet.NodeClientBundles[nodeIndex] = NodeClientBundle{ + T: t, + Index: nodeIndex, + Verification: node.TestVerificationNode, + ExecutionClient: ec, + BeaconClient: bc, + ValidatorClient: vc, + } + + // Start the node clients if specified so + if !node.DisableStartup { + testnet.NodeClientBundles[nodeIndex].Start() } - testnet.verification = append(testnet.verification, node.TestVerificationNode) } return testnet } func (t *Testnet) stopTestnet() { - for _, p := range t.proxies { + for _, p := range t.Proxies().Running() { p.cancel() } } @@ -140,7 +108,7 @@ func (t *Testnet) SlotsTimeout(slots common.Slot) <-chan time.Time { } func (t *Testnet) ValidatorClientIndex(pk [48]byte) (int, error) { - for i, v := range t.validators { + for i, v := range t.ValidatorClients() { if v.ContainsKey(pk) { return i, nil } @@ -154,7 +122,8 @@ func (t *Testnet) WaitForFinality(ctx context.Context, timeoutSlots common.Slot) genesis := t.GenesisTime() slotDuration := time.Duration(t.spec.SECONDS_PER_SLOT) * time.Second timer := time.NewTicker(slotDuration) - 
done := make(chan common.Checkpoint, len(t.verificationBeacons())) + runningBeacons := t.VerificationNodes().BeaconClients().Running() + done := make(chan common.Checkpoint, len(runningBeacons)) var timeout <-chan time.Time if timeoutSlots > 0 { timeout = t.SlotsTimeout(timeoutSlots) @@ -172,7 +141,7 @@ func (t *Testnet) WaitForFinality(ctx context.Context, timeoutSlots common.Slot) case tim := <-timer.C: // start polling after first slot of genesis if tim.Before(genesis.Add(slotDuration)) { - t.t.Logf("Time till genesis: %s", genesis.Sub(tim)) + t.Logf("Time till genesis: %s", genesis.Sub(tim)) continue } @@ -184,11 +153,11 @@ func (t *Testnet) WaitForFinality(ctx context.Context, timeoutSlots common.Slot) } var ( wg sync.WaitGroup - ch = make(chan res, len(t.verificationBeacons())) + ch = make(chan res, len(runningBeacons)) ) - for i, b := range t.verificationBeacons() { + for i, b := range runningBeacons { wg.Add(1) - go func(ctx context.Context, i int, b *BeaconNode, ch chan res) { + go func(ctx context.Context, i int, b *BeaconClient, ch chan res) { defer wg.Done() ctx, cancel := context.WithTimeout(ctx, time.Second*5) defer cancel() @@ -267,7 +236,148 @@ func (t *Testnet) WaitForFinality(ctx context.Context, timeoutSlots common.Slot) close(ch) // print out logs in ascending idx order - sorted := make([]string, len(t.verificationBeacons())) + sorted := make([]string, len(runningBeacons)) + for out := range ch { + if out.err != nil { + return common.Checkpoint{}, out.err + } + sorted[out.idx] = out.msg + } + for _, msg := range sorted { + t.Logf(msg) + } + } + } +} + +// Waits for the current epoch to be finalized, or timeoutSlots have passed, whichever happens first. 
+func (t *Testnet) WaitForCurrentEpochFinalization(ctx context.Context, timeoutSlots common.Slot) (common.Checkpoint, error) { + genesis := t.GenesisTime() + slotDuration := time.Duration(t.spec.SECONDS_PER_SLOT) * time.Second + timer := time.NewTicker(slotDuration) + runningBeacons := t.VerificationNodes().BeaconClients().Running() + done := make(chan common.Checkpoint, len(runningBeacons)) + var timeout <-chan time.Time + if timeoutSlots > 0 { + timeout = t.SlotsTimeout(timeoutSlots) + } else { + timeout = make(<-chan time.Time) + } + + // Get the current head root which must be finalized + var ( + epochToBeFinalized common.Epoch + headInfo eth2api.BeaconBlockHeaderAndInfo + ) + if exists, err := beaconapi.BlockHeader(ctx, runningBeacons[0].API, eth2api.BlockHead, &headInfo); err != nil { + return common.Checkpoint{}, fmt.Errorf("failed to poll head: %v", err) + } else if !exists { + return common.Checkpoint{}, fmt.Errorf("no head block") + } + epochToBeFinalized = t.spec.SlotToEpoch(headInfo.Header.Message.Slot) + + for { + select { + case <-ctx.Done(): + return common.Checkpoint{}, fmt.Errorf("context called") + case <-timeout: + return common.Checkpoint{}, fmt.Errorf("Timeout") + case finalized := <-done: + return finalized, nil + case tim := <-timer.C: + // start polling after first slot of genesis + if tim.Before(genesis.Add(slotDuration)) { + t.Logf("Time till genesis: %s", genesis.Sub(tim)) + continue + } + + // new slot, log and check status of all beacon nodes + type res struct { + idx int + msg string + err error + } + var ( + wg sync.WaitGroup + ch = make(chan res, len(runningBeacons)) + ) + for i, b := range runningBeacons { + wg.Add(1) + go func(ctx context.Context, i int, b *BeaconClient, ch chan res) { + defer wg.Done() + ctx, cancel := context.WithTimeout(ctx, time.Second*5) + defer cancel() + + var ( + slot common.Slot + head string + justified string + finalized string + execution string + health float64 + ) + + var headInfo 
eth2api.BeaconBlockHeaderAndInfo + if exists, err := beaconapi.BlockHeader(ctx, b.API, eth2api.BlockHead, &headInfo); err != nil { + ch <- res{err: fmt.Errorf("beacon %d: failed to poll head: %v", i, err)} + return + } else if !exists { + ch <- res{err: fmt.Errorf("beacon %d: no head block", i)} + return + } + + var checkpoints eth2api.FinalityCheckpoints + if exists, err := beaconapi.FinalityCheckpoints(ctx, b.API, eth2api.StateIdRoot(headInfo.Header.Message.StateRoot), &checkpoints); err != nil || !exists { + if exists, err = beaconapi.FinalityCheckpoints(ctx, b.API, eth2api.StateIdSlot(headInfo.Header.Message.Slot), &checkpoints); err != nil { + ch <- res{err: fmt.Errorf("beacon %d: failed to poll finality checkpoint: %v", i, err)} + return + } else if !exists { + ch <- res{err: fmt.Errorf("beacon %d: Expected state for head block", i)} + return + } + } + + var versionedBlock eth2api.VersionedSignedBeaconBlock + if exists, err := beaconapi.BlockV2(ctx, b.API, eth2api.BlockIdRoot(headInfo.Root), &versionedBlock); err != nil { + ch <- res{err: fmt.Errorf("beacon %d: failed to retrieve block: %v", i, err)} + return + } else if !exists { + ch <- res{err: fmt.Errorf("beacon %d: block not found", i)} + return + } + switch versionedBlock.Version { + case "phase0": + execution = "0x0000..0000" + case "altair": + execution = "0x0000..0000" + case "bellatrix": + block := versionedBlock.Data.(*bellatrix.SignedBeaconBlock) + execution = shorten(block.Message.Body.ExecutionPayload.BlockHash.String()) + } + + slot = headInfo.Header.Message.Slot + head = shorten(headInfo.Root.String()) + justified = shorten(checkpoints.CurrentJustified.String()) + finalized = shorten(checkpoints.Finalized.String()) + health, err := getHealth(ctx, b.API, t.spec, slot) + if err != nil { + // warning is printed here instead because some clients + // don't support the required REST endpoint. 
+ fmt.Printf("WARN: beacon %d: %s\n", i, err) + } + + ch <- res{i, fmt.Sprintf("beacon %d: slot=%d, head=%s, health=%.2f, exec_payload=%s, justified=%s, finalized=%s", i, slot, head, health, execution, justified, finalized), nil} + + if checkpoints.Finalized != (common.Checkpoint{}) && checkpoints.Finalized.Epoch >= epochToBeFinalized { + done <- checkpoints.Finalized + } + }(ctx, i, b, ch) + } + wg.Wait() + close(ch) + + // print out logs in ascending idx order + sorted := make([]string, len(runningBeacons)) for out := range ch { if out.err != nil { return common.Checkpoint{}, out.err @@ -275,7 +385,7 @@ func (t *Testnet) WaitForFinality(ctx context.Context, timeoutSlots common.Slot) sorted[out.idx] = out.msg } for _, msg := range sorted { - t.t.Logf(msg) + t.Logf(msg) } } } @@ -287,8 +397,9 @@ func (t *Testnet) WaitForExecutionPayload(ctx context.Context, timeoutSlots comm genesis := t.GenesisTime() slotDuration := time.Duration(t.spec.SECONDS_PER_SLOT) * time.Second timer := time.NewTicker(slotDuration) - done := make(chan ethcommon.Hash, len(t.verificationBeacons())) - userRPCAddress, err := t.eth1[0].UserRPCAddress() + runningBeacons := t.VerificationNodes().BeaconClients().Running() + done := make(chan ethcommon.Hash, len(runningBeacons)) + userRPCAddress, err := t.ExecutionClients().Running()[0].UserRPCAddress() if err != nil { panic(err) } @@ -312,7 +423,7 @@ func (t *Testnet) WaitForExecutionPayload(ctx context.Context, timeoutSlots comm case tim := <-timer.C: // start polling after first slot of genesis if tim.Before(genesis.Add(slotDuration)) { - t.t.Logf("Time till genesis: %s", genesis.Sub(tim)) + t.Logf("Time till genesis: %s", genesis.Sub(tim)) continue } @@ -321,11 +432,11 @@ func (t *Testnet) WaitForExecutionPayload(ctx context.Context, timeoutSlots comm var td *TotalDifficultyHeader if err := rpcClient.CallContext(ctx, &td, "eth_getBlockByNumber", "latest", false); err == nil { if 
td.TotalDifficulty.ToInt().Cmp(t.eth1Genesis.Genesis.Config.TerminalTotalDifficulty) >= 0 { - t.t.Logf("ttd (%d) reached at execution block %d", t.eth1Genesis.Genesis.Config.TerminalTotalDifficulty, td.Number) + t.Logf("ttd (%d) reached at execution block %d", t.eth1Genesis.Genesis.Config.TerminalTotalDifficulty, td.Number) ttdReached = true } } else { - t.t.Logf("Error querying eth1 for TTD: %v", err) + t.Logf("Error querying eth1 for TTD: %v", err) } } @@ -338,11 +449,11 @@ func (t *Testnet) WaitForExecutionPayload(ctx context.Context, timeoutSlots comm var ( wg sync.WaitGroup - ch = make(chan res, len(t.verificationBeacons())) + ch = make(chan res, len(runningBeacons)) ) - for i, b := range t.verificationBeacons() { + for i, b := range runningBeacons { wg.Add(1) - go func(ctx context.Context, i int, b *BeaconNode, ch chan res) { + go func(ctx context.Context, i int, b *BeaconClient, ch chan res) { defer wg.Done() ctx, cancel := context.WithTimeout(ctx, time.Second*5) defer cancel() @@ -404,7 +515,7 @@ func (t *Testnet) WaitForExecutionPayload(ctx context.Context, timeoutSlots comm close(ch) // print out logs in ascending idx order - sorted := make([]string, len(t.verificationBeacons())) + sorted := make([]string, len(runningBeacons)) for out := range ch { if out.err != nil { return ethcommon.Hash{}, out.err @@ -412,7 +523,7 @@ func (t *Testnet) WaitForExecutionPayload(ctx context.Context, timeoutSlots comm sorted[out.idx] = out.msg } for _, msg := range sorted { - t.t.Logf(msg) + t.Logf(msg) } } diff --git a/simulators/eth2/engine/scenarios.go b/simulators/eth2/engine/scenarios.go index 190fa9fe2a..dea12b3f98 100644 --- a/simulators/eth2/engine/scenarios.go +++ b/simulators/eth2/engine/scenarios.go @@ -50,10 +50,7 @@ var ( "prysm": true, } - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY_CLIENT_OVERRIDE = map[string]*big.Int{ - "teku": big.NewInt(128), - "lighthouse": big.NewInt(128), - } + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY_CLIENT_OVERRIDE = map[string]*big.Int{} ) func 
getClientConfig(n node) *Config { @@ -127,7 +124,7 @@ func TestRPCError(t *hivesim.T, env *testEnv, n node) { Method: EngineForkchoiceUpdatedV1, Fields: fields, } - testnet.proxies[0].AddRequest(spoof) + testnet.Proxies().Running()[0].AddRequest(spoof) time.Sleep(24 * time.Second) if err := VerifyParticipation(testnet, ctx, FirstSlotAfterCheckpoint{&finalized}, 0.95); err != nil { t.Fatalf("FAIL: %v", err) @@ -274,7 +271,7 @@ func UnknownPoWParent(t *hivesim.T, env *testEnv, n node) { return nil } } - for n, p := range testnet.proxies { + for n, p := range testnet.Proxies().Running() { p.AddResponseCallback(EngineGetPayloadV1, getPayloadCallbackGen(n)) p.AddResponseCallback(EngineNewPayloadV1, newPayloadCallbackGen(n)) p.AddResponseCallback(EngineForkchoiceUpdatedV1, fcUCallbackGen(n)) @@ -345,7 +342,7 @@ func InvalidPayloadGen(invalidPayloadNumber int, invalidStatusResponse PayloadSt invalidPayloadHash = payload.BlockHash invalidPayloadProxyId = id // Remove this validator from the verification - testnet.removeNodeAsVerifier(id) + testnet.RemoveNodeAsVerifier(id) } return nil } @@ -390,7 +387,7 @@ func InvalidPayloadGen(invalidPayloadNumber int, invalidStatusResponse PayloadSt } } // We pass the id of the proxy to identify which one it is within the callback - for i, p := range testnet.proxies { + for i, p := range testnet.Proxies().Running() { p.AddResponseCallback(EngineGetPayloadV1, getPayloadCallbackGen(i)) p.AddResponseCallback(EngineNewPayloadV1, newPayloadCallbackGen(i)) } @@ -462,7 +459,7 @@ func IncorrectHeaderPrevRandaoPayload(t *hivesim.T, env *testEnv, n node) { } return nil } - for _, p := range testnet.proxies { + for _, p := range testnet.Proxies().Running() { p.AddResponseCallback(EngineGetPayloadV1, c) } @@ -560,7 +557,7 @@ func InvalidTimestampPayload(t *hivesim.T, env *testEnv, n node) { fcUCountLimit = 8 ) - for _, p := range testnet.proxies { + for _, p := range testnet.Proxies().Running() { p.AddResponseCallback(EngineGetPayloadV1, c) 
p.AddResponseCallback(EngineForkchoiceUpdatedV1, CheckErrorOnForkchoiceUpdatedPayloadAttr(&fcuLock, fcUCountLimit, &fcUAttrCount, fcudone)) } @@ -602,8 +599,8 @@ func IncorrectTTDConfigEL(t *hivesim.T, env *testEnv, n node) { defer testnet.stopTestnet() var ( - builder = testnet.beacons[0] - eth = testnet.eth1[0] + builder = testnet.BeaconClients().Running()[0] + eth = testnet.ExecutionClients().Running()[0] ec = NewEngineClient(t, eth, big.NewInt(elTTD)) ) @@ -665,7 +662,7 @@ func IncorrectTerminalBlockGen(ttdDelta int64) func(t *hivesim.T, env *testEnv, defer testnet.stopTestnet() var ( - badTTDImporter = testnet.beacons[3] + badTTDImporter = testnet.BeaconClients().Running()[3] ) // Wait for all execution clients with the correct TTD reach the merge @@ -675,7 +672,7 @@ func IncorrectTerminalBlockGen(ttdDelta int64) func(t *hivesim.T, env *testEnv, if err != nil { t.Fatalf("FAIL: Waiting for execution payload: %v", err) } - ec := NewEngineClient(t, testnet.eth1[0], config.TerminalTotalDifficulty) + ec := NewEngineClient(t, testnet.ExecutionClients().Running()[0], config.TerminalTotalDifficulty) transitionHeader, err := ec.Eth.HeaderByHash(ec.Ctx(), transitionPayloadHash) if err != nil { t.Fatalf("FAIL: Unable to get transition payload header from execution client: %v", err) @@ -845,7 +842,7 @@ func SyncingWithInvalidChain(t *hivesim.T, env *testEnv, n node) { } var ( - importerProxy = testnet.proxies[2] + importerProxy = testnet.Proxies().Running()[2] ) // Add the callback to the last proxy which will not produce blocks @@ -860,7 +857,7 @@ func SyncingWithInvalidChain(t *hivesim.T, env *testEnv, n node) { time.Sleep(time.Duration(testnet.spec.SECONDS_PER_SLOT) * time.Second * 5) // Verify the head of the chain, it should be a block with the latestValidHash included - for _, bn := range testnet.verificationBeacons() { + for _, bn := range testnet.VerificationNodes().BeaconClients().Running() { var versionedBlock eth2api.VersionedSignedBeaconBlock if exists, err := 
beaconapi.BlockV2(ctx, bn.API, eth2api.BlockHead, &versionedBlock); err != nil { t.Fatalf("FAIL: Unable to poll beacon chain head: %v", err) @@ -917,7 +914,7 @@ func BaseFeeEncodingCheck(t *hivesim.T, env *testEnv, n node) { // Check the base fee value in the transition payload. // Must be at least 256 to guarantee that the endianess encoding is correct. - ec := NewEngineClient(t, testnet.eth1[0], config.TerminalTotalDifficulty) + ec := NewEngineClient(t, testnet.ExecutionClients().Running()[0], config.TerminalTotalDifficulty) h, err := ec.Eth.HeaderByHash(ec.Ctx(), transitionPayloadHash) if err != nil { t.Fatalf("FAIL: Unable to get transition payload header from execution client: %v", err) @@ -961,7 +958,7 @@ func EqualTimestampTerminalTransitionBlock(t *hivesim.T, env *testEnv, n node) { fcUCountLimit = 5 ) - for _, p := range testnet.proxies { + for _, p := range testnet.Proxies().Running() { p.AddResponseCallback(EngineForkchoiceUpdatedV1, CheckErrorOnForkchoiceUpdatedPayloadAttr(&fcuLock, fcUCountLimit, &fcUAttrCount, fcudone)) } @@ -990,7 +987,7 @@ func TTDBeforeBellatrix(t *hivesim.T, env *testEnv, n node) { defer cancel() _, err := testnet.WaitForExecutionPayload(ctx, SlotsUntilMerge(testnet, config)) if err != nil { - for i, e := range testnet.eth1 { + for i, e := range testnet.ExecutionClients().Running() { ec := NewEngineClient(t, e, config.TerminalTotalDifficulty) if b, err := ec.Eth.BlockByNumber(ec.Ctx(), nil); err == nil { t.Logf("INFO: Last block on execution client %d: number=%d, hash=%s", i, b.NumberU64(), b.Hash()) @@ -1145,7 +1142,7 @@ func InvalidQuantityPayloadFields(t *hivesim.T, env *testEnv, n node) { } // We pass the id of the proxy to identify which one it is within the callback - for i, p := range testnet.proxies { + for i, p := range testnet.Proxies().Running() { p.AddResponseCallback(EngineGetPayloadV1, getPayloadCallbackGen(i)) } @@ -1178,7 +1175,7 @@ func InvalidQuantityPayloadFields(t *hivesim.T, env *testEnv, n node) { func 
SyncingWithChainHavingValidTransitionBlock(t *hivesim.T, env *testEnv, n node) { var ( - safeSlotsToImportOptimistically = big.NewInt(16) + safeSlotsToImportOptimistically = big.NewInt(8) safeSlotsImportThreshold = uint64(4) ) if clientSafeSlots, ok := SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY_CLIENT_OVERRIDE[n.ConsensusClient]; ok { @@ -1218,10 +1215,18 @@ func SyncingWithChainHavingValidTransitionBlock(t *hivesim.T, env *testEnv, n no defer testnet.stopTestnet() var ( - builder = testnet.beacons[0] - importer = testnet.beacons[1] + builder = testnet.BeaconClients()[0] + importer = testnet.BeaconClients()[1] + importerProxy = testnet.Proxies().Running()[1] ) + importerResponseMocker := NewEngineResponseMocker(&EngineResponse{ + // By default we respond SYNCING to any payload + Status: Syncing, + LatestValidHash: nil, + }) + importerResponseMocker.AddCallbacksToProxy(importerProxy) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Wait until the builder creates the first block with an execution payload @@ -1229,50 +1234,83 @@ func SyncingWithChainHavingValidTransitionBlock(t *hivesim.T, env *testEnv, n no if err != nil { t.Fatalf("FAIL: Waiting for execution payload on builder: %v", err) } + builderExecutionBlock, err := builder.GetFirstExecutionBeaconBlock(ctx) if err != nil || builderExecutionBlock == nil { t.Fatalf("FAIL: Could not find first execution block") } t.Logf("Builder Execution block found on slot %d", builderExecutionBlock.Message.Slot) + // We wait until the importer reaches optimistic sync + _, err = importer.WaitForOptimisticState(context.Background(), beacon.Slot(safeSlotsToImportOptimistically.Uint64()+safeSlotsImportThreshold), eth2api.BlockIdSlot(builderExecutionBlock.Message.Slot), true) + if err != nil { + t.Fatalf("FAIL: Timeout waiting for beacon node to become optimistic: %v", err) + } + + // Mocked responses are disabled, so the EL can finally validate payloads + importerResponseMocker.Mocking = false + + // Wait until 
newPayload or forkchoiceUpdated are called at least once + ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Second*20) + defer cancel() + select { + case <-importerResponseMocker.NewPayloadCalled: + case <-importerResponseMocker.ForkchoiceUpdatedCalled: + case <-ctxTimeout.Done(): + t.Fatalf("FAIL: Timeout waiting for beacon node to send engine directive: %v", err) + } + + // Wait a couple of slots here to make sure syncing does not produce a false positive + time.Sleep(time.Duration(config.SlotTime.Uint64()*10) * time.Second) + // Wait for the importer to get an execution payload _, err = importer.WaitForExecutionPayload(ctx, beacon.Slot(safeSlotsToImportOptimistically.Uint64()+safeSlotsImportThreshold)) if err != nil { t.Fatalf("FAIL: Waiting for execution payload on importer: %v", err) } - // Check the time at which the importer finally imported the block - importerSlot := testnet.spec.TimeToSlot(beacon.Timestamp(time.Now().Unix()), testnet.genesisTime) + // Compare heads, the importer must have the same head as the builder, + // and `execution_optimistic==false`. 
+ var ( + importerHeadInfo eth2api.BeaconBlockHeaderAndInfo + builderHeadInfo eth2api.BeaconBlockHeaderAndInfo + ) + ctxTimeout, cancel = context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + if exists, err := beaconapi.BlockHeader(ctxTimeout, importer.API, eth2api.BlockHead, &importerHeadInfo); err != nil { + t.Fatalf("FAIL: Failed to poll head importer head: %v", err) + } else if !exists { + t.Fatalf("FAIL: Failed to poll head importer head: !exists") + } + ctxTimeout, cancel = context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + if exists, err := beaconapi.BlockHeader(ctxTimeout, builder.API, eth2api.BlockHead, &builderHeadInfo); err != nil { + t.Fatalf("FAIL: Failed to poll head builder head: %v", err) + } else if !exists { + t.Fatalf("FAIL: Failed to poll head builder head: !exists") + } - // Delta bewteen the first built execution block and the time when the importer - // finally imports the block must be at least SafeSlotsToImportOptimistically - diff := importerSlot - builderExecutionBlock.Message.Slot - if diff < beacon.Slot(safeSlotsToImportOptimistically.Uint64()) || diff > beacon.Slot(safeSlotsToImportOptimistically.Uint64()+safeSlotsImportThreshold) { - t.Fatalf("FAIL: Execution block imported outside of slot range: SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY=%d, ImporterSlot=%d, BuilderSlot=%d, Diff=%d", safeSlotsToImportOptimistically.Uint64(), importerSlot, builderExecutionBlock.Message.Slot, diff) + if importerHeadInfo.Root != builderHeadInfo.Root { + t.Fatalf("FAIL: importer and builder heads are not equal: %v != %v", importerHeadInfo.Root, builderHeadInfo.Root) } - // Wait for the importer to fully sync and then verify heads - maxTimeout := testnet.SlotsTimeout(5) -forloop: - for { - select { - case <-testnet.SlotsTimeout(1): - if err := VerifyELHeads(testnet, ctx); err == nil { - t.Logf("INFO: EL heads are in sync") - break forloop - } - case <-maxTimeout: - t.Fatalf("FAIL: Timeout waiting for EL Heads to 
sync up") - case <-ctx.Done(): - t.Fatalf("FAIL: Context done waiting for EL Heads to sync up") - } + var headOptStatus BlockV2OptimisticResponse + ctxTimeout, cancel = context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + if exists, err := eth2api.SimpleRequest(ctxTimeout, importer.API, eth2api.FmtGET("/eth/v2/beacon/blocks/%s", eth2api.BlockHead.BlockId()), &headOptStatus); err != nil { + t.Fatalf("FAIL: Failed to poll head importer head: %v", err) + } else if !exists { + t.Fatalf("FAIL: Failed to poll head importer head: !exists") + } + if headOptStatus.ExecutionOptimistic == true { + t.Fatalf("FAIL: importer still optimistic: execution_optimistic==%t", headOptStatus.ExecutionOptimistic) } } func SyncingWithChainHavingInvalidTransitionBlock(t *hivesim.T, env *testEnv, n node) { var ( - safeSlotsToImportOptimistically = big.NewInt(16) + safeSlotsToImportOptimistically = big.NewInt(8) safeSlotsImportThreshold = uint64(4) ) if clientSafeSlots, ok := SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY_CLIENT_OVERRIDE[n.ConsensusClient]; ok { @@ -1302,6 +1340,8 @@ func SyncingWithChainHavingInvalidTransitionBlock(t *hivesim.T, env *testEnv, n }, }, }, + AltairForkEpoch: common.Big1, + MergeForkEpoch: common.Big2, Eth1Consensus: setup.Eth1EthashConsensus{ MiningNodes: 2, }, @@ -1312,49 +1352,61 @@ func SyncingWithChainHavingInvalidTransitionBlock(t *hivesim.T, env *testEnv, n defer testnet.stopTestnet() var ( - builder = testnet.beacons[0] - importer = testnet.beacons[1] - importerProxy = testnet.proxies[1] + builder = testnet.BeaconClients()[0] + importer = testnet.BeaconClients()[1] + importerProxy = testnet.Proxies().Running()[1] ) + importerResponseMocker := NewEngineResponseMocker(&EngineResponse{ + // By default we respond SYNCING to any payload + Status: Syncing, + LatestValidHash: nil, + }) + importerResponseMocker.AddCallbacksToProxy(importerProxy) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() + // Wait until the builder 
creates the first block with an execution payload _, err := testnet.WaitForExecutionPayload(ctx, SlotsUntilMerge(testnet, config)) if err != nil { - t.Fatalf("FAIL: Waiting for execution payload on builder: %v", err) + t.Fatalf("FAIL: Timeout waiting for execution payload on builder: %v", err) } + + // Fetch the first execution block which will be used for verification builderExecutionBlock, err := builder.GetFirstExecutionBeaconBlock(ctx) if err != nil || builderExecutionBlock == nil { t.Fatalf("FAIL: Could not find first execution block") } - transitionPayloadHash := common.BytesToHash(builderExecutionBlock.Message.Body.ExecutionPayload.BlockHash[:]) - t.Logf("Builder Execution block found on slot %d, hash=%s", builderExecutionBlock.Message.Slot, transitionPayloadHash) - // The importer's execution client will invalidate all payloads including the transition payload - callbackCalled := make(chan common.Hash) - zeroHash := common.Hash{} - importerProxy.AddResponseCallback(EngineForkchoiceUpdatedV1, InvalidateExecutionPayloads(EngineForkchoiceUpdatedV1, NewSyncHashes(), &zeroHash, callbackCalled)) + t.Logf("INFO: First execution block: %d, %v", builderExecutionBlock.Message.Slot, builderExecutionBlock.Message.StateRoot) - // Wait here until `SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY` slots have passed - safeSlotsTimeout := testnet.SlotsTimeout(beacon.Slot(safeSlotsToImportOptimistically.Uint64() + safeSlotsImportThreshold)) -forloop: - for { - select { - case invalidatedHash := <-callbackCalled: - t.Logf("INFO: Callback invalidated payload %v", invalidatedHash) - break forloop - case <-safeSlotsTimeout: - t.Fatalf("FAIL: Test timeout waiting for importer to optimistically sync the invalid payload") - case <-testnet.SlotsTimeout(1): - t.Logf("INFO: Waiting for importer to try to optimistically sync the invalid payload, realTimeSlot=%d", importer.spec.TimeToSlot(beacon.Timestamp(time.Now().Unix()), importer.genesisTime)) - case <-ctx.Done(): - t.Fatalf("FAIL: Context done 
while waiting for importer") - } + // We wait until the importer reaches optimistic sync + _, err = importer.WaitForOptimisticState(ctx, beacon.Slot(safeSlotsToImportOptimistically.Uint64()+safeSlotsImportThreshold), eth2api.BlockIdSlot(builderExecutionBlock.Message.Slot), true) + if err != nil { + t.Fatalf("FAIL: Timeout waiting for beacon node to become optimistic: %v", err) + } + + // We invalidate the entire proof-of-stake chain + t.Logf("INFO: Changing default response to INVALID") + importerResponseMocker.SetDefaultResponse(&EngineResponse{ + // The default is now that the execution client returns INVALID + LVH==0x00..00 + Status: Invalid, + LatestValidHash: &(common.Hash{}), + }) + + // Wait until newPayload or forkchoiceUpdated are called at least once + ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Second*20) + defer cancel() + select { + case <-importerResponseMocker.NewPayloadCalled: + case <-importerResponseMocker.ForkchoiceUpdatedCalled: + case <-ctxTimeout.Done(): + t.Fatalf("FAIL: Timeout waiting for beacon node to send engine directive: %v", err) } // Wait a couple of slots here to make sure syncing does not produce a false positive - time.Sleep(time.Duration(config.SlotTime.Uint64()+5) * time.Second) + time.Sleep(time.Duration(config.SlotTime.Uint64()*10) * time.Second) // Query the beacon chain head of the importer node, it should still // point to a pre-merge block. 
@@ -1366,13 +1418,24 @@ forloop: } if headInfo.Header.Message.Slot != (builderExecutionBlock.Message.Slot - 1) { - t.Fatalf("FAIL: Importer head is beyond the invalid execution payload block: importer=%v:%d, builder=%v:%d", headInfo.Root, headInfo.Header.Message.Slot, builderExecutionBlock.Message.StateRoot, builderExecutionBlock.Message.Slot) + ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + var headOptStatus BlockV2OptimisticResponse + if exists, err := eth2api.SimpleRequest(ctxTimeout, importer.API, eth2api.FmtGET("/eth/v2/beacon/blocks/%s", eth2api.BlockHead.BlockId()), &headOptStatus); err != nil { + // Block still not synced + fmt.Printf("DEBUG: Queried block %s: %v\n", eth2api.BlockHead.BlockId(), err) + } else if !exists { + // Block still not synced + fmt.Printf("DEBUG: Queried block %s: %v\n", eth2api.BlockHead.BlockId(), err) + } + + t.Fatalf("FAIL: Importer head is beyond the invalid execution payload block: importer=%v:%d, builder=%v:%d, execution_optimistic=%t", headInfo.Root, headInfo.Header.Message.Slot, builderExecutionBlock.Message.StateRoot, builderExecutionBlock.Message.Slot, headOptStatus.ExecutionOptimistic) } } func SyncingWithChainHavingInvalidPostTransitionBlock(t *hivesim.T, env *testEnv, n node) { var ( - safeSlotsToImportOptimistically = big.NewInt(16) + safeSlotsToImportOptimistically = big.NewInt(8) safeSlotsImportThreshold = uint64(4) ) if clientSafeSlots, ok := SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY_CLIENT_OVERRIDE[n.ConsensusClient]; ok { @@ -1402,6 +1465,8 @@ func SyncingWithChainHavingInvalidPostTransitionBlock(t *hivesim.T, env *testEnv }, }, }, + AltairForkEpoch: common.Big1, + MergeForkEpoch: common.Big2, Eth1Consensus: setup.Eth1EthashConsensus{ MiningNodes: 2, }, @@ -1412,11 +1477,18 @@ func SyncingWithChainHavingInvalidPostTransitionBlock(t *hivesim.T, env *testEnv defer testnet.stopTestnet() var ( - builder = testnet.beacons[0] - importer = testnet.beacons[1] - importerProxy = 
testnet.proxies[1] + builder = testnet.BeaconClients()[0] + importer = testnet.BeaconClients()[1] + importerProxy = testnet.Proxies().Running()[1] ) + importerResponseMocker := NewEngineResponseMocker(&EngineResponse{ + // By default we respond SYNCING to any payload + Status: Syncing, + LatestValidHash: nil, + }) + importerResponseMocker.AddCallbacksToProxy(importerProxy) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Wait until the builder creates the first block with an execution payload @@ -1424,6 +1496,8 @@ func SyncingWithChainHavingInvalidPostTransitionBlock(t *hivesim.T, env *testEnv if err != nil { t.Fatalf("FAIL: Waiting for execution payload on builder: %v", err) } + + // Fetch the first execution block which will be used for verification builderExecutionBlock, err := builder.GetFirstExecutionBeaconBlock(ctx) if err != nil || builderExecutionBlock == nil { t.Fatalf("FAIL: Could not find first execution block") @@ -1431,30 +1505,37 @@ func SyncingWithChainHavingInvalidPostTransitionBlock(t *hivesim.T, env *testEnv transitionPayloadHash := common.BytesToHash(builderExecutionBlock.Message.Body.ExecutionPayload.BlockHash[:]) t.Logf("Builder Execution block found on slot %d, hash=%s", builderExecutionBlock.Message.Slot, transitionPayloadHash) - // The importer's execution client will invalidate all payloads excluding the transition payload - callbackCalled := make(chan common.Hash) - exceptions := NewSyncHashes(transitionPayloadHash) - importerProxy.AddResponseCallback(EngineForkchoiceUpdatedV1, InvalidateExecutionPayloads(EngineForkchoiceUpdatedV1, exceptions, &transitionPayloadHash, callbackCalled)) + // We wait until the importer reaches optimistic sync + _, err = importer.WaitForOptimisticState(ctx, beacon.Slot(safeSlotsToImportOptimistically.Uint64()+safeSlotsImportThreshold), eth2api.BlockIdSlot(builderExecutionBlock.Message.Slot), true) + if err != nil { + t.Fatalf("FAIL: Timeout waiting for beacon node to become 
optimistic: %v", err) + } - // Wait here until `SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY` slots have passed - safeSlotsTimeout := testnet.SlotsTimeout(beacon.Slot(safeSlotsToImportOptimistically.Uint64() + safeSlotsImportThreshold)) -forloop: - for { - select { - case invalidatedHash := <-callbackCalled: - t.Logf("INFO: Callback invalidated payload %v", invalidatedHash) - break forloop - case <-safeSlotsTimeout: - t.Fatalf("FAIL: Test timeout waiting for importer to optimistically sync the invalid payload") - case <-testnet.SlotsTimeout(1): - t.Logf("INFO: Waiting for importer to try to optimistically sync the invalid payload, realTimeSlot=%d", importer.spec.TimeToSlot(beacon.Timestamp(time.Now().Unix()), importer.genesisTime)) - case <-ctx.Done(): - t.Fatalf("FAIL: Context done while waiting for importer") - } + // We invalidate the chain after the transition payload + importerResponseMocker.AddResponse(transitionPayloadHash, &EngineResponse{ + // Transition payload is valid + Status: Valid, + LatestValidHash: &transitionPayloadHash, + }) + importerResponseMocker.SetDefaultResponse(&EngineResponse{ + // The default is now that the execution client returns INVALID + // with latest valid hash equal to the transition payload + Status: Invalid, + LatestValidHash: &transitionPayloadHash, + }) + + // Wait until newPayload or forkchoiceUpdated are called at least once + ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Second*20) + defer cancel() + select { + case <-importerResponseMocker.NewPayloadCalled: + case <-importerResponseMocker.ForkchoiceUpdatedCalled: + case <-ctxTimeout.Done(): + t.Fatalf("FAIL: Timeout waiting for beacon node to send engine directive: %v", err) } // Wait a couple of slots here to make sure syncing does not produce a false positive - time.Sleep(time.Duration(config.SlotTime.Uint64()+5) * time.Second) + time.Sleep(time.Duration((config.SlotTime.Uint64()+5)*uint64(testnet.spec.SECONDS_PER_SLOT)) * time.Second) // Query the beacon 
chain head of the importer node, it should point to transition payload block. block, err := importer.GetFirstExecutionBeaconBlock(ctx) @@ -1469,8 +1550,8 @@ forloop: func ReOrgSyncWithChainHavingInvalidTerminalBlock(t *hivesim.T, env *testEnv, n node) { var ( - safeSlotsToImportOptimistically = big.NewInt(16) - safeSlotsImportThreshold = uint64(2) + safeSlotsToImportOptimistically = big.NewInt(8) + safeSlotsImportThreshold = uint64(4) ) if clientSafeSlots, ok := SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY_CLIENT_OVERRIDE[n.ConsensusClient]; ok { safeSlotsToImportOptimistically = clientSafeSlots @@ -1525,13 +1606,23 @@ func ReOrgSyncWithChainHavingInvalidTerminalBlock(t *hivesim.T, env *testEnv, n defer testnet.stopTestnet() var ( - validBuilder = testnet.beacons[0] - validBuilderProxy = testnet.proxies[0] - invalidBuilder = testnet.beacons[1] - importer = testnet.beacons[2] - importerProxy = testnet.proxies[2] + validBuilder = testnet.BeaconClients()[0] + invalidBuilder = testnet.BeaconClients()[1] + importer = testnet.BeaconClients()[2] + validBuilderProxy = testnet.Proxies().Running()[0] + importerProxy = testnet.Proxies().Running()[2] ) + validResponseMocker := NewEngineResponseMocker(&EngineResponse{ + // By default we respond SYNCING to any payload + Status: Syncing, + LatestValidHash: nil, + }) + validResponseMocker.AddGetPayloadPassthroughToProxy(validBuilderProxy) + validResponseMocker.AddCallbacksToProxy(importerProxy) + validResponseMocker.AddCallbacksToProxy(validBuilderProxy) + validResponseMocker.Mocking = false + ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Wait until the builders create their first blocks with an execution payload @@ -1583,51 +1674,13 @@ func ReOrgSyncWithChainHavingInvalidTerminalBlock(t *hivesim.T, env *testEnv, n t.Fatalf("FAIL: Valid builder and importer execution blocks are the unequal: %v == %v", validBuilderPayloadHash, importerPayloadHash) } - // Payloads from the Invalid Builder need to be invalidated by 
the EL Mock - var ( - validPayloads = NewSyncHashes() - callbackCalled = make(chan common.Hash) - ) - - // From the valid builder we will get all generated payloads, and all of them - // will be exceptions to the list of payloads to invalidate. - getPayloadCallback := func(res []byte, req []byte) *proxy.Spoof { - // Invalidate the transition payload - var ( - payload ExecutableDataV1 - err error - ) - err = UnmarshalFromJsonRPCResponse(res, &payload) - if err != nil { - panic(err) - } - - // Payloads generated by the valid builder are whitelisted. - validPayloads.Add(payload.BlockHash) - t.Logf("INFO: Added hash to the list of exceptions: %s", payload.BlockHash) - return nil - } - validBuilderProxy.AddResponseCallback(EngineGetPayloadV1, getPayloadCallback) - // Then we invalidate all the payloads not found in this list on the validBuilderProxy // and the importer - validBuilderProxy.AddResponseCallback(EngineForkchoiceUpdatedV1, InvalidateExecutionPayloads(EngineForkchoiceUpdatedV1, validPayloads, &common.Hash{}, callbackCalled)) - validBuilderProxy.AddResponseCallback(EngineNewPayloadV1, InvalidateExecutionPayloads(EngineNewPayloadV1, validPayloads, &common.Hash{}, callbackCalled)) - - importerProxy.AddResponseCallback(EngineForkchoiceUpdatedV1, InvalidateExecutionPayloads(EngineForkchoiceUpdatedV1, validPayloads, &common.Hash{}, callbackCalled)) - importerProxy.AddResponseCallback(EngineNewPayloadV1, InvalidateExecutionPayloads(EngineNewPayloadV1, validPayloads, &common.Hash{}, callbackCalled)) - - // Keep log of the payloads received/invalidated - go func(ctx context.Context, c <-chan common.Hash) { - for { - select { - case h := <-c: - t.Logf("INFO: Invalidated payload: %s", h) - case <-ctx.Done(): - return - } - } - }(ctx, callbackCalled) + validResponseMocker.DefaultResponse = &EngineResponse{ + Status: Invalid, + LatestValidHash: &(ethcommon.Hash{}), + } + validResponseMocker.Mocking = true // We need to wait until `SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY` 
pass, plus a couple more slots safeSlotsTimeout := testnet.SlotsTimeout(beacon.Slot(safeSlotsToImportOptimistically.Uint64() + safeSlotsImportThreshold)) @@ -1651,7 +1704,7 @@ forloop: } // Check that the invalid payload hash was not incorporated into the valid builder or the importer. - b, err = BeaconNodes{ + b, err = BeaconClients{ importer, validBuilder, }.GetBeaconBlockByExecutionHash(ctx, invalidBuilderPayloadHash) diff --git a/simulators/eth2/engine/verification.go b/simulators/eth2/engine/verification.go index 487498cd55..3cca72f51b 100644 --- a/simulators/eth2/engine/verification.go +++ b/simulators/eth2/engine/verification.go @@ -23,7 +23,7 @@ import ( // Interface to specify on which slot the verification will be performed type VerificationSlot interface { - Slot(t *Testnet, ctx context.Context, bn *BeaconNode) (common.Slot, error) + Slot(t *Testnet, ctx context.Context, bn *BeaconClient) (common.Slot, error) } // Return the slot at the start of the checkpoint's following epoch @@ -31,7 +31,7 @@ type FirstSlotAfterCheckpoint struct { *common.Checkpoint } -func (c FirstSlotAfterCheckpoint) Slot(t *Testnet, _ context.Context, _ *BeaconNode) (common.Slot, error) { +func (c FirstSlotAfterCheckpoint) Slot(t *Testnet, _ context.Context, _ *BeaconClient) (common.Slot, error) { return t.spec.EpochStartSlot(c.Checkpoint.Epoch + 1) } @@ -40,21 +40,21 @@ type LastSlotAtCheckpoint struct { *common.Checkpoint } -func (c LastSlotAtCheckpoint) Slot(t *Testnet, _ context.Context, _ *BeaconNode) (common.Slot, error) { +func (c LastSlotAtCheckpoint) Slot(t *Testnet, _ context.Context, _ *BeaconClient) (common.Slot, error) { return t.spec.SLOTS_PER_EPOCH * common.Slot(c.Checkpoint.Epoch), nil } // Get last slot according to current time type LastestSlotByTime struct{} -func (l LastestSlotByTime) Slot(t *Testnet, _ context.Context, _ *BeaconNode) (common.Slot, error) { +func (l LastestSlotByTime) Slot(t *Testnet, _ context.Context, _ *BeaconClient) (common.Slot, error) { 
return t.spec.TimeToSlot(common.Timestamp(time.Now().Unix()), t.genesisTime), nil } // Get last slot according to current head of a beacon node type LastestSlotByHead struct{} -func (l LastestSlotByHead) Slot(t *Testnet, ctx context.Context, bn *BeaconNode) (common.Slot, error) { +func (l LastestSlotByHead) Slot(t *Testnet, ctx context.Context, bn *BeaconClient) (common.Slot, error) { var headInfo eth2api.BeaconBlockHeaderAndInfo if exists, err := beaconapi.BlockHeader(ctx, bn.API, eth2api.BlockHead, &headInfo); err != nil { return common.Slot(0), fmt.Errorf("failed to poll head: %v", err) @@ -67,7 +67,8 @@ func (l LastestSlotByHead) Slot(t *Testnet, ctx context.Context, bn *BeaconNode) // VerifyParticipation ensures that the participation of the finialized epoch // of a given checkpoint is above the expected threshold. func VerifyParticipation(t *Testnet, ctx context.Context, vs VerificationSlot, expected float64) error { - slot, err := vs.Slot(t, ctx, t.verificationBeacons()[0]) + runningBeacons := t.VerificationNodes().BeaconClients().Running() + slot, err := vs.Slot(t, ctx, runningBeacons[0]) if err != nil { return err } @@ -75,7 +76,7 @@ func VerifyParticipation(t *Testnet, ctx context.Context, vs VerificationSlot, e // slot-1 to target last slot in finalized epoch slot = slot - 1 } - for i, b := range t.verificationBeacons() { + for i, b := range runningBeacons { health, err := getHealth(ctx, b.API, t.spec, slot) if err != nil { return err @@ -83,7 +84,7 @@ func VerifyParticipation(t *Testnet, ctx context.Context, vs VerificationSlot, e if health < expected { return fmt.Errorf("beacon %d: participation not healthy (got: %.2f, expected: %.2f)", i, health, expected) } - t.t.Logf("beacon %d: epoch=%d participation=%.2f", i, t.spec.SlotToEpoch(slot), health) + t.Logf("beacon %d: epoch=%d participation=%.2f", i, t.spec.SlotToEpoch(slot), health) } return nil } @@ -92,14 +93,15 @@ func VerifyParticipation(t *Testnet, ctx context.Context, vs VerificationSlot, e // 
finalized block and verifies that is in the execution client's canonical // chain. func VerifyExecutionPayloadIsCanonical(t *Testnet, ctx context.Context, vs VerificationSlot) error { - slot, err := vs.Slot(t, ctx, t.verificationBeacons()[0]) + runningBeacons := t.VerificationNodes().BeaconClients().Running() + slot, err := vs.Slot(t, ctx, runningBeacons[0]) if err != nil { return err } var blockId eth2api.BlockIdSlot blockId = eth2api.BlockIdSlot(slot) var versionedBlock eth2api.VersionedSignedBeaconBlock - if exists, err := beaconapi.BlockV2(ctx, t.verificationBeacons()[0].API, blockId, &versionedBlock); err != nil { + if exists, err := beaconapi.BlockV2(ctx, runningBeacons[0].API, blockId, &versionedBlock); err != nil { return fmt.Errorf("beacon %d: failed to retrieve block: %v", 0, err) } else if !exists { return fmt.Errorf("beacon %d: block not found", 0) @@ -109,7 +111,7 @@ func VerifyExecutionPayloadIsCanonical(t *Testnet, ctx context.Context, vs Verif } payload := versionedBlock.Data.(*bellatrix.SignedBeaconBlock).Message.Body.ExecutionPayload - for i, proxy := range t.verificationProxies() { + for i, proxy := range t.VerificationNodes().Proxies().Running() { client := ethclient.NewClient(proxy.RPC()) block, err := client.BlockByNumber(ctx, big.NewInt(int64(payload.BlockNumber))) if err != nil { @@ -126,7 +128,7 @@ func VerifyExecutionPayloadIsCanonical(t *Testnet, ctx context.Context, vs Verif // finalized block and verifies that is in the execution client's canonical // chain. 
func VerifyExecutionPayloadHashInclusion(t *Testnet, ctx context.Context, vs VerificationSlot, hash ethcommon.Hash) (*bellatrix.SignedBeaconBlock, error) { - for _, bn := range t.verificationBeacons() { + for _, bn := range t.VerificationNodes().BeaconClients().Running() { b, err := VerifyExecutionPayloadHashInclusionNode(t, ctx, vs, bn, hash) if err != nil || b != nil { return b, err @@ -135,37 +137,25 @@ func VerifyExecutionPayloadHashInclusion(t *Testnet, ctx context.Context, vs Ver return nil, nil } -func VerifyExecutionPayloadHashInclusionNode(t *Testnet, ctx context.Context, vs VerificationSlot, bn *BeaconNode, hash ethcommon.Hash) (*bellatrix.SignedBeaconBlock, error) { +func VerifyExecutionPayloadHashInclusionNode(t *Testnet, ctx context.Context, vs VerificationSlot, bn *BeaconClient, hash ethcommon.Hash) (*bellatrix.SignedBeaconBlock, error) { lastSlot, err := vs.Slot(t, ctx, bn) if err != nil { return nil, err } - /* - enr, _ := bn.ENR() - t.t.Logf("INFO: Looking for block %v in node %v, from slot %d", hash, enr, lastSlot) - */ for slot := lastSlot; slot > 0; slot -= 1 { var versionedBlock eth2api.VersionedSignedBeaconBlock if exists, err := beaconapi.BlockV2(ctx, bn.API, eth2api.BlockIdSlot(slot), &versionedBlock); err != nil { - // t.t.Logf("INFO: Error getting block at slot %d: %v", slot, err) continue } else if !exists { - // t.t.Logf("INFO: Error getting block at slot %d", slot) continue } if versionedBlock.Version != "bellatrix" { // Block can't contain an executable payload - // t.t.Logf("INFO: Breaking at slot %d", slot) break } block := versionedBlock.Data.(*bellatrix.SignedBeaconBlock) payload := block.Message.Body.ExecutionPayload - /* - payloadS, _ := json.MarshalIndent(payload, "", " ") - t.t.Logf("DEBUG: Comparing payload of slot %d: %s", slot, payloadS) - */ if bytes.Compare(payload.BlockHash[:], hash[:]) == 0 { - // t.t.Logf("INFO: Execution block %v found in %d: %v", hash, block.Message.Slot, ethcommon.BytesToHash(payload.BlockHash[:])) 
return block, nil } } @@ -175,14 +165,15 @@ func VerifyExecutionPayloadHashInclusionNode(t *Testnet, ctx context.Context, vs // VerifyProposers checks that all validator clients have proposed a block on // the finalized beacon chain that includes an execution payload. func VerifyProposers(t *Testnet, ctx context.Context, vs VerificationSlot, allow_empty_blocks bool) error { - lastSlot, err := vs.Slot(t, ctx, t.verificationBeacons()[0]) + runningBeacons := t.VerificationNodes().BeaconClients().Running() + lastSlot, err := vs.Slot(t, ctx, runningBeacons[0]) if err != nil { return err } - proposers := make([]bool, len(t.verificationBeacons())) + proposers := make([]bool, len(runningBeacons)) for slot := common.Slot(0); slot <= lastSlot; slot += 1 { var versionedBlock eth2api.VersionedSignedBeaconBlock - if exists, err := beaconapi.BlockV2(ctx, t.verificationBeacons()[0].API, eth2api.BlockIdSlot(slot), &versionedBlock); err != nil { + if exists, err := beaconapi.BlockV2(ctx, runningBeacons[0].API, eth2api.BlockIdSlot(slot), &versionedBlock); err != nil { if allow_empty_blocks { continue } @@ -207,7 +198,7 @@ func VerifyProposers(t *Testnet, ctx context.Context, vs VerificationSlot, allow } var validator eth2api.ValidatorResponse - if exists, err := beaconapi.StateValidator(ctx, t.verificationBeacons()[0].API, eth2api.StateIdSlot(slot), eth2api.ValidatorIdIndex(proposerIndex), &validator); err != nil { + if exists, err := beaconapi.StateValidator(ctx, runningBeacons[0].API, eth2api.StateIdSlot(slot), eth2api.ValidatorIdIndex(proposerIndex), &validator); err != nil { return fmt.Errorf("beacon %d: failed to retrieve validator: %v", 0, err) } else if !exists { return fmt.Errorf("beacon %d: validator not found", 0) @@ -227,9 +218,11 @@ func VerifyProposers(t *Testnet, ctx context.Context, vs VerificationSlot, allow } func VerifyELBlockLabels(t *Testnet, ctx context.Context) error { - for i := 0; i < len(t.verificationExecution()); i++ { - el := t.verificationExecution()[i] 
- bn := t.verificationBeacons()[i] + runningExecution := t.VerificationNodes().ExecutionClients().Running() + runningBeacons := t.VerificationNodes().BeaconClients().Running() + for i := 0; i < len(runningExecution); i++ { + el := runningExecution[i] + bn := runningBeacons[i] // Get the head var headInfo eth2api.BeaconBlockHeaderAndInfo if exists, err := beaconapi.BlockHeader(ctx, bn.API, eth2api.BlockHead, &headInfo); err != nil { @@ -296,15 +289,16 @@ func VerifyELBlockLabels(t *Testnet, ctx context.Context) error { } func VerifyELHeads(t *Testnet, ctx context.Context) error { - client := ethclient.NewClient(t.verificationExecution()[0].RPC()) + runningExecution := t.VerificationNodes().ExecutionClients().Running() + client := ethclient.NewClient(runningExecution[0].HiveClient.RPC()) head, err := client.HeaderByNumber(ctx, nil) if err != nil { return err } - t.t.Logf("Verifying EL heads at %v", head.Hash()) - for i, node := range t.verificationExecution() { - client := ethclient.NewClient(node.RPC()) + t.Logf("Verifying EL heads at %v", head.Hash()) + for i, node := range runningExecution { + client := ethclient.NewClient(node.HiveClient.RPC()) head2, err := client.HeaderByNumber(ctx, nil) if err != nil { return err From f90c37ad24ef01853544cc84be2720c79dedae9f Mon Sep 17 00:00:00 2001 From: marioevz Date: Wed, 17 Aug 2022 21:40:03 +0000 Subject: [PATCH 2/8] simulators/eth2/engine: No viable head due to opt sync --- simulators/eth2/engine/main.go | 1 + simulators/eth2/engine/scenarios.go | 254 ++++++++++++++++++++++++++++ 2 files changed, 255 insertions(+) diff --git a/simulators/eth2/engine/main.go b/simulators/eth2/engine/main.go index 08c21a1641..a3547ffd56 100644 --- a/simulators/eth2/engine/main.go +++ b/simulators/eth2/engine/main.go @@ -42,6 +42,7 @@ var transitionTests = []testSpec{ {Name: "syncing-with-chain-having-invalid-transition-block", Run: SyncingWithChainHavingInvalidTransitionBlock}, {Name: 
"syncing-with-chain-having-invalid-post-transition-block", Run: SyncingWithChainHavingInvalidPostTransitionBlock}, {Name: "re-org-and-sync-with-chain-having-invalid-terminal-block", Run: ReOrgSyncWithChainHavingInvalidTerminalBlock}, + {Name: "no-viable-head-due-to-optimistic-sync", Run: NoViableHeadDueToOptimisticSync}, } func main() { diff --git a/simulators/eth2/engine/scenarios.go b/simulators/eth2/engine/scenarios.go index dea12b3f98..e3977763ac 100644 --- a/simulators/eth2/engine/scenarios.go +++ b/simulators/eth2/engine/scenarios.go @@ -1715,3 +1715,257 @@ forloop: t.Fatalf("FAIL: Invalid beacon block (incorrect TTD) was incorporated after optimistic sync: %v", b) } } + +func NoViableHeadDueToOptimisticSync(t *hivesim.T, env *testEnv, n node) { + var ( + safeSlotsToImportOptimistically = big.NewInt(8) + // safeSlotsImportThreshold = uint64(4) + ) + if clientSafeSlots, ok := SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY_CLIENT_OVERRIDE[n.ConsensusClient]; ok { + safeSlotsToImportOptimistically = clientSafeSlots + } + + config := getClientConfig(n).join(&Config{ + Nodes: Nodes{ + // Builder 1 + node{ + ExecutionClient: n.ExecutionClient, + ConsensusClient: n.ConsensusClient, + ValidatorShares: 1, + }, + // Importer + node{ + ExecutionClient: n.ExecutionClient, + ConsensusClient: n.ConsensusClient, + ValidatorShares: 0, + TestVerificationNode: true, + }, + // Builder 2 + node{ + ExecutionClient: n.ExecutionClient, + ConsensusClient: n.ConsensusClient, + // We are going to duplicate keys from builder 1 + ValidatorShares: 0, + // Don't start until later in the run + DisableStartup: true, + }, + }, + AltairForkEpoch: common.Big1, + MergeForkEpoch: big.NewInt(4), // Slot 128 + Eth1Consensus: setup.Eth1EthashConsensus{ + MiningNodes: 2, + }, + SafeSlotsToImportOptimistically: safeSlotsToImportOptimistically, + }) + + testnet := startTestnet(t, env, config) + defer testnet.stopTestnet() + + var ( + builder1 = testnet.BeaconClients()[0] + importer = testnet.BeaconClients()[1] + 
builder1Proxy = testnet.Proxies().Running()[0] + importerProxy = testnet.Proxies().Running()[1] + builder2Proxy *Proxy // Not yet started + ) + + importerNewPayloadResponseMocker := NewEngineResponseMocker(nil) + importerFcUResponseMocker := NewEngineResponseMocker(nil) + importerNewPayloadResponseMocker.AddNewPayloadCallbackToProxy(importerProxy) + importerFcUResponseMocker.AddForkchoiceUpdatedCallbackToProxy(importerProxy) + + // We will count the number of payloads produced by builder 1. + // On Payload 33, the test continues. + var ( + getPayloadCount int + latestValidHash common.Hash + invalidHashes = make([]common.Hash, 0) + invalidPayload common.Hash + ) + getPayloadCallback := func(res []byte, req []byte) *proxy.Spoof { + getPayloadCount++ + var ( + payload ExecutableDataV1 + err error + ) + err = UnmarshalFromJsonRPCResponse(res, &payload) + if err != nil { + panic(err) + } + if getPayloadCount >= 10 && getPayloadCount <= 32 { + if getPayloadCount == 10 { + latestValidHash = payload.BlockHash + } else { + invalidHashes = append(invalidHashes, payload.BlockHash) + } + + // Return syncing for these payloads + importerNewPayloadResponseMocker.AddResponse(payload.BlockHash, &EngineResponse{ + Status: Syncing, + LatestValidHash: nil, + }) + importerFcUResponseMocker.AddResponse(payload.BlockHash, &EngineResponse{ + Status: Syncing, + LatestValidHash: nil, + }) + } else if getPayloadCount >= 33 { + // Invalidate these payloads + if getPayloadCount == 33 { + invalidPayload = payload.BlockHash + for _, h := range invalidHashes { + importerNewPayloadResponseMocker.AddResponse(h, &EngineResponse{ + Status: Invalid, + LatestValidHash: &latestValidHash, + }) + importerFcUResponseMocker.AddResponse(h, &EngineResponse{ + Status: Invalid, + LatestValidHash: &latestValidHash, + }) + } + + // Validate latest valid hash too + importerNewPayloadResponseMocker.AddResponse(latestValidHash, &EngineResponse{ + Status: Valid, + LatestValidHash: &latestValidHash, + }) + 
importerFcUResponseMocker.AddResponse(latestValidHash, &EngineResponse{ + Status: Valid, + LatestValidHash: &latestValidHash, + }) + + } + + invalidHashes = append(invalidHashes, payload.BlockHash) + importerNewPayloadResponseMocker.AddResponse(payload.BlockHash, &EngineResponse{ + Status: Invalid, + LatestValidHash: &latestValidHash, + }) + importerFcUResponseMocker.AddResponse(payload.BlockHash, &EngineResponse{ + Status: Invalid, + LatestValidHash: &latestValidHash, + }) + + } + return nil + } + builder1Proxy.AddResponseCallback(EngineGetPayloadV1, getPayloadCallback) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var err error + _, err = testnet.WaitForExecutionPayload(ctx, 1000) + if err != nil { + t.Fatalf("Error waiting for execution payload") + } +forloop: + for { + select { + case eR := <-importerNewPayloadResponseMocker.NewPayloadCalled: + if invalidPayload != (common.Hash{}) && eR.Hash == invalidPayload && eR.Response.Status == Invalid { + // The payload has been invalidated + t.Logf("INFO: Payload %s was correctly invalidated (NewPayload)", invalidPayload) + break forloop + } + case eR := <-importerFcUResponseMocker.ForkchoiceUpdatedCalled: + if invalidPayload != (common.Hash{}) && eR.Hash == invalidPayload { + if eR.Response.Status == Invalid { + // The payload has been invalidated + t.Logf("INFO: Payload %s was correctly invalidated (ForkchoiceUpdated)", invalidPayload) + break forloop + } else { + t.Fatalf("FAIL: Payload was not invalidated") + } + } + case <-ctx.Done(): + t.Fatalf("FAIL: Context done while waiting for invalidated payload") + } + } + + // Sleep a few seconds so the invalid payload is incorporated into the chain + time.Sleep(time.Duration(new(big.Int).Div(config.SlotTime, common.Big2).Int64()/2) * time.Second) + + // We need to check that the latestValidHash Block is indeed optimistic + // First look for the block on the builder + lvhBeaconBlock, err := builder1.GetBeaconBlockByExecutionHash(ctx, 
latestValidHash) + if err != nil { + t.Fatalf("FAIL: Error querying latest valid hash from builder 1: %v", err) + } + t.Logf("INFO: latest valid hash from builder 1: slot %d, root %v", lvhBeaconBlock.Message.Slot, lvhBeaconBlock.Message.StateRoot) + + lastInvalidBeaconBlock, err := builder1.GetBeaconBlockByExecutionHash(ctx, invalidPayload) + if err != nil { + t.Fatalf("FAIL: Error querying latest invalid hash from builder 1: %v", err) + } + t.Logf("INFO: latest invalid hash from builder 1: slot %d, root %v", lastInvalidBeaconBlock.Message.Slot, lastInvalidBeaconBlock.Message.StateRoot) + + // Check whether the importer is still optimistic for these blocks + var ( + retriesLeft = 20 + ) + for { + // Retry several times in order to give the node some time to re-org if necessary + if retriesLeft--; retriesLeft == 0 { + importer.PrintAllBeaconBlocks(ctx) + t.Fatalf("FAIL: Unable to get latestValidHash block: %v", err) + } + time.Sleep(time.Second) + t.Logf("INFO: retry %d to obtain beacon block at height %d", 20-retriesLeft, lvhBeaconBlock.Message.Slot) + + if opt, err := importer.CheckBlockIsOptimistic(ctx, eth2api.BlockIdSlot(lvhBeaconBlock.Message.Slot)); err != nil { + continue + } else if opt { + t.Logf("INFO: Payload %s is optimistic from the importer's perspective", latestValidHash) + break + } else { + t.Logf("INFO: Payload %s is NOT optimistic from the importer's perspective", latestValidHash) + break + } + } + + // Shutdown the builder 1 + builder1.Shutdown() + + // Start builder 2 + // First start the execution node to set the proxy + testnet.ExecutionClients()[2].Start() + + builder2Proxy = testnet.ExecutionClients()[2].Proxy() + + if builder2Proxy == nil { + t.Fatalf("FAIL: Proxy failed to start") + } + + importerNewPayloadResponseMocker.AddNewPayloadCallbackToProxy(builder2Proxy) + importerFcUResponseMocker.AddForkchoiceUpdatedCallbackToProxy(builder2Proxy) + + // Then start the beacon node + testnet.BeaconClients()[2].Start() + // Finally start the 
validator client reusing the keys of the first builder + testnet.ValidatorClients()[2].Keys = testnet.ValidatorClients()[0].Keys + testnet.ValidatorClients()[2].Start() + + c, err := testnet.WaitForCurrentEpochFinalization(ctx, testnet.spec.SLOTS_PER_EPOCH*3) + if err != nil { + t.Fatalf("FAIL: Error waiting for finality after builder 2 started: %v", err) + } + t.Logf("INFO: Finality reached after builder 2 started: epoch %v", c.Epoch) + + // Check that importer is no longer optimistic + if opt, err := importer.CheckBlockIsOptimistic(ctx, eth2api.BlockHead); err != nil { + t.Fatalf("FAIL: Error querying optimistic status after finalization on importer: %v", err) + } else if opt { + t.Fatalf("FAIL: Importer is still optimistic after finalization: execution_optimistic=%t", opt) + } + + // Check that neither the first invalid payload nor the last invalid payload are included in the importer + if b, err := importer.GetBeaconBlockByExecutionHash(ctx, invalidHashes[0]); err != nil { + t.Fatalf("FAIL: Error querying invalid payload: %v", err) + } else if b != nil { + t.Fatalf("FAIL: Invalid payload found in importer chain: %d, %v", b.Message.Slot, b.Message.StateRoot) + } + if b, err := importer.GetBeaconBlockByExecutionHash(ctx, invalidPayload); err != nil { + t.Fatalf("FAIL: Error querying invalid payload: %v", err) + } else if b != nil { + t.Fatalf("FAIL: Invalid payload found in importer chain: %d, %v", b.Message.Slot, b.Message.StateRoot) + } +} From 00c5cdd619fa0c0cf1bca3bf9d028f8844ce5eb4 Mon Sep 17 00:00:00 2001 From: marioevz Date: Thu, 18 Aug 2022 01:25:28 +0000 Subject: [PATCH 3/8] simulators/eth2/engine: add optional extra options on client start --- simulators/eth2/engine/nodes.go | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/simulators/eth2/engine/nodes.go b/simulators/eth2/engine/nodes.go index 4fd389ac81..fdc5884e03 100644 --- a/simulators/eth2/engine/nodes.go +++ b/simulators/eth2/engine/nodes.go @@ -71,7 +71,7 
@@ func (en *ExecutionClient) MustGetEnode() string { return addr } -func (en *ExecutionClient) Start() error { +func (en *ExecutionClient) Start(extraOptions ...hivesim.StartOption) error { if en.HiveClient != nil { return fmt.Errorf("Client already started") } @@ -80,6 +80,9 @@ func (en *ExecutionClient) Start() error { if err != nil { return fmt.Errorf("Unable to get start options: %v", err) } + for _, opt := range extraOptions { + opts = append(opts, opt) + } en.HiveClient = en.T.StartClient(en.ClientType, opts...) // Prepare proxy @@ -183,7 +186,7 @@ func NewBeaconClient(t *hivesim.T, beaconDef *hivesim.ClientDefinition, optionsG } } -func (bn *BeaconClient) Start() error { +func (bn *BeaconClient) Start(extraOptions ...hivesim.StartOption) error { if bn.HiveClient != nil { return fmt.Errorf("Client already started") } @@ -192,6 +195,9 @@ func (bn *BeaconClient) Start() error { if err != nil { return fmt.Errorf("Unable to get start options: %v", err) } + for _, opt := range extraOptions { + opts = append(opts, opt) + } bn.HiveClient = bn.T.StartClient(bn.ClientType, opts...) bn.API = ð2api.Eth2HttpClient{ Addr: fmt.Sprintf("http://%s:%d", bn.HiveClient.IP, PortBeaconAPI), @@ -489,7 +495,7 @@ func NewValidatorClient(t *hivesim.T, validatorDef *hivesim.ClientDefinition, op } } -func (vc *ValidatorClient) Start() error { +func (vc *ValidatorClient) Start(extraOptions ...hivesim.StartOption) error { if vc.HiveClient != nil { return fmt.Errorf("Client already started") } @@ -502,6 +508,9 @@ func (vc *ValidatorClient) Start() error { if err != nil { return fmt.Errorf("Unable to get start options: %v", err) } + for _, opt := range extraOptions { + opts = append(opts, opt) + } vc.HiveClient = vc.T.StartClient(vc.ClientType, opts...) 
return nil } @@ -546,24 +555,24 @@ type NodeClientBundle struct { } // Starts all clients included in the bundle -func (cb *NodeClientBundle) Start() error { +func (cb *NodeClientBundle) Start(extraOptions ...hivesim.StartOption) error { cb.T.Logf("Starting validator client bundle %d", cb.Index) if cb.ExecutionClient != nil { - if err := cb.ExecutionClient.Start(); err != nil { + if err := cb.ExecutionClient.Start(extraOptions...); err != nil { return err } } else { cb.T.Logf("No execution client started") } if cb.BeaconClient != nil { - if err := cb.BeaconClient.Start(); err != nil { + if err := cb.BeaconClient.Start(extraOptions...); err != nil { return err } } else { cb.T.Logf("No beacon client started") } if cb.ValidatorClient != nil { - if err := cb.ValidatorClient.Start(); err != nil { + if err := cb.ValidatorClient.Start(extraOptions...); err != nil { return err } } else { From 42035c3ac7a76bd40277827e39b2f3549f857b3c Mon Sep 17 00:00:00 2001 From: marioevz Date: Thu, 18 Aug 2022 16:53:09 +0000 Subject: [PATCH 4/8] simulators/eth2/engine: fix shutdown --- simulators/eth2/engine/nodes.go | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/simulators/eth2/engine/nodes.go b/simulators/eth2/engine/nodes.go index fdc5884e03..de2b7ce7a1 100644 --- a/simulators/eth2/engine/nodes.go +++ b/simulators/eth2/engine/nodes.go @@ -102,8 +102,7 @@ func (en *ExecutionClient) Start(extraOptions ...hivesim.StartOption) error { } func (en *ExecutionClient) Shutdown() error { - _, err := en.HiveClient.Exec("shutdown.sh") - if err != nil { + if err := en.T.Sim.StopClient(en.T.SuiteID, en.T.TestID, en.HiveClient.Container); err != nil { return err } en.HiveClient = nil @@ -208,8 +207,7 @@ func (bn *BeaconClient) Start(extraOptions ...hivesim.StartOption) error { } func (bn *BeaconClient) Shutdown() error { - _, err := bn.HiveClient.Exec("shutdown.sh") - if err != nil { + if err := bn.T.Sim.StopClient(bn.T.SuiteID, bn.T.TestID, 
bn.HiveClient.Container); err != nil { return err } bn.HiveClient = nil @@ -515,6 +513,14 @@ func (vc *ValidatorClient) Start(extraOptions ...hivesim.StartOption) error { return nil } +func (vc *ValidatorClient) Shutdown() error { + if err := vc.T.Sim.StopClient(vc.T.SuiteID, vc.T.TestID, vc.HiveClient.Container); err != nil { + return err + } + vc.HiveClient = nil + return nil +} + func (vc *ValidatorClient) IsRunning() bool { return vc.HiveClient != nil } @@ -581,6 +587,19 @@ func (cb *NodeClientBundle) Start(extraOptions ...hivesim.StartOption) error { return nil } +func (cb *NodeClientBundle) Shutdown() error { + if err := cb.ExecutionClient.Shutdown(); err != nil { + return err + } + if err := cb.BeaconClient.Shutdown(); err != nil { + return err + } + if err := cb.ValidatorClient.Shutdown(); err != nil { + return err + } + return nil +} + type NodeClientBundles []NodeClientBundle // Return all execution clients, even the ones not currently running From d85760f9edf13c0ca5ea80c4587a7f3f4bfa868c Mon Sep 17 00:00:00 2001 From: marioevz Date: Thu, 18 Aug 2022 16:53:35 +0000 Subject: [PATCH 5/8] simulators/eth2/engine: fix optimistic expectation --- simulators/eth2/engine/scenarios.go | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/simulators/eth2/engine/scenarios.go b/simulators/eth2/engine/scenarios.go index e3977763ac..020faef986 100644 --- a/simulators/eth2/engine/scenarios.go +++ b/simulators/eth2/engine/scenarios.go @@ -1882,7 +1882,7 @@ forloop: } // Sleep a few seconds so the invalid payload is incorporated into the chain - time.Sleep(time.Duration(new(big.Int).Div(config.SlotTime, common.Big2).Int64()/2) * time.Second) + time.Sleep(time.Duration(config.SlotTime.Int64()/2) * time.Second) // We need to check that the latestValidHash Block is indeed optimistic // First look for the block on the builder @@ -1911,14 +1911,13 @@ forloop: time.Sleep(time.Second) t.Logf("INFO: retry %d to obtain beacon block at height %d", 
20-retriesLeft, lvhBeaconBlock.Message.Slot) - if opt, err := importer.CheckBlockIsOptimistic(ctx, eth2api.BlockIdSlot(lvhBeaconBlock.Message.Slot)); err != nil { + if opt, err := importer.CheckBlockIsOptimistic(ctx, eth2api.BlockHead); err != nil { continue } else if opt { - t.Logf("INFO: Payload %s is optimistic from the importer's perspective", latestValidHash) + t.Logf("INFO: Head is optimistic from the importer's perspective") break } else { - t.Logf("INFO: Payload %s is NOT optimistic from the importer's perspective", latestValidHash) - break + t.Fatalf("FAIL: Head is NOT optimistic from the importer's perspective") } } @@ -1927,7 +1926,9 @@ forloop: // Start builder 2 // First start the execution node to set the proxy - testnet.ExecutionClients()[2].Start() + if err := testnet.ExecutionClients()[2].Start(); err != nil { + t.Fatalf("FAIL: Unable to start execution client: %v", err) + } builder2Proxy = testnet.ExecutionClients()[2].Proxy() @@ -1939,10 +1940,14 @@ forloop: importerFcUResponseMocker.AddForkchoiceUpdatedCallbackToProxy(builder2Proxy) // Then start the beacon node - testnet.BeaconClients()[2].Start() + if err := testnet.BeaconClients()[2].Start(); err != nil { + t.Fatalf("FAIL: Unable to start beacon client: %v", err) + } // Finally start the validator client reusing the keys of the first builder testnet.ValidatorClients()[2].Keys = testnet.ValidatorClients()[0].Keys - testnet.ValidatorClients()[2].Start() + if err := testnet.ValidatorClients()[2].Start(); err != nil { + t.Fatalf("FAIL: Unable to start validator client: %v", err) + } c, err := testnet.WaitForCurrentEpochFinalization(ctx, testnet.spec.SLOTS_PER_EPOCH*3) if err != nil { From 2d47c8cb5f2799c2452ab9be2fde522f4de411f3 Mon Sep 17 00:00:00 2001 From: marioevz Date: Fri, 19 Aug 2022 04:11:51 +0000 Subject: [PATCH 6/8] simulators/eth2/engine: More info print --- simulators/eth2/engine/running_testnet.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/simulators/eth2/engine/running_testnet.go b/simulators/eth2/engine/running_testnet.go index b9e595aa3f..b611cb62ef 100644 --- a/simulators/eth2/engine/running_testnet.go +++ b/simulators/eth2/engine/running_testnet.go @@ -366,7 +366,7 @@ func (t *Testnet) WaitForCurrentEpochFinalization(ctx context.Context, timeoutSl fmt.Printf("WARN: beacon %d: %s\n", i, err) } - ch <- res{i, fmt.Sprintf("beacon %d: slot=%d, head=%s, health=%.2f, exec_payload=%s, justified=%s, finalized=%s", i, slot, head, health, execution, justified, finalized), nil} + ch <- res{i, fmt.Sprintf("beacon %d: slot=%d, head=%s, health=%.2f, exec_payload=%s, justified=%s, finalized=%s, epoch_to_finalize=%d", i, slot, head, health, execution, justified, finalized, epochToBeFinalized), nil} if checkpoints.Finalized != (common.Checkpoint{}) && checkpoints.Finalized.Epoch >= epochToBeFinalized { done <- checkpoints.Finalized From f285a5aa5340ffc9b3b4ab67b8d62e8c0c75e4ce Mon Sep 17 00:00:00 2001 From: marioevz Date: Fri, 19 Aug 2022 15:22:54 +0000 Subject: [PATCH 7/8] simulators/eth2/engine: Add debug printing --- simulators/eth2/engine/nodes.go | 1 + simulators/eth2/engine/scenarios.go | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/simulators/eth2/engine/nodes.go b/simulators/eth2/engine/nodes.go index de2b7ce7a1..35960649d0 100644 --- a/simulators/eth2/engine/nodes.go +++ b/simulators/eth2/engine/nodes.go @@ -272,6 +272,7 @@ func (b *BeaconClient) PrintAllBeaconBlocks(ctx context.Context) error { } else if !exists { return fmt.Errorf("PrintAllBeaconBlocks: failed to poll head: !exists") } + fmt.Printf("PrintAllBeaconBlocks: Printing beacon chain from %s\n", b.HiveClient.Container) fmt.Printf("PrintAllBeaconBlocks: Head, slot %d, root %v\n", headInfo.Header.Message.Slot, headInfo.Root) for i := 1; i <= int(headInfo.Header.Message.Slot); i++ { var bHeader eth2api.BeaconBlockHeaderAndInfo diff --git a/simulators/eth2/engine/scenarios.go b/simulators/eth2/engine/scenarios.go index 
020faef986..6367d82216 100644 --- a/simulators/eth2/engine/scenarios.go +++ b/simulators/eth2/engine/scenarios.go @@ -1766,7 +1766,9 @@ func NoViableHeadDueToOptimisticSync(t *hivesim.T, env *testEnv, n node) { importer = testnet.BeaconClients()[1] builder1Proxy = testnet.Proxies().Running()[0] importerProxy = testnet.Proxies().Running()[1] - builder2Proxy *Proxy // Not yet started + // Not yet started + builder2 = testnet.BeaconClients()[2] + builder2Proxy *Proxy ) importerNewPayloadResponseMocker := NewEngineResponseMocker(nil) @@ -1951,6 +1953,8 @@ forloop: c, err := testnet.WaitForCurrentEpochFinalization(ctx, testnet.spec.SLOTS_PER_EPOCH*3) if err != nil { + importer.PrintAllBeaconBlocks(ctx) + builder2.PrintAllBeaconBlocks(ctx) t.Fatalf("FAIL: Error waiting for finality after builder 2 started: %v", err) } t.Logf("INFO: Finality reached after builder 2 started: epoch %v", c.Epoch) From c8af5d8ff51c75d1995d29c1a35496abe6bd17b6 Mon Sep 17 00:00:00 2001 From: marioevz Date: Fri, 19 Aug 2022 20:39:39 +0000 Subject: [PATCH 8/8] simulators/eth2/engine: Remove unnecessary print --- simulators/eth2/engine/running_testnet.go | 28 +---------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/simulators/eth2/engine/running_testnet.go b/simulators/eth2/engine/running_testnet.go index b611cb62ef..f3f744a152 100644 --- a/simulators/eth2/engine/running_testnet.go +++ b/simulators/eth2/engine/running_testnet.go @@ -313,8 +313,6 @@ func (t *Testnet) WaitForCurrentEpochFinalization(ctx context.Context, timeoutSl head string justified string finalized string - execution string - health float64 ) var headInfo eth2api.BeaconBlockHeaderAndInfo @@ -337,36 +335,12 @@ func (t *Testnet) WaitForCurrentEpochFinalization(ctx context.Context, timeoutSl } } - var versionedBlock eth2api.VersionedSignedBeaconBlock - if exists, err := beaconapi.BlockV2(ctx, b.API, eth2api.BlockIdRoot(headInfo.Root), &versionedBlock); err != nil { - ch <- res{err: fmt.Errorf("beacon %d: 
failed to retrieve block: %v", i, err)} - return - } else if !exists { - ch <- res{err: fmt.Errorf("beacon %d: block not found", i)} - return - } - switch versionedBlock.Version { - case "phase0": - execution = "0x0000..0000" - case "altair": - execution = "0x0000..0000" - case "bellatrix": - block := versionedBlock.Data.(*bellatrix.SignedBeaconBlock) - execution = shorten(block.Message.Body.ExecutionPayload.BlockHash.String()) - } - slot = headInfo.Header.Message.Slot head = shorten(headInfo.Root.String()) justified = shorten(checkpoints.CurrentJustified.String()) finalized = shorten(checkpoints.Finalized.String()) - health, err := getHealth(ctx, b.API, t.spec, slot) - if err != nil { - // warning is printed here instead because some clients - // don't support the required REST endpoint. - fmt.Printf("WARN: beacon %d: %s\n", i, err) - } - ch <- res{i, fmt.Sprintf("beacon %d: slot=%d, head=%s, health=%.2f, exec_payload=%s, justified=%s, finalized=%s, epoch_to_finalize=%d", i, slot, head, health, execution, justified, finalized, epochToBeFinalized), nil} + ch <- res{i, fmt.Sprintf("beacon %d: slot=%d, head=%s, justified=%s, finalized=%s, epoch_to_finalize=%d", i, slot, head, justified, finalized, epochToBeFinalized), nil} if checkpoints.Finalized != (common.Checkpoint{}) && checkpoints.Finalized.Epoch >= epochToBeFinalized { done <- checkpoints.Finalized