diff --git a/beacon_chain/consensus_object_pools/block_clearance.nim b/beacon_chain/consensus_object_pools/block_clearance.nim index 05b9be75f9..59c124d40c 100644 --- a/beacon_chain/consensus_object_pools/block_clearance.nim +++ b/beacon_chain/consensus_object_pools/block_clearance.nim @@ -34,12 +34,13 @@ proc addResolvedHeadBlock( trustedBlock: ForkyTrustedSignedBeaconBlock, optimisticStatus: OptimisticStatus, parent: BlockRef, cache: var StateCache, - onBlockAdded: OnForkyBlockAdded, + onBlockAdded: OnBlockAdded, stateDataDur, sigVerifyDur, stateVerifyDur: Duration ): BlockRef = doAssert state.matches_block_slot( trustedBlock.root, trustedBlock.message.slot), "Given state must have the new block applied" + const consensusFork = typeof(trustedBlock).kind let blockRoot = trustedBlock.root @@ -100,12 +101,15 @@ proc addResolvedHeadBlock( # Notify others of the new block before processing the quarantine, such that # notifications for parents happens before those of the children if onBlockAdded != nil: - let unrealized = withState(state): + let unrealized = when consensusFork >= ConsensusFork.Altair: - forkyState.data.compute_unrealized_finality() + state.forky(consensusFork).data.compute_unrealized_finality() else: - forkyState.data.compute_unrealized_finality(cache) - onBlockAdded(blockRef, trustedBlock, epochRef, unrealized) + state.forky(consensusFork).data.compute_unrealized_finality(cache) + onBlockAdded( + blockRef, trustedBlock, state.forky(consensusFork).data, epochRef, unrealized + ) + if not(isNil(dag.onBlockAdded)): dag.onBlockAdded(ForkedTrustedSignedBeaconBlock.init(trustedBlock)) @@ -135,29 +139,6 @@ proc checkStateTransition( else: ok() -proc advanceClearanceState*(dag: ChainDAGRef, nextSlot: Slot) = - # When the chain is synced, the most likely block to be produced is the block - # right after head - we can exploit this assumption and advance the state - # to that slot before the block arrives, thus allowing us to do the expensive - # epoch transition ahead of time. - # Notably, we use the clearance state here because that's where the block will - # first be seen - later, this state will be copied to the head state! - let head = dag.head - if dag.clearanceState.matches_block_slot(head.root, nextSlot): - return - - let startTick = Moment.now() - var cache = StateCache() - if dag.updateState( - dag.clearanceState, - BlockSlotId.init(head.bid, nextSlot), - true, - cache, - dag.updateFlags, - ): - debug "Prepared clearance state for next block", - nextSlot, head, updateStateDur = Moment.now() - startTick - proc checkHeadBlock*( dag: ChainDAGRef, signedBlock: ForkySignedBeaconBlock): Result[BlockRef, VerifierError] = @@ -228,7 +209,7 @@ proc checkHeadBlock*( proc addHeadBlockWithParent*( dag: ChainDAGRef, verifier: var BatchVerifier, signedBlock: ForkySignedBeaconBlock, parent: BlockRef, - optimisticStatus: OptimisticStatus, onBlockAdded: OnForkyBlockAdded + optimisticStatus: OptimisticStatus, onBlockAdded: OnBlockAdded ): Result[BlockRef, VerifierError] = ## Try adding a block to the chain, verifying first that it passes the state ## transition function and contains correct cryptographic signature. 
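[Editor's note] The hunks above replace the per-fork callback aliases with a single generic `OnBlockAdded` and read fork-specific state through `state.forky(consensusFork)`, with `consensusFork` fixed at compile time via `typeof(trustedBlock).kind`. A minimal, self-contained sketch of this static-dispatch pattern, using simplified stand-in types rather than the real Nimbus definitions:

type
  ConsensusFork = enum
    Phase0, Altair

  Phase0State = object
    slot: uint64
  AltairState = object
    slot: uint64
    syncBits: uint64

  ForkedState = object
    case kind: ConsensusFork
    of ConsensusFork.Phase0:
      phase0Data: Phase0State
    of ConsensusFork.Altair:
      altairData: AltairState

template forky(x: ForkedState, fork: static ConsensusFork): untyped =
  # `fork` is a compile-time value, so each instantiation reduces to a plain
  # field access - no runtime fork dispatch remains
  when fork == ConsensusFork.Altair:
    x.altairData
  else:
    x.phase0Data

proc report(state: ForkedState, fork: static ConsensusFork) =
  when fork >= ConsensusFork.Altair:
    # fields that exist only from Altair onwards can be used safely here
    echo "sync bits: ", state.forky(fork).syncBits
  else:
    echo "slot: ", state.forky(fork).slot

when isMainModule:
  let s = ForkedState(
    kind: ConsensusFork.Altair, altairData: AltairState(slot: 1, syncBits: 3))
  s.report(ConsensusFork.Altair)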
@@ -347,8 +328,7 @@ proc addBackfillBlock*( info "Invalid genesis block signature" return err(VerifierError.Invalid) else: - let proposerKey = dag.validatorKey(blck.proposer_index) - if proposerKey.isNone(): + let proposerKey = dag.validatorKey(blck.proposer_index).valueOr: # We've verified that the block root matches our expectations by following # the chain of parents all the way from checkpoint. If all those blocks # were valid, the proposer_index in this block must also be valid, and we @@ -365,7 +345,7 @@ proc addBackfillBlock*( getStateField(dag.headState, genesis_validators_root), blck.slot, signedBlock.root, - proposerKey.get(), + proposerKey, signedBlock.signature): info "Block signature verification failed" return err(VerifierError.Invalid) @@ -455,26 +435,6 @@ proc addBackfillBlock*( ok() -template BlockAdded(kind: static ConsensusFork): untyped = - when kind == ConsensusFork.Gloas: - OnGloasBlockAdded - elif kind == ConsensusFork.Fulu: - OnFuluBlockAdded - elif kind == ConsensusFork.Electra: - OnElectraBlockAdded - elif kind == ConsensusFork.Deneb: - OnDenebBlockAdded - elif kind == ConsensusFork.Capella: - OnCapellaBlockAdded - elif kind == ConsensusFork.Bellatrix: - OnBellatrixBlockAdded - elif kind == ConsensusFork.Altair: - OnAltairBlockAdded - elif kind == ConsensusFork.Phase0: - OnPhase0BlockAdded - else: - static: raiseAssert "Unreachable" - proc verifyBlockProposer*( verifier: var BatchVerifier, fork: Fork, @@ -494,73 +454,57 @@ proc verifyBlockProposer*( proc addBackfillBlockData*( dag: ChainDAGRef, + consensusFork: static ConsensusFork, bdata: BlockData, onStateUpdated: OnStateUpdated, - onBlockAdded: OnForkedBlockAdded + onBlockAdded: OnBlockAdded, ): Result[void, VerifierError] = var cache = StateCache() + template forkyBlck: untyped = bdata.blck.forky(consensusFork) + let + parent = checkHeadBlock(dag, forkyBlck).valueOr: + if error == VerifierError.Duplicate: + return ok() + return err(error) + startTick = Moment.now() + clearanceBlock = BlockSlotId.init(parent.bid, forkyBlck.message.slot) + updateFlags1 = dag.updateFlags + # TODO (cheatfate): {skipLastStateRootCalculation} flag here could + # improve performance by 100%, but this approach needs some + # improvements, which is unclear. + + if not updateState(dag, dag.clearanceState, clearanceBlock, true, cache, + updateFlags1): + error "Unable to load clearance state for parent block, " & + "database corrupt?", clearanceBlock = shortLog(clearanceBlock) + return err(VerifierError.MissingParent) - withBlck(bdata.blck): - let - parent = checkHeadBlock(dag, forkyBlck).valueOr: - if error == VerifierError.Duplicate: - return ok() - return err(error) - startTick = Moment.now() - clearanceBlock = BlockSlotId.init(parent.bid, forkyBlck.message.slot) - updateFlags1 = dag.updateFlags - # TODO (cheatfate): {skipLastStateRootCalculation} flag here could - # improve performance by 100%, but this approach needs some - # improvements, which is unclear. - - if not updateState(dag, dag.clearanceState, clearanceBlock, true, cache, - updateFlags1): - error "Unable to load clearance state for parent block, " & - "database corrupt?", clearanceBlock = shortLog(clearanceBlock) - return err(VerifierError.MissingParent) + let proposerVerifyTick = Moment.now() - let proposerVerifyTick = Moment.now() - - if not(isNil(onStateUpdated)): - ? onStateUpdated(forkyBlck.message.slot) - - let - stateDataTick = Moment.now() - updateFlags2 = - dag.updateFlags + {skipBlsValidation, skipStateRootValidation} - - ? 
checkStateTransition(dag, forkyBlck.asSigVerified(), cache, updateFlags2) - - let stateVerifyTick = Moment.now() - - if bdata.blob.isSome(): - for blob in bdata.blob.get(): - dag.db.putBlobSidecar(blob[]) - - type Trusted = typeof forkyBlck.asTrusted() - - proc onBlockAddedHandler( - blckRef: BlockRef, - trustedBlock: Trusted, - epochRef: EpochRef, - unrealized: FinalityCheckpoints - ) {.gcsafe, raises: [].} = - onBlockAdded( - blckRef, - ForkedTrustedSignedBeaconBlock.init(trustedBlock), - epochRef, - unrealized) - - let blockHandler: BlockAdded(consensusFork) = onBlockAddedHandler - - discard addResolvedHeadBlock( - dag, dag.clearanceState, - forkyBlck.asTrusted(), - OptimisticStatus.notValidated, - parent, cache, - blockHandler, - proposerVerifyTick - startTick, - stateDataTick - proposerVerifyTick, - stateVerifyTick - stateDataTick) + if not(isNil(onStateUpdated)): + ? onStateUpdated(forkyBlck.message.slot) + + let + stateDataTick = Moment.now() + updateFlags2 = + dag.updateFlags + {skipBlsValidation, skipStateRootValidation} + + ? checkStateTransition(dag, forkyBlck.asSigVerified(), cache, updateFlags2) + + let stateVerifyTick = Moment.now() + + if bdata.blob.isSome(): + for blob in bdata.blob.get(): + dag.db.putBlobSidecar(blob[]) + + discard addResolvedHeadBlock( + dag, dag.clearanceState, + forkyBlck.asTrusted(), + OptimisticStatus.notValidated, + parent, cache, + onBlockAdded, + proposerVerifyTick - startTick, + stateDataTick - proposerVerifyTick, + stateVerifyTick - stateDataTick) ok() diff --git a/beacon_chain/consensus_object_pools/block_pools_types.nim b/beacon_chain/consensus_object_pools/block_pools_types.nim index 977bd2d3a0..c2b4865c75 100644 --- a/beacon_chain/consensus_object_pools/block_pools_types.nim +++ b/beacon_chain/consensus_object_pools/block_pools_types.nim @@ -299,25 +299,9 @@ type blck*: ForkedSignedBeaconBlock blob*: Opt[BlobSidecars] - OnBlockAdded*[T: ForkyTrustedSignedBeaconBlock] = proc( - blckRef: BlockRef, blck: T, epochRef: EpochRef, - unrealized: FinalityCheckpoints) {.gcsafe, raises: [].} - OnPhase0BlockAdded* = OnBlockAdded[phase0.TrustedSignedBeaconBlock] - OnAltairBlockAdded* = OnBlockAdded[altair.TrustedSignedBeaconBlock] - OnBellatrixBlockAdded* = OnBlockAdded[bellatrix.TrustedSignedBeaconBlock] - OnCapellaBlockAdded* = OnBlockAdded[capella.TrustedSignedBeaconBlock] - OnDenebBlockAdded* = OnBlockAdded[deneb.TrustedSignedBeaconBlock] - OnElectraBlockAdded* = OnBlockAdded[electra.TrustedSignedBeaconBlock] - OnFuluBlockAdded* = OnBlockAdded[fulu.TrustedSignedBeaconBlock] - OnGloasBlockAdded* = OnBlockAdded[gloas.TrustedSignedBeaconBlock] - - OnForkyBlockAdded* = - OnPhase0BlockAdded | OnAltairBlockAdded | OnBellatrixBlockAdded | - OnCapellaBlockAdded | OnDenebBlockAdded | OnElectraBlockAdded | - OnFuluBlockAdded | OnGloasBlockAdded - - OnForkedBlockAdded* = proc( - blckRef: BlockRef, blck: ForkedTrustedSignedBeaconBlock, epochRef: EpochRef, + OnBlockAdded*[consensusFork: static ConsensusFork] = proc( + blckRef: BlockRef, blck: consensusFork.TrustedSignedBeaconBlock, + state: consensusFork.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints) {.gcsafe, raises: [].} OnStateUpdated* = proc( @@ -356,26 +340,6 @@ type slot*: Slot block_root* {.serializedFieldName: "block".}: Eth2Digest -template OnBlockAddedCallback*(kind: static ConsensusFork): auto = - when kind == ConsensusFork.Gloas: - typedesc[OnGloasBlockAdded] - elif kind == ConsensusFork.Fulu: - typedesc[OnFuluBlockAdded] - elif kind == ConsensusFork.Electra: - 
typedesc[OnElectraBlockAdded] - elif kind == ConsensusFork.Deneb: - typedesc[OnDenebBlockAdded] - elif kind == ConsensusFork.Capella: - typedesc[OnCapellaBlockAdded] - elif kind == ConsensusFork.Bellatrix: - typedesc[OnBellatrixBlockAdded] - elif kind == ConsensusFork.Altair: - typedesc[OnAltairBlockAdded] - elif kind == ConsensusFork.Phase0: - typedesc[OnPhase0BlockAdded] - else: - static: raiseAssert "Unreachable" - func proposer_dependent_slot*(epochRef: EpochRef): Slot = epochRef.key.epoch.proposer_dependent_slot() diff --git a/beacon_chain/consensus_object_pools/blockchain_dag.nim b/beacon_chain/consensus_object_pools/blockchain_dag.nim index 084dbb6b76..b11d928c95 100644 --- a/beacon_chain/consensus_object_pools/blockchain_dag.nim +++ b/beacon_chain/consensus_object_pools/blockchain_dag.nim @@ -990,53 +990,10 @@ proc applyBlock( updateFlags: UpdateFlags): Result[void, cstring] = loadStateCache(dag, cache, bid, getStateField(state, slot).epoch) - discard case dag.cfg.consensusForkAtEpoch(bid.slot.epoch) - of ConsensusFork.Phase0: - let data = getBlock(dag, bid, phase0.TrustedSignedBeaconBlock).valueOr: + withConsensusFork(dag.cfg.consensusForkAtEpoch(bid.slot.epoch)): + let data = getBlock(dag, bid, consensusFork.TrustedSignedBeaconBlock).valueOr: return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Altair: - let data = getBlock(dag, bid, altair.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Bellatrix: - let data = getBlock(dag, bid, bellatrix.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Capella: - let data = getBlock(dag, bid, capella.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Deneb: - let data = getBlock(dag, bid, deneb.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Electra: - let data = getBlock(dag, bid, electra.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Fulu: - let data = getBlock(dag, bid, fulu.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Gloas: - let data = getBlock(dag, bid, gloas.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( + discard ? 
state_transition( dag.cfg, state, data, cache, info, updateFlags + {slotProcessed}, noRollback) diff --git a/beacon_chain/consensus_object_pools/consensus_manager.nim b/beacon_chain/consensus_object_pools/consensus_manager.nim index 873a99ba70..b16d7c68a1 100644 --- a/beacon_chain/consensus_object_pools/consensus_manager.nim +++ b/beacon_chain/consensus_object_pools/consensus_manager.nim @@ -282,22 +282,39 @@ proc getFeeRecipient*( proc getGasLimit*(self: ConsensusManager, pubkey: ValidatorPubKey): uint64 = getGasLimit(self.validatorsDir, self.defaultGasLimit, pubkey) -proc proposalForkchoiceUpdated*( +proc prepareNextSlot*( self: ref ConsensusManager, proposalSlot: Slot, deadline: DeadlineFuture ) {.async: (raises: [CancelledError]).} = ## Send a "warm-up" forkchoiceUpdated to the execution client, assuming that - ## `clearanceState` has been updated to the expected epoch of the proposal - if self.forkchoiceInflight: - debug "Skipping proposal fcU, forkchoiceUpdated already in flight", proposalSlot + ## `clearanceState` has been updated to the expected epoch of the proposal - + ## at the same time, ensure that the clearance state is ready for the next + ## block + + # When the chain is synced, the most likely block to be produced is the block + # right after head - we can exploit this assumption and advance the state + # to that slot before the block arrives, thus allowing us to do the expensive + # epoch transition ahead of time. + # Notably, we use the clearance state here because that's what the clearance + # function uses to validate the incoming block (or the one that's about to be + # produced) + let + dag = self.dag + head = dag.head + nextBsi = BlockSlotId.init(head.bid, proposalSlot) + startTick = Moment.now() + + var cache = StateCache() + if not dag.updateState(dag.clearanceState, nextBsi, true, cache, dag.updateFlags): + # This should never happen since we're basically advancing the slots of the + # head state + warn "Cannot prepare clearance state for next block - bug?" return - let head = self.dag.head - - # Sending the proposal fcU requires that the state epoch matches the proposal - # epoch so that the withdrawals can be computed correctly - if not self.dag.clearanceState.matches_block_slot(head.root, proposalSlot): - debug "Skipping proposal fcU, clearance state not prepared", head, proposalSlot + debug "Prepared clearance state for next block", + nextBsi, updateStateDur = Moment.now() - startTick + if self.forkchoiceInflight: + debug "Skipping proposal fcU, forkchoiceUpdated already in flight", proposalSlot return let @@ -312,7 +329,7 @@ proc proposalForkchoiceUpdated*( # Approximately lines up with validator_duties version. Used optimistically/ # opportunistically, so mismatches are fine if not too frequent. - withState(self.dag.clearanceState): + withState(dag.clearanceState): when consensusFork == ConsensusFork.Gloas: debugGloasComment "well, likely can't keep reusing V3 much longer" elif consensusFork in ConsensusFork.Bellatrix .. 
ConsensusFork.Fulu:
@@ -327,7 +344,7 @@ proc proposalForkchoiceUpdated*(
           nextProposer, Opt.some(validatorIndex), proposalSlot.epoch
         )
         beaconHead = self.attestationPool[].getBeaconHead(head)
-        headBlockHash = self.dag.loadExecutionBlockHash(beaconHead.blck).valueOr:
+        headBlockHash = dag.loadExecutionBlockHash(beaconHead.blck).valueOr:
           return
 
       if headBlockHash.isZero:
diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim
index 5ae7695242..791aa9da5d 100644
--- a/beacon_chain/gossip_processing/block_processor.nim
+++ b/beacon_chain/gossip_processing/block_processor.nim
@@ -23,7 +23,7 @@ from ../beacon_clock import GetBeaconTimeFn, toFloatSeconds
 from ../consensus_object_pools/block_dag import
   BlockRef, OptimisticStatus, executionValid, root, shortLog, slot
 from ../consensus_object_pools/block_pools_types import
-  EpochRef, VerifierError
+  ChainDAGRef, EpochRef, OnBlockAdded, VerifierError
 from ../consensus_object_pools/block_quarantine import
   addSidecarless, addOrphan, addUnviable, pop, removeOrphan, removeSidecarless
 from ../consensus_object_pools/blob_quarantine import
@@ -110,6 +110,9 @@ type
       ## The slot at which we sent a payload to the execution client the last
       ## time
 
+  NoSidecars = typeof(())
+  SomeOptSidecars = NoSidecars | Opt[BlobSidecars] | Opt[DataColumnSidecars]
+
 # Initialization
 # ------------------------------------------------------------------------------
 
@@ -171,99 +174,101 @@ proc dumpBlock[T](
 
 from ../consensus_object_pools/block_clearance import
   addBackfillBlock, addHeadBlockWithParent, checkHeadBlock
 
-proc storeBackfillBlock(
-    self: var BlockProcessor,
-    signedBlock: phase0.SignedBeaconBlock | altair.SignedBeaconBlock |
-                 bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock |
-                 deneb.SignedBeaconBlock | electra.SignedBeaconBlock,
-    blobsOpt: Opt[BlobSidecars]): Result[void, VerifierError] =
-  # The block is certainly not missing any more
-  self.consensusManager.quarantine[].missing.del(signedBlock.root)
+template selectSidecars(
+    consensusFork: static ConsensusFork,
+    blobsOpt: Opt[BlobSidecars],
+    columnsOpt: Opt[DataColumnSidecars],
+): untyped =
+  # The when jungle here must be kept consistent with `verifySidecars`
+  when consensusFork in ConsensusFork.Fulu .. ConsensusFork.Gloas:
+    doAssert blobsOpt.isNone(), "No blobs in " & $consensusFork
+    columnsOpt
+  elif consensusFork in ConsensusFork.Deneb .. ConsensusFork.Electra:
+    doAssert columnsOpt.isNone(), "No columns in " & $consensusFork
+    blobsOpt
+  elif consensusFork in ConsensusFork.Phase0 .. ConsensusFork.Capella:
+    doAssert blobsOpt.isNone and columnsOpt.isNone(),
+      "No blobs/columns in " & $consensusFork
+    default(NoSidecars)
+  else:
+    {.error: "Unknown fork " & $consensusFork.}
 
-  # Establish blob viability before calling addbackfillBlock to avoid
-  # writing the block in case of blob error.
- var blobsOk = true - when typeof(signedBlock).kind in [ConsensusFork.Deneb, ConsensusFork.Electra]: - if blobsOpt.isSome: - let blobs = blobsOpt.get() +proc verifySidecars( + signedBlock: ForkySignedBeaconBlock, + sidecarsOpt: SomeOptSidecars, +): Result[void, VerifierError] = + const consensusFork = typeof(signedBlock).kind + + when consensusFork == ConsensusFork.Gloas: + # For Gloas, we still need to store the columns if they're provided + # but skip validation since we don't have kzg_commitments in the block + if sidecarsOpt.isSome: + debugGloasComment "potentially validate against payload envelope" + let columns = sidecarsOpt.get() + discard + elif consensusFork == ConsensusFork.Fulu: + if sidecarsOpt.isSome: + let columns = sidecarsOpt.get() + let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq + if columns.len > 0 and kzgCommits.len > 0: + for i in 0 ..< columns.len: + let r = verify_data_column_sidecar_kzg_proofs(columns[i][]) + if r.isErr(): + debug "data column validation failed", + blockRoot = shortLog(signedBlock.root), + column_sidecar = shortLog(columns[i][]), + blck = shortLog(signedBlock.message), + signature = shortLog(signedBlock.signature), + msg = r.error() + return err(VerifierError.Invalid) + elif consensusFork in ConsensusFork.Deneb .. ConsensusFork.Electra: + if sidecarsOpt.isSome: + let blobs = sidecarsOpt.get() let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq if blobs.len > 0 or kzgCommits.len > 0: - let r = validate_blobs(kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)), - blobs.mapIt(it.kzg_proof)) + let r = validate_blobs( + kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)), blobs.mapIt(it.kzg_proof) + ) if r.isErr(): - debug "backfill blob validation failed", + debug "blob validation failed", blockRoot = shortLog(signedBlock.root), blobs = shortLog(blobs), blck = shortLog(signedBlock.message), kzgCommits = mapIt(kzgCommits, shortLog(it)), signature = shortLog(signedBlock.signature), msg = r.error() - blobsOk = r.isOk() - - if not blobsOk: - return err(VerifierError.Invalid) - - let res = self.consensusManager.dag.addBackfillBlock(signedBlock) + return err(VerifierError.Invalid) + elif consensusFork in ConsensusFork.Phase0 .. ConsensusFork.Capella: + static: doAssert sidecarsOpt is NoSidecars + else: + {.error: "Unknown consensus fork " & $consensusFork.} - if res.isErr(): - case res.error - of VerifierError.MissingParent: - if signedBlock.message.parent_root in - self.consensusManager.quarantine[].unviable: - # DAG doesn't know about unviable ancestor blocks - we do! Translate - # this to the appropriate error so that sync etc doesn't retry the block - self.consensusManager.quarantine[].addUnviable(signedBlock.root) + ok() - return err(VerifierError.UnviableFork) - of VerifierError.UnviableFork: - # Track unviables so that descendants can be discarded properly - self.consensusManager.quarantine[].addUnviable(signedBlock.root) - else: discard - return res +proc storeSidecars(self: BlockProcessor, sidecarsOpt: Opt[BlobSidecars]) = + if sidecarsOpt.isSome(): + debug "Inserting blobs into database", blobs = sidecarsOpt[].len + for b in sidecarsOpt[]: + self.consensusManager.dag.db.putBlobSidecar(b[]) - # Only store blobs after successfully establishing block viability. 
- let blobs = blobsOpt.valueOr: BlobSidecars @[] - for b in blobs: - self.consensusManager.dag.db.putBlobSidecar(b[]) +proc storeSidecars(self: BlockProcessor, sidecarsOpt: Opt[DataColumnSidecars]) = + if sidecarsOpt.isSome(): + debug "Inserting columns into database", columns = sidecarsOpt[].len + for c in sidecarsOpt[]: + self.consensusManager.dag.db.putDataColumnSidecar(c[]) - res +proc storeSidecars(self: BlockProcessor, sidecarsOpt: NoSidecars) = + discard proc storeBackfillBlock( self: var BlockProcessor, - signedBlock: fulu.SignedBeaconBlock | gloas.SignedBeaconBlock, - dataColumnsOpt: Opt[DataColumnSidecars]): Result[void, VerifierError] = + signedBlock: ForkySignedBeaconBlock, + sidecarsOpt: SomeOptSidecars, +): Result[void, VerifierError] = # The block is certainly not missing any more self.consensusManager.quarantine[].missing.del(signedBlock.root) - var - columnsOk = true - - when signedBlock is gloas.SignedBeaconBlock: - # For Gloas, we still need to store the columns if they're provided - # but skip validation since we don't have kzg_commitments in the block - if dataColumnsOpt.isSome: - debugGloasComment "potentially validate against payload envelope" - let columns = dataColumnsOpt.get() - discard - else: - if dataColumnsOpt.isSome: - let columns = dataColumnsOpt.get() - let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq - if columns.len > 0 and kzgCommits.len > 0: - for i in 0..= ConsensusFork.Fulu: - resfut.complete(self.storeBackfillBlock(forkyBlck, data_columns)) - else: - resfut.complete(self.storeBackfillBlock(forkyBlck, blobs)) + let sidecars = selectSidecars(consensusFork, blobs, data_columns) + resfut.complete(self.storeBackfillBlock(forkyBlck, sidecars)) return + try: self.blockQueue.addLastNoWait(BlockEntry( blck: blck, @@ -473,13 +473,98 @@ proc enqueueQuarantine(self: var BlockProcessor, root: Eth2Digest) = else: {.error: "Unknown consensus fork " & $consensusFork.} +proc onBlockAdded*( + dag: ChainDAGRef, + consensusFork: static ConsensusFork, + src: MsgSource, + wallTime: BeaconTime, + attestationPool: ref AttestationPool, + validatorMonitor: ref ValidatorMonitor, +): OnBlockAdded[consensusFork] = + # Actions to perform when a block is successfully added to the DAG, while + # still having access to the clearance state data + + return proc( + blckRef: BlockRef, + blck: consensusFork.TrustedSignedBeaconBlock, + state: consensusFork.BeaconState, + epochRef: EpochRef, + unrealized: FinalityCheckpoints, + ) = + attestationPool[].addForkChoice( + epochRef, blckRef, unrealized, blck.message, wallTime + ) + + validatorMonitor[].registerBeaconBlock(src, wallTime, blck.message) + + for attestation in blck.message.body.attestations: + for vidx in dag.get_attesting_indices(attestation, true): + validatorMonitor[].registerAttestationInBlock( + attestation.data, vidx, blck.message.slot + ) + + when consensusFork >= ConsensusFork.Altair: + for i in blck.message.body.sync_aggregate.sync_committee_bits.oneIndices(): + validatorMonitor[].registerSyncAggregateInBlock( + blck.message.slot, blck.root, state.current_sync_committee.pubkeys.data[i] + ) + +proc verifyPayload( + self: ref BlockProcessor, signedBlock: ForkySignedBeaconBlock +): Result[OptimisticStatus, VerifierError] = + const consensusFork = typeof(signedBlock).kind + # When the execution layer is not available to verify the payload, we do the + # required checks on the CL instead and proceed as if the EL was syncing + # 
https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/bellatrix/beacon-chain.md#verify_and_notify_new_payload
+  # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/deneb/beacon-chain.md#modified-verify_and_notify_new_payload
+  when consensusFork == ConsensusFork.Gloas:
+    debugGloasComment "no execution payload field for gloas"
+    ok OptimisticStatus.valid
+  elif consensusFork >= ConsensusFork.Bellatrix:
+    if signedBlock.message.is_execution_block:
+      template payload(): auto =
+        signedBlock.message.body.execution_payload
+
+      template returnWithError(msg: string, extraMsg = ""): untyped =
+        if extraMsg != "":
+          debug msg, reason = extraMsg, executionPayload = shortLog(payload)
+        else:
+          debug msg, executionPayload = shortLog(payload)
+        return err(VerifierError.Invalid)
+
+      if payload.transactions.anyIt(it.len == 0):
+        returnWithError "Execution block contains zero length transactions"
+
+      if payload.block_hash != signedBlock.message.compute_execution_block_hash():
+        returnWithError "Execution block hash validation failed"
+
+      # [New in Deneb:EIP4844]
+      when consensusFork >= ConsensusFork.Deneb:
+        let blobsRes = signedBlock.message.is_valid_versioned_hashes
+        if blobsRes.isErr:
+          returnWithError "Blob versioned hashes invalid", blobsRes.error
+      else:
+        # If there are EIP-4844 (type 3) transactions in the payload with
+        # versioned hashes, the transactions would be rejected by the EL
+        # based on payload timestamp (only allowed post Deneb);
+        # There are no `blob_kzg_commitments` before Deneb to compare against
+        discard
+
+      if signedBlock.root in self.invalidBlockRoots:
+        returnWithError "Block root treated as invalid via config", $signedBlock.root
+
+      ok OptimisticStatus.notValidated
+    else:
+      ok OptimisticStatus.valid
+  else:
+    ok OptimisticStatus.valid
+
 proc storeBlock(
     self: ref BlockProcessor, src: MsgSource, wallTime: BeaconTime,
     signedBlock: ForkySignedBeaconBlock,
-    blobsOpt: Opt[BlobSidecars],
-    dataColumnsOpt: Opt[DataColumnSidecars],
+    sidecarsOpt: SomeOptSidecars,
     maybeFinalized: bool,
     queueTick: Moment, validationDur: Duration,
@@ -490,7 +575,7 @@ proc storeBlock(
   ## to know about it, such as the fork choice and the monitoring
 
   let
-    attestationPool = self.consensusManager.attestationPool
+    ap = self.consensusManager.attestationPool
     startTick = Moment.now()
     vm = self.validatorMonitor
     dag = self.consensusManager.dag
@@ -515,19 +600,16 @@ proc storeBlock(
       # DAG doesn't know about unviable ancestor blocks - we do however!
       return err(VerifierError.UnviableFork)
 
-  let
-    # We have to be careful that there exists only one in-flight entry point
-    # for adding blocks or the checks performed in `checkHeadBlock` might
-    # be invalidated (ie a block could be added while we wait for EL response
-    # here)
-    parent = dag.checkHeadBlock(signedBlock)
-
-  if parent.isErr():
+  # We have to be careful that there exists only one in-flight entry point
+  # for adding blocks or the checks performed in `checkHeadBlock` might
+  # be invalidated (ie a block could be added while we wait for EL response
+  # here)
+  let parent = dag.checkHeadBlock(signedBlock).valueOr:
     # TODO This logic can be removed if the database schema is extended
     # to store non-canonical heads on top of the canonical head!
    # If that is done, the database no longer contains extra blocks
    # that have not yet been assigned a `BlockRef`
-    if parent.error() == VerifierError.MissingParent:
+    if error == VerifierError.MissingParent:
      # This indicates that no `BlockRef` is available for the `parent_root`.
# However, the block may still be available in local storage. On startup, # only the canonical branch is imported into `blockchain_dag`, while @@ -593,7 +675,7 @@ proc storeBlock( MsgSource.gossip, parentBlck.unsafeGet().asSigned(), Opt.none(BlobSidecars), columns) - return err(parent.error()) + return err(error) const consensusFork = typeof(signedBlock).kind let @@ -621,127 +703,23 @@ proc storeBlock( else: Opt.some(OptimisticStatus.valid) # vacuously - let optimisticStatus = optimisticStatusRes.valueOr: - # When the execution layer is not available to verify the payload, we do the - # required checks on the CL instead and proceed as if the EL was syncing - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#verify_and_notify_new_payload - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/beacon-chain.md#modified-verify_and_notify_new_payload - when typeof(signedBlock).kind >= ConsensusFork.Bellatrix and - typeof(signedBlock).kind < ConsensusFork.Gloas: - debugGloasComment "no exection payload field for gloas" - if signedBlock.message.is_execution_block: - template payload(): auto = signedBlock.message.body.execution_payload - - template returnWithError(msg: string, extraMsg = ""): untyped = - if extraMsg != "": - debug msg, reason = extraMsg, executionPayload = shortLog(payload) - else: - debug msg, executionPayload = shortLog(payload) - doAssert strictVerification notin dag.updateFlags - return err(VerifierError.Invalid) - - if payload.transactions.anyIt(it.len == 0): - returnWithError "Execution block contains zero length transactions" - - if payload.block_hash != - signedBlock.message.compute_execution_block_hash(): - returnWithError "Execution block hash validation failed" - - # [New in Deneb:EIP4844] - when consensusFork >= ConsensusFork.Deneb: - let blobsRes = signedBlock.message.is_valid_versioned_hashes - if blobsRes.isErr: - returnWithError "Blob versioned hashes invalid", blobsRes.error - else: - # If there are EIP-4844 (type 3) transactions in the payload with - # versioned hashes, the transactions would be rejected by the EL - # based on payload timestamp (only allowed post Deneb); - # There are no `blob_kzg_commitments` before Deneb to compare against - discard - - if signedBlock.root in self.invalidBlockRoots: - returnWithError "Block root treated as invalid via config", - $signedBlock.root - - OptimisticStatus.notValidated - else: - OptimisticStatus.valid + let optimisticStatus = ?(optimisticStatusRes or verifyPayload(self, signedBlock)) if OptimisticStatus.invalidated == optimisticStatus: return err(VerifierError.Invalid) let newPayloadTick = Moment.now() - when typeof(signedBlock).kind >= ConsensusFork.Fulu and - typeof(signedBlock).kind < ConsensusFork.Gloas: - debugGloasComment "no blob_kzg_commitments field for gloas" - if dataColumnsOpt.isSome: - let - columns0 = dataColumnsOpt.get() - kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq - if columns0.len > 0 and kzgCommits.len > 0: - for i in 0..= ConsensusFork.Deneb and - typeof(signedBlock).kind < ConsensusFork.Gloas: - debugGloasComment "no blob_kzg_commitments field for gloas" - if blobsOpt.isSome: - let blobs = blobsOpt.get() - let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq - if blobs.len > 0 or kzgCommits.len > 0: - let r = validate_blobs(kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)), - blobs.mapIt(it.kzg_proof)) - if r.isErr(): - debug "blob validation failed", - blockRoot = 
shortLog(signedBlock.root), - blobs = shortLog(blobs), - blck = shortLog(signedBlock.message), - kzgCommits = mapIt(kzgCommits, shortLog(it)), - signature = shortLog(signedBlock.signature), - msg = r.error() - return err(VerifierError.Invalid) + ?verifySidecars(signedBlock, sidecarsOpt) - type Trusted = typeof signedBlock.asTrusted() - - let - blckRes = dag.addHeadBlockWithParent( - self.verifier, signedBlock, parent.value(), optimisticStatus) do ( - blckRef: BlockRef, trustedBlock: Trusted, - epochRef: EpochRef, unrealized: FinalityCheckpoints): - # Callback add to fork choice if valid - attestationPool[].addForkChoice( - epochRef, blckRef, unrealized, trustedBlock.message, wallTime) - - vm[].registerBeaconBlock( - src, wallTime, trustedBlock.message) - - for attestation in trustedBlock.message.body.attestations: - for validator_index in dag.get_attesting_indices(attestation, true): - vm[].registerAttestationInBlock(attestation.data, validator_index, - trustedBlock.message.slot) - - withState(dag[].clearanceState): - when consensusFork >= ConsensusFork.Altair and - consensusFork == typeof(signedBlock).kind: - for i in trustedBlock.message.body.sync_aggregate.sync_committee_bits.oneIndices(): - vm[].registerSyncAggregateInBlock( - trustedBlock.message.slot, trustedBlock.root, - forkyState.data.current_sync_committee.pubkeys.data[i]) - - let blck = ?blckRes # `?` and `do?` are not friendly with each other + let blck = + ?dag.addHeadBlockWithParent( + self.verifier, + signedBlock, + parent, + optimisticStatus, + onBlockAdded(dag, consensusFork, src, wallTime, ap, vm), + ) # Even if the EL is not responding, we'll only try once every now and then # to give it a block - this avoids a pathological slowdown where a busy EL @@ -750,16 +728,7 @@ proc storeBlock( self[].lastPayload = signedBlock.message.slot # write blobs now that block has been written. 
-  let blobs = blobsOpt.valueOr: BlobSidecars @[]
-  for b in blobs:
-    self.consensusManager.dag.db.putBlobSidecar(b[])
-
-  # write data columns now that block has been written
-  let data_columns = dataColumnsOpt.valueOr: DataColumnSidecars @[]
-  debug "Inserting columns into database",
-    indices = data_columns.mapIt($it.index).len
-  for col in data_columns:
-    self.consensusManager.dag.db.putDataColumnSidecar(col[])
+  self[].storeSidecars(sidecarsOpt)
 
   let addHeadBlockTick = Moment.now()
@@ -892,9 +861,11 @@ proc processBlock(
 
   let res = withBlck(entry.blck):
-    let res = await self.storeBlock(
-      entry.src, wallTime, forkyBlck, entry.blobs, entry.columns,
-      entry.maybeFinalized, entry.queueTick, entry.validationDur)
+    let
+      sidecars = selectSidecars(consensusFork, entry.blobs, entry.columns)
+      res = await self.storeBlock(
+        entry.src, wallTime, forkyBlck, sidecars,
+        entry.maybeFinalized, entry.queueTick, entry.validationDur)
 
     self[].dumpBlock(forkyBlck, res)
 
diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim
index 6d58875c48..21dc98d74c 100644
--- a/beacon_chain/nimbus_beacon_node.nim
+++ b/beacon_chain/nimbus_beacon_node.nim
@@ -1948,12 +1948,11 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
     head = node.dag.head # could be a new head compared to earlier
 
   if nextSlotCutoff.inFuture and node.isSynced(head) and head.executionValid:
-    node.dag.advanceClearanceState(nextSlot)
-
     # If there is a proposal, we want to let the execution client know a bit
     # earlier - the risk is that fork choice changes again before the proposal
-    # but this risk should be small
-    node.consensusManager.proposalForkchoiceUpdated(
+    # but this risk should be small - this function also prepares the
+    # clearance state for the block most likely to arrive next
+    node.consensusManager.prepareNextSlot(
       nextSlot, sleepAsync(nextSlotCutoff.offset)
     )
   else:
diff --git a/beacon_chain/spec/forks.nim b/beacon_chain/spec/forks.nim
index 388fbdc70f..bde57c5209 100644
--- a/beacon_chain/spec/forks.nim
+++ b/beacon_chain/spec/forks.nim
@@ -1020,6 +1020,7 @@ template withState*(x: ForkedHashedBeaconState, body: untyped): untyped =
 
 template forky*(
     x: ForkedBeaconBlock |
+       ForkedSignedBeaconBlock |
       ForkedHashedBeaconState,
    kind: static ConsensusFork): untyped =
  when kind == ConsensusFork.Gloas:
diff --git a/beacon_chain/spec/state_transition.nim b/beacon_chain/spec/state_transition.nim
index 0e4da15ad6..a207ccb08b 100644
--- a/beacon_chain/spec/state_transition.nim
+++ b/beacon_chain/spec/state_transition.nim
@@ -90,9 +90,8 @@ func verifyStateRoot(
 type
   RollbackProc* = proc() {.gcsafe, noSideEffect, raises: [].}
 
-  RollbackHashedProc*[T] =
-    proc(state: var T) {.gcsafe, noSideEffect, raises: [].}
-  RollbackForkedHashedProc* = RollbackHashedProc[ForkedHashedBeaconState]
+  RollbackForkedHashedProc* =
+    proc(state: var ForkedHashedBeaconState) {.gcsafe, noSideEffect, raises: [].}
 
 func noRollback*() =
   trace "Skipping rollback of broken state"
 
diff --git a/beacon_chain/sync/sync_overseer.nim b/beacon_chain/sync/sync_overseer.nim
index f6cc5f09c8..90ed08c754 100644
--- a/beacon_chain/sync/sync_overseer.nim
+++ b/beacon_chain/sync/sync_overseer.nim
@@ -223,33 +223,6 @@ proc blockProcessingLoop(overseer: SyncOverseerRef): Future[void] {.
     attestationPool = consensusManager.attestationPool
     validatorMonitor = overseer.validatorMonitor
 
-  proc onBlockAdded(
-      blckRef: BlockRef, blck: ForkedTrustedSignedBeaconBlock, epochRef: EpochRef,
-      unrealized: FinalityCheckpoints) {.gcsafe, raises: [].} =
-
-    let wallTime = overseer.getBeaconTimeFn()
-    withBlck(blck):
-      attestationPool[].addForkChoice(
-        epochRef, blckRef, unrealized, forkyBlck.message, wallTime)
-
-      validatorMonitor[].registerBeaconBlock(
-        MsgSource.sync, wallTime, forkyBlck.message)
-
-      for attestation in forkyBlck.message.body.attestations:
-        for validator_index in
-            dag.get_attesting_indices(attestation, true):
-          validatorMonitor[].registerAttestationInBlock(
-            attestation.data, validator_index, forkyBlck.message.slot)
-
-      withState(dag[].clearanceState):
-        when (consensusFork >= ConsensusFork.Altair) and
-            (type(forkyBlck) isnot phase0.TrustedSignedBeaconBlock):
-          for i in forkyBlck.message.body.sync_aggregate.
-              sync_committee_bits.oneIndices():
-            validatorMonitor[].registerSyncAggregateInBlock(
-              forkyBlck.message.slot, forkyBlck.root,
-              forkyState.data.current_sync_committee.pubkeys.data[i])
-
   block mainLoop:
     while true:
       let bchunk = await overseer.blocksQueue.popFirst()
@@ -257,11 +230,23 @@ proc blockProcessingLoop(overseer: SyncOverseerRef): Future[void] {.
       block innerLoop:
        for bdata in bchunk.blocks:
          block:
-            let res = addBackfillBlockData(dag, bdata, bchunk.onStateUpdatedCb,
-                                           onBlockAdded)
+            let res = withBlck(bdata.blck):
+              addBackfillBlockData(
+                dag,
+                consensusFork,
+                bdata,
+                bchunk.onStateUpdatedCb,
+                onBlockAdded(
+                  dag,
+                  consensusFork,
+                  MsgSource.sync,
+                  overseer.getBeaconTimeFn(),
+                  attestationPool,
+                  validatorMonitor,
+                ),
+              )
            if res.isErr():
-              let msg = "Unable to add block data to database [" &
-                        $res.error & "]"
+              let msg = "Unable to add block data to database [" & $res.error & "]"
              bchunk.resfut.complete(Result[void, string].err(msg))
              break innerLoop
diff --git a/research/block_sim.nim b/research/block_sim.nim
index d83d106ede..4dcfe43af4 100644
--- a/research/block_sim.nim
+++ b/research/block_sim.nim
@@ -303,9 +303,10 @@ cli do(
       .toValidatorSig()
 
     # TODO without the OnBlockAdded cast, Nim can't figure out the type (?)
-    let onAdded: OnBlockAdded[consensusFork.TrustedSignedBeaconBlock] = proc(
+    let onAdded: OnBlockAdded[consensusFork] = proc(
       blckRef: BlockRef,
       signedBlock: consensusFork.TrustedSignedBeaconBlock,
+      state: consensusFork.BeaconState,
      epochRef: EpochRef,
      unrealized: FinalityCheckpoints,
    ) =
diff --git a/tests/consensus_spec/test_fixture_fork_choice.nim b/tests/consensus_spec/test_fixture_fork_choice.nim
index d472fec881..fa2ec176e5 100644
--- a/tests/consensus_spec/test_fixture_fork_choice.nim
+++ b/tests/consensus_spec/test_fixture_fork_choice.nim
@@ -269,6 +269,7 @@ proc stepOnBlock(
   let blockAdded = dag.addHeadBlock(verifier, signedBlock) do (
       blckRef: BlockRef, signedBlock: consensusFork.TrustedSignedBeaconBlock,
+      state: consensusFork.BeaconState,
      epochRef: EpochRef, unrealized: FinalityCheckpoints):
    # 4.
Update fork choice if valid diff --git a/tests/consensus_spec/test_fixture_light_client_data_collection.nim b/tests/consensus_spec/test_fixture_light_client_data_collection.nim index bd71669877..f8ff5e6e6a 100644 --- a/tests/consensus_spec/test_fixture_light_client_data_collection.nim +++ b/tests/consensus_spec/test_fixture_light_client_data_collection.nim @@ -154,7 +154,7 @@ proc runTest(suiteName, path: string, consensusFork: static ConsensusFork) = of TestStepKind.NewBlock: checkpoint $i & " new_block: " & $shortLog(step.blck.toBlockId()) let added = withBlck(step.blck): - const nilCallback = (consensusFork.OnBlockAddedCallback)(nil) + const nilCallback = OnBlockAdded[consensusFork](nil) dag.addHeadBlock(verifier, forkyBlck, nilCallback) check: added.isOk() of TestStepKind.NewHead: diff --git a/tests/test_attestation_pool.nim b/tests/test_attestation_pool.nim index 875a30f415..1e08c7a656 100644 --- a/tests/test_attestation_pool.nim +++ b/tests/test_attestation_pool.nim @@ -110,7 +110,7 @@ suite "Attestation pool processing" & preset(): blck = addTestBlock( state[], cache, attestations = attestations, cfg = dag.cfg) check dag.addHeadBlock( - verifier, blck.phase0Data, OnPhase0BlockAdded(nil)).isOk + verifier, blck.phase0Data, OnBlockAdded[ConsensusFork.Phase0](nil)).isOk # History 1 contains all odd blocks state.fillToEpoch(cache) @@ -530,6 +530,7 @@ suite "Attestation pool processing" & preset(): b1 = addTestBlock(state[], cache).phase0Data b1Add = dag.addHeadBlock(verifier, b1) do ( blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + state: phase0.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -545,6 +546,7 @@ suite "Attestation pool processing" & preset(): b2 = addTestBlock(state[], cache).phase0Data b2Add = dag.addHeadBlock(verifier, b2) do ( blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + state: phase0.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -563,6 +565,7 @@ suite "Attestation pool processing" & preset(): b10 = makeTestBlock(state[], cache).phase0Data b10Add = dag.addHeadBlock(verifier, b10) do ( blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + state: phase0.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -583,6 +586,7 @@ suite "Attestation pool processing" & preset(): ).phase0Data b11Add = dag.addHeadBlock(verifier, b11) do ( blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + state: phase0.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -637,6 +641,7 @@ suite "Attestation pool processing" & preset(): b10 = makeTestBlock(state[], cache).phase0Data b10Add = dag.addHeadBlock(verifier, b10) do ( blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + state: phase0.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -654,6 +659,7 @@ suite "Attestation pool processing" & preset(): let b10_clone = b10 # Assumes deep copy let b10Add_clone = dag.addHeadBlock(verifier, b10_clone) do ( blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + state: phase0.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -671,6 
+677,7 @@ suite "Attestation pool processing" & preset(): b10 = addTestBlock(state[], cache).phase0Data b10Add = dag.addHeadBlock(verifier, b10) do ( blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + state: phase0.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -699,6 +706,7 @@ suite "Attestation pool processing" & preset(): let blockRef = dag.addHeadBlock(verifier, new_block) do ( blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + state: phase0.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -744,6 +752,7 @@ suite "Attestation pool processing" & preset(): # Add back the old block to ensure we have a duplicate error let b10Add_clone = dag.addHeadBlock(verifier, b10_clone) do ( blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + state: phase0.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( diff --git a/tests/test_blockchain_dag.nim b/tests/test_blockchain_dag.nim index c9c6a29f9f..776a0ae893 100644 --- a/tests/test_blockchain_dag.nim +++ b/tests/test_blockchain_dag.nim @@ -28,9 +28,9 @@ from ./testbcutil import addHeadBlock func `$`(x: BlockRef): string = shortLog(x) const - nilPhase0Callback = OnPhase0BlockAdded(nil) - nilAltairCallback = OnAltairBlockAdded(nil) - nilBellatrixCallback = OnBellatrixBlockAdded(nil) + nilPhase0Callback = OnBlockAdded[ConsensusFork.Phase0](nil) + nilAltairCallback = OnBlockAdded[ConsensusFork.Altair](nil) + nilBellatrixCallback = OnBlockAdded[ConsensusFork.Bellatrix](nil) proc pruneAtFinalization(dag: ChainDAGRef) = if dag.needStateCachesAndForkChoicePruning(): @@ -1725,7 +1725,7 @@ template runShufflingTests(cfg: RuntimeConfig, numRandomTests: int) = attested = attested, allDeposits = deposits, graffiti = graffiti, cfg = cfg): let added = withBlck(forkedBlck): - const nilCallback = (consensusFork.OnBlockAddedCallback)(nil) + const nilCallback = OnBlockAdded[consensusFork](nil) dag.addHeadBlock(verifier, forkyBlck, nilCallback) check added.isOk() dag.updateHead(added[], quarantine[], []) diff --git a/tests/test_gossip_validation.nim b/tests/test_gossip_validation.nim index 2c7dac7ac9..dda362be3d 100644 --- a/tests/test_gossip_validation.nim +++ b/tests/test_gossip_validation.nim @@ -84,6 +84,7 @@ suite "Gossip validation " & preset(): dag.headState, cache, int(SLOTS_PER_EPOCH * 5), attested = false): let added = dag.addHeadBlock(verifier, blck.phase0Data) do ( blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, + state: phase0.BeaconState, epochRef: EpochRef, unrealized: FinalityCheckpoints): # Callback add to fork choice if valid pool[].addForkChoice( @@ -218,7 +219,7 @@ suite "Gossip validation - Altair": dag.headState, cache, blocks = 1, attested = false, cfg = cfg): let added = withBlck(blck): - const nilCallback = (consensusFork.OnBlockAddedCallback)(nil) + const nilCallback = OnBlockAdded[consensusFork](nil) dag.addHeadBlock(verifier, forkyBlck, nilCallback) check: added.isOk() dag.updateHead(added[], quarantine, []) diff --git a/tests/test_light_client.nim b/tests/test_light_client.nim index 3e6a3f1edc..d4b7f4de83 100644 --- a/tests/test_light_client.nim +++ b/tests/test_light_client.nim @@ -69,7 +69,7 @@ suite "Light client" & preset(): dag.headState, cache, blocks.int, attested = attested, syncCommitteeRatio = syncCommitteeRatio, cfg = cfg): let 
added = withBlck(blck): - const nilCallback = (consensusFork.OnBlockAddedCallback)(nil) + const nilCallback = OnBlockAdded[consensusFork](nil) dag.addHeadBlock(verifier, forkyBlck, nilCallback) check: added.isOk() dag.updateHead(added[], quarantine, []) diff --git a/tests/test_light_client_processor.nim b/tests/test_light_client_processor.nim index f2a2c72f24..948fc8ed39 100644 --- a/tests/test_light_client_processor.nim +++ b/tests/test_light_client_processor.nim @@ -58,7 +58,7 @@ suite "Light client processor" & preset(): dag.headState, cache, blocks.int, attested = true, syncCommitteeRatio = syncCommitteeRatio, cfg = cfg): let added = withBlck(blck): - const nilCallback = (consensusFork.OnBlockAddedCallback)(nil) + const nilCallback = OnBlockAdded[consensusFork](nil) dag.addHeadBlock(verifier, forkyBlck, nilCallback) doAssert added.isOk() dag.updateHead(added[], quarantine[], []) diff --git a/tests/testbcutil.nim b/tests/testbcutil.nim index ac9b2c25f1..b21d90c7b9 100644 --- a/tests/testbcutil.nim +++ b/tests/testbcutil.nim @@ -14,15 +14,24 @@ from ../beacon_chain/consensus_object_pools/block_clearance import from ../beacon_chain/consensus_object_pools/block_dag import BlockRef, OptimisticStatus from ../beacon_chain/consensus_object_pools/block_pools_types import - ChainDAGRef, OnForkyBlockAdded, VerifierError + ChainDAGRef, OnBlockAdded, VerifierError from ../beacon_chain/spec/forks import ForkySignedBeaconBlock from ../beacon_chain/spec/signatures_batch import BatchVerifier -proc addHeadBlock*( +proc addHeadBlockImpl( dag: ChainDAGRef, verifier: var BatchVerifier, signedBlock: ForkySignedBeaconBlock, - onBlockAdded: OnForkyBlockAdded + onBlockAdded: OnBlockAdded ): Result[BlockRef, VerifierError] = addHeadBlockWithParent( dag, verifier, signedBlock, ? dag.checkHeadBlock(signedBlock), OptimisticStatus.valid, onBlockAdded) + +template addHeadBlock*( + dag: ChainDAGRef, verifier: var BatchVerifier, + signedBlock: ForkySignedBeaconBlock, + onBlockAddedParam: untyped + ): Result[BlockRef, VerifierError] = + let onBlockAdded: OnBlockAdded[typeof(signedBlock).kind] = onBlockAddedParam + + addHeadBlockImpl(dag, verifier, signedBlock, onBlockAdded)
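[Editor's note] For orientation, this is the shape of a call site after the refactor, assembled from the test changes above; the surrounding scaffolding (`dag`, `verifier`, `b1`, `pool`, `wallTime`) is assumed to exist as in the tests:

let b1Add = dag.addHeadBlock(verifier, b1) do (
    blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
    state: phase0.BeaconState, # new parameter: state with the block applied
    epochRef: EpochRef, unrealized: FinalityCheckpoints):
  # the callback runs while the clearance state is still loaded, so fork
  # choice and monitoring can read state data without an extra state load
  pool[].addForkChoice(
    epochRef, blckRef, unrealized, signedBlock.message, wallTime)

# ...or, when no notification is needed, a typed nil callback:
const nilCallback = OnBlockAdded[ConsensusFork.Phase0](nil)
discard dag.addHeadBlock(verifier, b1, nilCallback)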