14 changes: 6 additions & 8 deletions beacon_chain/gossip_processing/block_processor.nim
@@ -236,7 +236,6 @@ proc storeBackfillBlock(
   self.consensusManager.quarantine[].missing.del(signedBlock.root)
   var
     columnsOk = true
-    malformed_cols: HashSet[int]
 
   when signedBlock is gloas.SignedBeaconBlock:
     # For Gloas, we still need to store the columns if they're provided
@@ -252,17 +251,14 @@ proc storeBackfillBlock(
       if columns.len > 0 and kzgCommits.len > 0:
         for i in 0..<columns.len:
           let r = verify_data_column_sidecar_kzg_proofs(columns[i][])
-          if r.isErr:
-            malformed_cols.incl(i)
+          if r.isErr():
             debug "backfill data column validation failed",
               blockRoot = shortLog(signedBlock.root),
               column_sidecar = shortLog(columns[i][]),
               blck = shortLog(signedBlock.message),
               signature = shortLog(signedBlock.signature),
               msg = r.error()
-
-        columnsOk = (dataColumnsOpt.get.lenu64 - malformed_cols.lenu64) >=
-          (self.consensusManager.dag.cfg.NUMBER_OF_COLUMNS div 2)
+          columnsOk = r.isOk()
Comment on lines 252 to +261
Contributor: Wouldn't columnsOk always end up as the result of the last column, since there is no break?

Contributor Author: That is fine, actually - if it is the result of the last column, we want the entirety of the DataColumnSidecars to be columnsOk = true.

Member: Generally, these "ok"-style variables with nested if conditions are pretty hard to read - avoid them if possible, i.e. pull the logic out into its own function and return an error directly instead - see #7518

Contributor Author: The ok-style variables existed before because we didn't have overloaded functions for storeBlock and storeBackfillBlock, which is why block-addition decisions were based on different combinations of blobsOk + columnsOk. I agree - once that is simplified, cleaning this up makes sense.

Member: One of the best opportunities to do cleanup work like this is when you're reading the "previous" version of the code to understand what it's doing, as part of the due diligence of adding the new version - this turns the passive reading into an active endeavor, which helps cement the understanding of the existing code, i.e. just before adding the fulu fork. That way, the existing code gets cleaned up on a regular basis and the new fork you're adding starts out clean.
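
A minimal sketch of the refactor suggested above - the helper name verifyBackfillColumns is hypothetical, and the sketch assumes the module's existing imports (results, chronicles, the data column spec types); it illustrates the "return an error directly" pattern rather than the actual implementation:

# Hypothetical helper: verify every column sidecar up front and fail fast,
# instead of threading an "ok" flag through nested conditions.
proc verifyBackfillColumns(
    columns: DataColumnSidecars): Result[void, VerifierError] =
  for column in columns:
    let r = verify_data_column_sidecar_kzg_proofs(column[])
    if r.isErr():
      debug "backfill data column validation failed", msg = r.error()
      return err(VerifierError.Invalid)
  ok()

The call site in storeBackfillBlock could then check this single result and return err(VerifierError.Invalid) directly, removing the columnsOk variable.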


   if not columnsOk:
     return err(VerifierError.Invalid)
@@ -286,9 +282,9 @@ proc storeBackfillBlock(
 
   # Only store data columns after successfully establishing block viability.
   let columns = dataColumnsOpt.valueOr: DataColumnSidecars @[]
+  debug "Inserting columns into database (backfill)",
+    indices = columns.mapIt($it[].index)
   for i in 0..<columns.len:
-    if i in malformed_cols:
-      continue
     self.consensusManager.dag.db.putDataColumnSidecar(columns[i][])
 
   res
@@ -758,6 +754,8 @@ proc storeBlock(
 
   # write data columns now that block has been written
   let data_columns = dataColumnsOpt.valueOr: DataColumnSidecars @[]
+  debug "Inserting columns into database",
+    indices = data_columns.mapIt($it.index)
   for col in data_columns:
     self.consensusManager.dag.db.putDataColumnSidecar(col[])
 
17 changes: 6 additions & 11 deletions beacon_chain/nimbus_beacon_node.nim
@@ -427,9 +427,7 @@ proc initFullNode(
         dag.cfg.CUSTODY_REQUIREMENT
     custodyColumns =
       dag.cfg.resolve_columns_from_custody_groups(
-        node.network.nodeId,
-        max(dag.cfg.SAMPLES_PER_SLOT.uint64,
-          localCustodyGroups))
+        node.network.nodeId, localCustodyGroups)
 
   var sortedColumns = custodyColumns.toSeq()
   sort(sortedColumns)
@@ -606,8 +604,7 @@ proc initFullNode(
   if node.config.peerdasSupernode:
     node.network.loadCgcnetMetadataAndEnr(dag.cfg.NUMBER_OF_CUSTODY_GROUPS.uint8)
   else:
-    node.network.loadCgcnetMetadataAndEnr(max(dag.cfg.SAMPLES_PER_SLOT.uint8,
-      dag.cfg.CUSTODY_REQUIREMENT.uint8))
+    node.network.loadCgcnetMetadataAndEnr(dag.cfg.CUSTODY_REQUIREMENT.uint8)
 
   if node.config.lightClientDataServe:
     proc scheduleSendingLightClientUpdates(slot: Slot) =
@@ -1345,7 +1342,7 @@ proc addFuluMessageHandlers(
     targetSubnets = node.readCustodyGroupSubnets()
     custody = node.dag.cfg.get_custody_groups(
       node.network.nodeId,
-      max(node.dag.cfg.SAMPLES_PER_SLOT, targetSubnets.uint64))
+      targetSubnets.uint64)
 
   for i in custody:
     let topic = getDataColumnSidecarTopic(forkDigest, i)
@@ -1392,8 +1389,7 @@ proc removeFuluMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
   let
     targetSubnets = node.readCustodyGroupSubnets()
     custody = node.dag.cfg.get_custody_groups(
-      node.network.nodeId,
-      max(node.dag.cfg.SAMPLES_PER_SLOT, targetSubnets.uint64))
+      node.network.nodeId, targetSubnets.uint64)
 
   for i in custody:
     let topic = getDataColumnSidecarTopic(forkDigest, i)
@@ -1974,11 +1970,10 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
   # Update CGC and metadata with respect to the new detected validator custody
   let new_vcus = CgcCount node.validatorCustody.newer_column_set.lenu64
 
-  if new_vcus > node.dag.cfg.SAMPLES_PER_SLOT.uint8:
+  if new_vcus > node.dag.cfg.CUSTODY_REQUIREMENT.uint8:
     node.network.loadCgcnetMetadataAndEnr(new_vcus)
   else:
-    node.network.loadCgcnetMetadataAndEnr(max(node.dag.cfg.SAMPLES_PER_SLOT.uint8,
-      node.dag.cfg.CUSTODY_REQUIREMENT.uint8))
+    node.network.loadCgcnetMetadataAndEnr(node.dag.cfg.CUSTODY_REQUIREMENT.uint8)
 
   info "New validator custody count detected",
     custody_columns = node.dataColumnQuarantine.custodyColumns.len
2 changes: 1 addition & 1 deletion beacon_chain/sync/validator_custody.nim
@@ -73,7 +73,7 @@ proc detectNewValidatorCustody*(vcus: ValidatorCustodyRef,
       newer_columns =
         vcus.dag.cfg.resolve_columns_from_custody_groups(
           vcus.network.nodeId,
-          max(vcus.dag.cfg.SAMPLES_PER_SLOT.uint64,
+          max(vcus.dag.cfg.CUSTODY_REQUIREMENT.uint64,
            vcustody))
 
   debug "New validator custody count detected",
4 changes: 1 addition & 3 deletions beacon_chain/validators/message_router.nim
@@ -178,11 +178,9 @@ proc routeSignedBeaconBlock*(
   # Push only those columns to processor for which we custody
   let
     metadata = router[].network.metadata.custody_group_count
-    samples_per_slot = router[].network.cfg.SAMPLES_PER_SLOT
     custody_columns =
       router[].network.cfg.resolve_columns_from_custody_groups(
-        router[].network.nodeId,
-        max(samples_per_slot, metadata))
+        router[].network.nodeId, metadata)
 
   var final_columns: seq[ref fulu.DataColumnSidecar]
   for dc in dataColumns: