From a34c5110c6c832020775ffdc97a9f4f0b098f2b3 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Tue, 29 Apr 2025 15:56:57 +0530 Subject: [PATCH 1/6] save work --- beacon_chain/spec/datatypes/fulu.nim | 13 +++++ beacon_chain/sync/request_manager.nim | 84 ++++++++++++--------------- beacon_chain/sync/sync_protocol.nim | 39 +++++++------ 3 files changed, 72 insertions(+), 64 deletions(-) diff --git a/beacon_chain/spec/datatypes/fulu.nim b/beacon_chain/spec/datatypes/fulu.nim index fed5af63a3..c137e9fc93 100644 --- a/beacon_chain/spec/datatypes/fulu.nim +++ b/beacon_chain/spec/datatypes/fulu.nim @@ -113,6 +113,10 @@ type block_root*: Eth2Digest index*: ColumnIndex + DataColumnsByRootIdentifier* = object + block_root*: Eth2Digest + indices*: List[ColumnIndex, Limit NUMBER_OF_COLUMNS] + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#matrixentry MatrixEntry* = object cell*: Cell @@ -615,6 +619,15 @@ func shortLog*(v: seq[DataColumnSidecar]): auto = func shortLog*(x: seq[DataColumnIdentifier]): string = "[" & x.mapIt(shortLog(it.block_root) & "/" & $it.index).join(", ") & "]" +func shortLog*(xs: seq[DataColumnsByRootIdentifier]): string = + ## Formats like: [abcd…/0,2,4, ef09…/1,3] + "[" & + xs.mapIt( + shortLog(it.block_root) & "/" & + it.indices.mapIt($it).join(",") + ).join(", ") & + "]" + func shortLog*(x: seq[ColumnIndex]): string = "<" & x.mapIt($it).join(", ") & ">" diff --git a/beacon_chain/sync/request_manager.nim b/beacon_chain/sync/request_manager.nim index 7efcb6900a..1571add781 100644 --- a/beacon_chain/sync/request_manager.nim +++ b/beacon_chain/sync/request_manager.nim @@ -8,6 +8,7 @@ {.push raises: [].} import chronos, chronicles +import ssz_serialization/types import ../spec/[forks, network, peerdas_helpers], ../networking/eth2_network, @@ -57,7 +58,7 @@ type blobId: BlobIdentifier): Opt[ref BlobSidecar] {.gcsafe, raises: [].} DataColumnLoaderFn = proc( - columnId: DataColumnIdentifier): + columnIds: DataColumnsByRootIdentifier): Opt[ref DataColumnSidecar] {.gcsafe, raises: [].} InhibitFn = proc: bool {.gcsafe, raises: [].} @@ -130,6 +131,9 @@ func cmpSidecarIdentifier(x: BlobIdentifier | DataColumnIdentifier, y: ref BlobSidecar | ref DataColumnSidecar): int = cmp(x.index, y[].index) +func cmpColumnIndex(x: ColumnIndex, y: ref DataColumnSidecar): int = + cmp(x, y[].index) + func checkResponseSanity(idList: seq[BlobIdentifier], blobs: openArray[ref BlobSidecar]): bool = # Cannot respond more than what I have asked @@ -162,36 +166,24 @@ func checkResponseSubset(idList: seq[BlobIdentifier], return false true -func checkResponseSanity(idList: seq[DataColumnIdentifier], - columns: openArray[ref DataColumnSidecar]): bool = - # Cannot respond more than what I have asked - if columns.len > idList.len: - return false - var i = 0 - while i < columns.len: - let - block_root = - hash_tree_root(columns[i][].signed_block_header.message) - idListKey = binarySearch(idList, columns[i], cmpSidecarIdentifier) - - # Verify the block root - if idList[idListKey].block_root != block_root: - return false - - # Verify inclusion proof - columns[i][].verify_data_column_sidecar_inclusion_proof().isOkOr: - return false - inc i - true - -func checkResponseSubset(idList: seq[DataColumnIdentifier], - columns: openArray[ref DataColumnSidecar]): bool = - ## Clients MUST respond with at least one sidecar, if they have it. - ## Clients MAY limit the number of blocks and sidecars in the response. 
- ## https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1 - for col in columns: - if binarySearch(idList, col, cmpSidecarIdentifier) == -1: +func checkColumnResponse(idList: seq[DataColumnsByRootIdentifier], + columns: openArray[ref DataColumnSidecar]): bool = + for colresp in columns: + let block_root = + hash_tree_root(colresp[].signed_block_header.message) + if block_root notin idList.mapIt(it.block_root): + # received a response that does not match the + # block root of any of the items that were requested return false + for id in idList: + if id.block_root == block_root: + if binarySearch(id.indices.asSeq, colresp, cmpColumnIndex) == -1: + # at the common block root level, the response + # is NOT a subset of the request ids + return false + # verify the inclusion proof + colresp[].verify_data_column_sidecar_inclusion_proof().isOkOr: + return false true proc requestBlocksByRoot(rman: RequestManager, items: seq[Eth2Digest]) {.async: (raises: [CancelledError]).} = @@ -364,30 +356,24 @@ proc checkPeerCustody(rman: RequestManager, return false proc fetchDataColumnsFromNetwork(rman: RequestManager, - colIdList: seq[DataColumnIdentifier]) + colIdList: seq[DataColumnsByRootIdentifier]) {.async: (raises: [CancelledError]).} = var peer = await rman.network.peerPool.acquire() try: if rman.checkPeerCustody(peer): debug "Requesting data columns by root", peer = peer, columns = shortLog(colIdList), peer_score = peer.getScore() - let columns = await dataColumnSidecarsByRoot(peer, DataColumnIdentifierList colIdList) + let columns = await dataColumnSidecarsByRoot(peer, DataColumnsByRootIdentifierList colIdList) if columns.isOk: var ucolumns = columns.get().asSeq() ucolumns.sort(cmpSidecarIndexes) - if not checkResponseSubset(colIdList, ucolumns): + if not checkColumnResponse(colIdList, ucolumns): debug "Response to columns by root is not a subset", peer = peer, columns = shortLog(colIdList), ucolumns = len(ucolumns) peer.updateScore(PeerScoreBadResponse) return - if not checkResponseSanity(colIdList, ucolumns): - debug "Response to columns by root have erroneous block root", - peer = peer, columns = shortLog(colIdList), ucolumns = len(ucolumns) - peer.updateScore(PeerScoreBadResponse) - return - for col in ucolumns: rman.dataColumnQuarantine[].put(col) var curRoot: Eth2Digest @@ -575,7 +561,7 @@ proc requestManagerBlobLoop( blobs_count = len(blobIds), sync_speed = speed(start, finish) -proc getMissingDataColumns(rman: RequestManager): HashSet[DataColumnIdentifier] = +proc getMissingDataColumns(rman: RequestManager): HashSet[DataColumnsByRootIdentifier] = let wallTime = rman.getBeaconTime() wallSlot = wallTime.slotOrZero() @@ -584,7 +570,7 @@ proc getMissingDataColumns(rman: RequestManager): HashSet[DataColumnIdentifier] const waitDur = TimeDiff(nanoseconds: DATA_COLUMN_GOSSIP_WAIT_TIME_NS) var - fetches: HashSet[DataColumnIdentifier] + fetches: HashSet[DataColumnsByRootIdentifier] ready: seq[Eth2Digest] for columnless in rman.quarantine[].peekColumnless(): @@ -601,10 +587,16 @@ proc getMissingDataColumns(rman: RequestManager): HashSet[DataColumnIdentifier] warn "quarantine is missing data columns, but missing indices are empty", blk = columnless.root, commitments = len(forkyBlck.message.body.blob_kzg_commitments) - for idx in missing.indices: - let id = DataColumnIdentifier(block_root: columnless.root, index: idx) - if id.index in rman.custody_columns_set and id notin fetches and - len(forkyBlck.message.body.blob_kzg_commitments) != 0: 
+ + let id = DataColumnsByRootIdentifier( + block_root: columnless.root, + indices: List[ColumnIndex, NUMBER_OF_COLUMNS].init(missing.indices)) + for index in id.indices: + if not(index in rman.custody_columns_set and id notin fetches and + len(forkyBlck.message.body.blob_kzg_commitments) != 0): + # do not include to fetches + discard + else: fetches.incl(id) else: # this is a programming error and it not should occur @@ -631,7 +623,7 @@ proc requestManagerDataColumnLoop( if missingColumnIds.len == 0: continue - var columnIds: seq[DataColumnIdentifier] + var columnIds: seq[DataColumnsByRootIdentifier] if rman.dataColumnLoader == nil: for item in missingColumnIds: columnIds.add item diff --git a/beacon_chain/sync/sync_protocol.nim b/beacon_chain/sync/sync_protocol.nim index 4b1450655e..07f9e74153 100644 --- a/beacon_chain/sync/sync_protocol.nim +++ b/beacon_chain/sync/sync_protocol.nim @@ -41,6 +41,8 @@ type BlobIdentifier, Limit MAX_SUPPORTED_REQUEST_BLOB_SIDECARS] DataColumnIdentifierList* = List[ DataColumnIdentifier, Limit (MAX_REQUEST_DATA_COLUMN_SIDECARS)] + DataColumnsByRootIdentifierList* = List[ + DataColumnsByRootIdentifier, Limit (MAX_REQUEST_BLOCKS_DENEB)] proc readChunkPayload*( conn: Connection, peer: Peer, MsgType: type (ref ForkedSignedBeaconBlock)): @@ -393,7 +395,7 @@ p2pProtocol BeaconSync(version = 1, # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1 proc dataColumnSidecarsByRoot( peer: Peer, - colIds: DataColumnIdentifierList, + colIds: DataColumnsByRootIdentifierList, response: MultipleChunksResponse[ ref DataColumnSidecar, Limit(MAX_REQUEST_DATA_COLUMN_SIDECARS)]) {.async, libp2pProtocol("data_column_sidecars_by_root", 1).} = @@ -402,7 +404,7 @@ p2pProtocol BeaconSync(version = 1, if colIds.len == 0: raise newException(InvalidInputsError, "No data columns request for root") - if colIds.lenu64 > MAX_REQUEST_DATA_COLUMN_SIDECARS: + if colIds.lenu64 > MAX_REQUEST_BLOCKS_DENEB: raise newException(InvalidInputsError, "Exceeding data column request limit") let @@ -417,25 +419,26 @@ p2pProtocol BeaconSync(version = 1, let blockRef = dag.getBlockRef(colIds[i].block_root).valueOr: continue - let index = - colIds[i].index - if dag.db.getDataColumnSidecarSZ(blockRef.bid.root, index, bytes): - let uncompressedLen = uncompressedLenFramed(bytes).valueOr: - warn "Cannot read data column size, database corrupt?", - bytes = bytes.len, blck = shortLog(blockRef), columnIndex = index - continue + let indices = + colIds[i].indices + for id in indices: + if dag.db.getDataColumnSidecarSZ(blockRef.bid.root, id, bytes): + let uncompressedLen = uncompressedLenFramed(bytes).valueOr: + warn "Cannot read data column size, database corrupt?", + bytes = bytes.len, blck = shortLog(blockRef), columnIndex = id + continue - peer.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_root/1") - peer.network.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_root/1") + peer.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_root/1") + peer.network.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_root/1") - await response.writeBytesSZ( - uncompressedLen, bytes, - peer.network.forkDigestAtEpoch(blockRef.slot.epoch).data) - inc found + await response.writeBytesSZ( + uncompressedLen, bytes, + peer.network.forkDigestAtEpoch(blockRef.slot.epoch).data) + inc found - # additional logging for devnets - debug "responsded to data column sidecar by root request", - peer, blck = shortLog(blockRef), columnIndex = 
index + # additional logging for devnets + debug "responsded to data column sidecar by root request", + peer, blck = shortLog(blockRef), columnIndex = id debug "Data column root request done", peer, roots = colIds.len, count, found From 53b5e6a4790f5a79550b6e2740e21de8dcae74e2 Mon Sep 17 00:00:00 2001 From: agnxsh Date: Tue, 29 Apr 2025 17:34:31 +0530 Subject: [PATCH 2/6] refactor more, save work --- beacon_chain/nimbus_beacon_node.nim | 13 +++++++------ beacon_chain/spec/datatypes/fulu.nim | 3 ++- beacon_chain/sync/request_manager.nim | 17 ++++++++--------- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 769167e094..c0d48c58df 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -482,13 +482,14 @@ proc initFullNode( Opt.some blob_sidecar else: Opt.none(ref BlobSidecar) - rmanDataColumnLoader = proc( - columnId: DataColumnIdentifier): Opt[ref DataColumnSidecar] = + rmanDataColumnsLoader = proc( + columnId: DataColumnsByRootIdentifier): Opt[ref DataColumnSidecar] = var data_column_sidecar = DataColumnSidecar.new() - if dag.db.getDataColumnSidecar(columnId.block_root, columnId.index, data_column_sidecar[]): - Opt.some data_column_sidecar - else: - Opt.none(ref DataColumnSidecar) + for index in columnId.indices: + if dag.db.getDataColumnSidecar(columnId.block_root, index, data_column_sidecar[]): + Opt.some data_column_sidecar + else: + Opt.none(ref DataColumnSidecar) processor = Eth2Processor.new( config.doppelgangerDetection, diff --git a/beacon_chain/spec/datatypes/fulu.nim b/beacon_chain/spec/datatypes/fulu.nim index c137e9fc93..3f7b3bb5a7 100644 --- a/beacon_chain/spec/datatypes/fulu.nim +++ b/beacon_chain/spec/datatypes/fulu.nim @@ -95,6 +95,7 @@ type type DataColumn* = List[KzgCell, Limit(MAX_BLOB_COMMITMENTS_PER_BLOCK)] + ColumnIndices* = List[ColumnIndex, Limit(NUMBER_OF_COLUMNS)] # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#datacolumnsidecar DataColumnSidecar* = object @@ -115,7 +116,7 @@ type DataColumnsByRootIdentifier* = object block_root*: Eth2Digest - indices*: List[ColumnIndex, Limit NUMBER_OF_COLUMNS] + indices*: ColumnIndices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#matrixentry MatrixEntry* = object diff --git a/beacon_chain/sync/request_manager.nim b/beacon_chain/sync/request_manager.nim index 1571add781..cd532f7164 100644 --- a/beacon_chain/sync/request_manager.nim +++ b/beacon_chain/sync/request_manager.nim @@ -57,9 +57,9 @@ type BlobLoaderFn = proc( blobId: BlobIdentifier): Opt[ref BlobSidecar] {.gcsafe, raises: [].} - DataColumnLoaderFn = proc( + DataColumnsLoaderFn = proc( columnIds: DataColumnsByRootIdentifier): - Opt[ref DataColumnSidecar] {.gcsafe, raises: [].} + Opt[DataColumnSidecars] {.gcsafe, raises: [].} InhibitFn = proc: bool {.gcsafe, raises: [].} @@ -561,7 +561,7 @@ proc requestManagerBlobLoop( blobs_count = len(blobIds), sync_speed = speed(start, finish) -proc getMissingDataColumns(rman: RequestManager): HashSet[DataColumnsByRootIdentifier] = +proc getMissingDataColumns(rman: RequestManager): seq[DataColumnsByRootIdentifier] = let wallTime = rman.getBeaconTime() wallSlot = wallTime.slotOrZero() @@ -570,7 +570,7 @@ proc getMissingDataColumns(rman: RequestManager): HashSet[DataColumnsByRootIdent const waitDur = TimeDiff(nanoseconds: DATA_COLUMN_GOSSIP_WAIT_TIME_NS) var - fetches: HashSet[DataColumnsByRootIdentifier] + fetches: 
seq[DataColumnsByRootIdentifier] ready: seq[Eth2Digest] for columnless in rman.quarantine[].peekColumnless(): @@ -590,14 +590,14 @@ proc getMissingDataColumns(rman: RequestManager): HashSet[DataColumnsByRootIdent let id = DataColumnsByRootIdentifier( block_root: columnless.root, - indices: List[ColumnIndex, NUMBER_OF_COLUMNS].init(missing.indices)) - for index in id.indices: + indices: ColumnIndices.init(missing.indices)) + for index in id.indices.asSeq: if not(index in rman.custody_columns_set and id notin fetches and len(forkyBlck.message.body.blob_kzg_commitments) != 0): # do not include to fetches discard else: - fetches.incl(id) + fetches.add(id) else: # this is a programming error and it not should occur warn "missing column handler found columnless block with all data columns", @@ -625,8 +625,7 @@ proc requestManagerDataColumnLoop( var columnIds: seq[DataColumnsByRootIdentifier] if rman.dataColumnLoader == nil: - for item in missingColumnIds: - columnIds.add item + columnIds = missingColumnIds else: var blockRoots: seq[Eth2Digest] From 7b63b85c2fa4ed9efcc7df71dcbd301878218cb7 Mon Sep 17 00:00:00 2001 From: agnxsh Date: Tue, 29 Apr 2025 19:25:54 +0530 Subject: [PATCH 3/6] add spec --- beacon_chain/nimbus_beacon_node.nim | 13 ++++++------- beacon_chain/sync/request_manager.nim | 26 +++++++++++++++----------- 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index c0d48c58df..769167e094 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -482,14 +482,13 @@ proc initFullNode( Opt.some blob_sidecar else: Opt.none(ref BlobSidecar) - rmanDataColumnsLoader = proc( - columnId: DataColumnsByRootIdentifier): Opt[ref DataColumnSidecar] = + rmanDataColumnLoader = proc( + columnId: DataColumnIdentifier): Opt[ref DataColumnSidecar] = var data_column_sidecar = DataColumnSidecar.new() - for index in columnId.indices: - if dag.db.getDataColumnSidecar(columnId.block_root, index, data_column_sidecar[]): - Opt.some data_column_sidecar - else: - Opt.none(ref DataColumnSidecar) + if dag.db.getDataColumnSidecar(columnId.block_root, columnId.index, data_column_sidecar[]): + Opt.some data_column_sidecar + else: + Opt.none(ref DataColumnSidecar) processor = Eth2Processor.new( config.doppelgangerDetection, diff --git a/beacon_chain/sync/request_manager.nim b/beacon_chain/sync/request_manager.nim index cd532f7164..70ab1fa61d 100644 --- a/beacon_chain/sync/request_manager.nim +++ b/beacon_chain/sync/request_manager.nim @@ -57,9 +57,9 @@ type BlobLoaderFn = proc( blobId: BlobIdentifier): Opt[ref BlobSidecar] {.gcsafe, raises: [].} - DataColumnsLoaderFn = proc( - columnIds: DataColumnsByRootIdentifier): - Opt[DataColumnSidecars] {.gcsafe, raises: [].} + DataColumnLoaderFn = proc( + columnIds: DataColumnIdentifier): + Opt[ref DataColumnSidecar] {.gcsafe, raises: [].} InhibitFn = proc: bool {.gcsafe, raises: [].} @@ -634,14 +634,18 @@ proc requestManagerDataColumnLoop( if columnId.block_root != curRoot: curRoot = columnId.block_root blockRoots.add curRoot - let data_column_sidecar = rman.dataColumnLoader(columnId).valueOr: - columnIds.add columnId - if blockRoots.len > 0 and blockRoots[^1] == curRoot: - # A data column is missing, remove from list of fully available data columns - discard blockRoots.pop() - continue - debug "Loaded orphaned data columns from storage", columnId - rman.dataColumnQuarantine[].put(data_column_sidecar) + for index in columnId.indices: + let loaderElem = 
DataColumnIdentifier( + block_root: columnId.block_root, + index: index) + let data_column_sidecar = rman.dataColumnLoader(loaderElem).valueOr: + columnIds.add columnId + if blockRoots.len > 0 and blockRoots[^1] == curRoot: + # A data column is missing, remove from list of fully available data columns + discard blockRoots.pop() + continue + debug "Loaded orphaned data columns from storage", columnId + rman.dataColumnQuarantine[].put(data_column_sidecar) var verifiers = newSeqOfCap[ Future[Result[void, VerifierError]] .Raising([CancelledError])](blockRoots.len) From 3848e271257a4fac9ed8d94446de0bf4d72a8a59 Mon Sep 17 00:00:00 2001 From: agnxsh Date: Thu, 1 May 2025 15:51:44 +0530 Subject: [PATCH 4/6] address reviews --- beacon_chain/spec/datatypes/fulu.nim | 5 +++-- beacon_chain/sync/request_manager.nim | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/beacon_chain/spec/datatypes/fulu.nim b/beacon_chain/spec/datatypes/fulu.nim index 3f7b3bb5a7..76e70460d5 100644 --- a/beacon_chain/spec/datatypes/fulu.nim +++ b/beacon_chain/spec/datatypes/fulu.nim @@ -95,7 +95,7 @@ type type DataColumn* = List[KzgCell, Limit(MAX_BLOB_COMMITMENTS_PER_BLOCK)] - ColumnIndices* = List[ColumnIndex, Limit(NUMBER_OF_COLUMNS)] + DataColumnIndices* = List[ColumnIndex, Limit(NUMBER_OF_COLUMNS)] # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#datacolumnsidecar DataColumnSidecar* = object @@ -114,9 +114,10 @@ type block_root*: Eth2Digest index*: ColumnIndex + # https://github.com/ethereum/consensus-specs/blob/b8b5fbb8d16f52d42a716fa93289062fe2124c7c/specs/fulu/p2p-interface.md#datacolumnsbyrootidentifier DataColumnsByRootIdentifier* = object block_root*: Eth2Digest - indices*: ColumnIndices + indices*: DataColumnIndices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#matrixentry MatrixEntry* = object diff --git a/beacon_chain/sync/request_manager.nim b/beacon_chain/sync/request_manager.nim index 70ab1fa61d..091f3a4616 100644 --- a/beacon_chain/sync/request_manager.nim +++ b/beacon_chain/sync/request_manager.nim @@ -590,7 +590,7 @@ proc getMissingDataColumns(rman: RequestManager): seq[DataColumnsByRootIdentifie let id = DataColumnsByRootIdentifier( block_root: columnless.root, - indices: ColumnIndices.init(missing.indices)) + indices: DataColumnIndices.init(missing.indices)) for index in id.indices.asSeq: if not(index in rman.custody_columns_set and id notin fetches and len(forkyBlck.message.body.blob_kzg_commitments) != 0): From 95c3d67f16b2c0d059bb43264d6f7cbd1386d21f Mon Sep 17 00:00:00 2001 From: agnxsh Date: Thu, 1 May 2025 16:01:44 +0530 Subject: [PATCH 5/6] updated link --- beacon_chain/sync/sync_protocol.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_chain/sync/sync_protocol.nim b/beacon_chain/sync/sync_protocol.nim index 07f9e74153..aca97a50bc 100644 --- a/beacon_chain/sync/sync_protocol.nim +++ b/beacon_chain/sync/sync_protocol.nim @@ -392,7 +392,7 @@ p2pProtocol BeaconSync(version = 1, peer.networkState.dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, peer.networkState.dag.cfg.MAX_REQUEST_BLOB_SIDECARS_ELECTRA) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1 + # https://github.com/ethereum/consensus-specs/blob/b8b5fbb8d16f52d42a716fa93289062fe2124c7c/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1 proc dataColumnSidecarsByRoot( peer: Peer, colIds: DataColumnsByRootIdentifierList, From 
f490c893ea4a54c87f5ac5fad0628b45f491f00c Mon Sep 17 00:00:00 2001
From: agnxsh
Date: Thu, 1 May 2025 16:03:40 +0530
Subject: [PATCH 6/6] that shouldn't be columnIds

---
 beacon_chain/sync/request_manager.nim | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/beacon_chain/sync/request_manager.nim b/beacon_chain/sync/request_manager.nim
index 091f3a4616..7ef0a61b45 100644
--- a/beacon_chain/sync/request_manager.nim
+++ b/beacon_chain/sync/request_manager.nim
@@ -58,7 +58,7 @@ type
     blobId: BlobIdentifier): Opt[ref BlobSidecar] {.gcsafe, raises: [].}
 
   DataColumnLoaderFn = proc(
-      columnIds: DataColumnIdentifier):
+      columnId: DataColumnIdentifier):
       Opt[ref DataColumnSidecar] {.gcsafe, raises: [].}
 
   InhibitFn = proc: bool {.gcsafe, raises: [].}
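
Taken together, the patches replace the per-(block_root, index) DataColumnIdentifier request items with one DataColumnsByRootIdentifier per block root carrying every wanted column index, and checkColumnResponse then accepts a returned sidecar only if its (block_root, index) pair lies inside that requested set. The standalone Nim sketch below illustrates just that grouping and subset rule under simplified assumptions: BlockRoot stands in for Eth2Digest, plain seqs stand in for the SSZ List and HashSet types, the helper names groupByRoot and covered are illustrative rather than part of the patch, and the KZG inclusion-proof check done by verify_data_column_sidecar_inclusion_proof is omitted.

# Standalone sketch, not part of the patches. Stand-in types only, so it
# compiles on its own with `nim r sketch.nim`.
type
  BlockRoot = array[32, byte]      # stand-in for Eth2Digest
  ColumnIndex = uint64
  ColumnsByRootId = object         # mirrors DataColumnsByRootIdentifier
    blockRoot: BlockRoot
    indices: seq[ColumnIndex]      # the real field is bounded by NUMBER_OF_COLUMNS

# Collapse per-(root, index) wants into one identifier per block root,
# which is the shape the by-root request now carries on the wire.
proc groupByRoot(wanted: openArray[(BlockRoot, ColumnIndex)]): seq[ColumnsByRootId] =
  for (root, idx) in wanted:
    var merged = false
    for id in result.mitems:
      if id.blockRoot == root:
        if idx notin id.indices:
          id.indices.add idx
        merged = true
        break
    if not merged:
      result.add ColumnsByRootId(blockRoot: root, indices: @[idx])

# A response column is acceptable only if its (root, index) pair is covered
# by the grouped request: the subset rule checkColumnResponse enforces,
# minus the inclusion-proof check that needs the real sidecar type.
proc covered(ids: openArray[ColumnsByRootId],
             root: BlockRoot, idx: ColumnIndex): bool =
  for id in ids:
    if id.blockRoot == root:
      return idx in id.indices
  false

when isMainModule:
  var r1, r2: BlockRoot
  r1[0] = 1
  r2[0] = 2
  let req = groupByRoot([(r1, 0'u64), (r1, 2'u64), (r2, 1'u64)])
  doAssert req.len == 2            # two block roots -> two identifiers
  doAssert covered(req, r1, 2'u64)
  doAssert not covered(req, r2, 0'u64)

Grouping by root also changes the request bound: the by-root list is limited per block (MAX_REQUEST_BLOCKS_DENEB entries, as in DataColumnsByRootIdentifierList) rather than per sidecar, which is why dataColumnSidecarsByRoot in sync_protocol.nim now rejects oversized requests against MAX_REQUEST_BLOCKS_DENEB instead of MAX_REQUEST_DATA_COLUMN_SIDECARS.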