Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -207,7 +207,12 @@ jobs:
- name: Upload combined results
uses: actions/upload-artifact@v4
with:
name: Unit Test Results ${{ matrix.target.os }}-${{ matrix.target.cpu }}
# upload-artifact requires avoiding "/", because "To maintain file
# system agnostic behavior, these characters are intentionally not
# allowed to prevent potential problems with downloads on different
# file systems". However, GitHub Actions workflows do not support a
# usual assortment of string functions.
name: Unit Test Results ${{ matrix.target.os }}-${{ matrix.target.cpu }}-${{ matrix.branch == 'upstream/version-2-0' && 'version-2-0' || matrix.branch }}
path: build/*.xml

devbuild:
Expand Down
16 changes: 7 additions & 9 deletions beacon_chain/consensus_object_pools/blockchain_dag.nim
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
Expand Down Expand Up @@ -1639,11 +1639,11 @@ template forkAtEpoch*(dag: ChainDAGRef, epoch: Epoch): Fork =
forkAtEpoch(dag.cfg, epoch)

proc getBlockRange*(
dag: ChainDAGRef, startSlot: Slot, skipStep: uint64,
dag: ChainDAGRef, startSlot: Slot,
output: var openArray[BlockId]): Natural =
## This function populates an `output` buffer of blocks
## with slots ranging from `startSlot` up to, but not including,
## `startSlot + skipStep * output.len`, skipping any slots that don't have
## `startSlot + output.len`, skipping any slots that don't have
## a block.
##
## Blocks will be written to `output` from the end without gaps, even if
Expand All @@ -1657,7 +1657,7 @@ proc getBlockRange*(
headSlot = dag.head.slot

trace "getBlockRange entered",
head = shortLog(dag.head.root), requestedCount, startSlot, skipStep, headSlot
head = shortLog(dag.head.root), requestedCount, startSlot, headSlot

if startSlot < dag.backfill.slot:
debug "Got request for pre-backfill slot",
Expand All @@ -1671,11 +1671,9 @@ proc getBlockRange*(
runway = uint64(headSlot - startSlot)

# This is the number of blocks that will follow the start block
extraSlots = min(runway div skipStep, requestedCount - 1)
extraSlots = min(runway, requestedCount - 1)

# If `skipStep` is very large, `extraSlots` should be 0 from
# the previous line, so `endSlot` will be equal to `startSlot`:
endSlot = startSlot + extraSlots * skipStep
endSlot = startSlot + extraSlots

var
curSlot = endSlot
Expand All @@ -1687,7 +1685,7 @@ proc getBlockRange*(
if bs.isSome and bs.get().isProposed():
o -= 1
output[o] = bs.get().bid
curSlot -= skipStep
curSlot -= 1

# Handle start slot separately (to avoid underflow when computing curSlot)
let bs = dag.getBlockIdAtSlot(startSlot)
Expand Down
2 changes: 1 addition & 1 deletion beacon_chain/nimbus_beacon_node.nim
Original file line number Diff line number Diff line change
Expand Up @@ -1595,7 +1595,7 @@ proc pruneBlobs(node: BeaconNode, slot: Slot) =
var blocks: array[SLOTS_PER_EPOCH.int, BlockId]
var count = 0
let startIndex = node.dag.getBlockRange(
blobPruneEpoch.start_slot, 1, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1))
blobPruneEpoch.start_slot, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1))
for i in startIndex..<SLOTS_PER_EPOCH:
let blck = node.dag.getForkedBlock(blocks[int(i)]).valueOr: continue
withBlck(blck):
Expand Down
24 changes: 10 additions & 14 deletions beacon_chain/sync/sync_protocol.nim
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@

import
chronicles, chronos, snappy, snappy/codec,
../spec/datatypes/[phase0, altair, bellatrix, capella, deneb],
../spec/[helpers, forks, network],
".."/[beacon_clock],
../networking/eth2_network,
Expand Down Expand Up @@ -167,7 +166,7 @@ template getBlobSidecarsByRange(
count = int min(reqCount, blockIds.lenu64)
endIndex = count - 1
startIndex =
dag.getBlockRange(startSlot, 1, blockIds.toOpenArray(0, endIndex))
dag.getBlockRange(startSlot, blockIds.toOpenArray(0, endIndex))

var
found = 0
Expand Down Expand Up @@ -222,12 +221,11 @@ p2pProtocol BeaconSync(version = 1,
# client call that returns `seq[ref ForkedSignedBeaconBlock]`
# will be generated by the libp2p macro - we guarantee that seq items
# are `not-nil` in the implementation
# TODO reqStep is deprecated - future versions can remove support for
# values != 1: https://github.com/ethereum/consensus-specs/pull/2856

trace "got range request", peer, startSlot,
count = reqCount, step = reqStep
if reqCount == 0 or reqStep == 0:
trace "got range request", peer, startSlot, count = reqCount
# https://github.com/ethereum/consensus-specs/pull/2856
if reqStep != 1:
raise newException(InvalidInputsError, "Step size must be 1")
if reqCount == 0:
raise newException(InvalidInputsError, "Empty range requested")

var blocks: array[MAX_REQUEST_BLOCKS.int, BlockId]
Expand All @@ -236,9 +234,8 @@ p2pProtocol BeaconSync(version = 1,
# Limit number of blocks in response
count = int min(reqCount, blocks.lenu64)
endIndex = count - 1
startIndex =
dag.getBlockRange(startSlot, reqStep,
blocks.toOpenArray(0, endIndex))
startIndex = dag.getBlockRange(
startSlot, blocks.toOpenArray(0, endIndex))

var
found = 0
Expand Down Expand Up @@ -268,8 +265,7 @@ p2pProtocol BeaconSync(version = 1,

inc found

debug "Block range request done",
peer, startSlot, count, reqStep
debug "Block range request done", peer, startSlot, count

proc beaconBlocksByRoot_v2(
peer: Peer,
Expand Down Expand Up @@ -459,7 +455,7 @@ p2pProtocol BeaconSync(version = 1,
count = int min(reqCount, blockIds.lenu64)
endIndex = count - 1
startIndex =
dag.getBlockRange(startSlot, 1, blockIds.toOpenArray(0, endIndex))
dag.getBlockRange(startSlot, blockIds.toOpenArray(0, endIndex))

var
found = 0
Expand Down
13 changes: 8 additions & 5 deletions ncli/ncli_db.nim
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2020-2024 Status Research & Development GmbH
# Copyright (c) 2020-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
Expand All @@ -8,7 +8,7 @@
{.push raises: [].}

import
std/[os, stats, tables],
std/tables,
snappy,
chronicles, confutils, stew/[byteutils, io2], eth/db/kvstore_sqlite3,
../beacon_chain/networking/network_metadata,
Expand All @@ -21,6 +21,9 @@ import
../research/simutils,
./era, ./ncli_common, ./validator_db_aggregator

from std/os import createDir, dirExists, moveFile, `/`
from std/stats import RunningStat

when defined(posix):
import system/ansi_c

Expand Down Expand Up @@ -641,7 +644,7 @@ proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
if firstSlot.isSome():
withTimer(timers[tBlocks]):
var blocks: array[SLOTS_PER_HISTORICAL_ROOT.int, BlockId]
for i in dag.getBlockRange(firstSlot.get(), 1, blocks)..<blocks.len:
for i in dag.getBlockRange(firstSlot.get(), blocks)..<blocks.len:
if not dag.getBlockSZ(blocks[i], tmp):
break writeFileBlock
group.update(e2, blocks[i].slot, tmp).get()
Expand Down Expand Up @@ -901,11 +904,11 @@ proc createInsertValidatorProc(db: SqStoreRef): auto =
VALUES(?, ?);""",
(int64, array[48, byte]), void).expect("DB")

proc collectBalances(balances: var seq[uint64], forkedState: ForkedHashedBeaconState) =
func collectBalances(balances: var seq[uint64], forkedState: ForkedHashedBeaconState) =
withState(forkedState):
balances = seq[uint64](forkyState.data.balances.data)

proc calculateDelta(info: RewardsAndPenalties): int64 =
func calculateDelta(info: RewardsAndPenalties): int64 =
info.source_outcome +
info.target_outcome +
info.head_outcome +
Expand Down
17 changes: 7 additions & 10 deletions scripts/launch_local_testnet.sh
Original file line number Diff line number Diff line change
@@ -1,16 +1,13 @@
#!/usr/bin/env bash

# Copyright (c) 2020-2024 Status Research & Development GmbH. Licensed under
# Copyright (c) 2020-2025 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

# Mostly a duplication of "tests/simulation/{start.sh,run_node.sh}", but with a focus on
# replicating testnets as closely as possible, which means following the Docker execution labyrinth.

set -euo pipefail
set -Eeuo pipefail

SCRIPTS_DIR="$(dirname "${BASH_SOURCE[0]}")"
cd "$SCRIPTS_DIR/.."
Expand Down Expand Up @@ -881,7 +878,7 @@ fi

jq -r '.hash' "$EXECUTION_GENESIS_BLOCK_JSON" > "${DATA_DIR}/deposit_contract_block_hash.txt"

for NUM_NODE in $(seq 1 $NUM_NODES); do
for NUM_NODE in $(seq 1 "${NUM_NODES}"); do
NODE_DATA_DIR="${DATA_DIR}/node${NUM_NODE}"
rm -rf "${NODE_DATA_DIR}"
scripts/makedir.sh "${NODE_DATA_DIR}" 2>&1
Expand Down Expand Up @@ -922,7 +919,7 @@ DIRECTPEER_ENR=$(

cp "$SCRIPTS_DIR/$CONST_PRESET-non-overriden-config.yaml" "$RUNTIME_CONFIG_FILE"
# TODO the runtime config file should be used during deposit generation as well!
echo Wrote $RUNTIME_CONFIG_FILE:
echo Wrote "${RUNTIME_CONFIG_FILE}":
tee -a "$RUNTIME_CONFIG_FILE" <<EOF
PRESET_BASE: ${CONST_PRESET}
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: ${TOTAL_VALIDATORS}
Expand Down Expand Up @@ -1157,7 +1154,7 @@ for NUM_NODE in $(seq 1 "${NUM_NODES}"); do
fi
done

./build/${LH_BINARY} vc \
./build/"${LH_BINARY}" vc \
--debug-level "debug" \
--logfile-max-number 0 \
--log-format "JSON" \
Expand All @@ -1171,7 +1168,7 @@ for NUM_NODE in $(seq 1 "${NUM_NODES}"); do
else
./build/nimbus_validator_client \
--log-level="${LOG_LEVEL}" \
${STOP_AT_EPOCH_FLAG} \
"${STOP_AT_EPOCH_FLAG}" \
--data-dir="${VALIDATOR_DATA_DIR}" \
--metrics \
--metrics-port=$(( BASE_VC_METRICS_PORT + NUM_NODE - 1 )) \
Expand Down Expand Up @@ -1257,7 +1254,7 @@ if [ "$LC_NODES" -ge "1" ]; then
--trusted-block-root="${LC_TRUSTED_BLOCK_ROOT}" \
--jwt-secret="${JWT_FILE}" \
"${WEB3_ARG[@]}" \
${STOP_AT_EPOCH_FLAG} \
"${STOP_AT_EPOCH_FLAG}" \
&> "${DATA_DIR}/logs/nimbus_light_client.${NUM_LC}.jsonl" &
PID=$!
PIDS_TO_WAIT="${PIDS_TO_WAIT},${PID}"
Expand Down
24 changes: 0 additions & 24 deletions scripts/repo_paths.sh

This file was deleted.

34 changes: 5 additions & 29 deletions tests/test_blockchain_dag.nim
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
Expand Down Expand Up @@ -147,41 +147,17 @@ suite "Block pool processing" & preset():
var blocks: array[3, BlockId]

check:
dag.getBlockRange(Slot(0), 1, blocks.toOpenArray(0, 0)) == 0
dag.getBlockRange(Slot(0), blocks.toOpenArray(0, 0)) == 0
blocks[0..<1] == [dag.tail]

dag.getBlockRange(Slot(0), 1, blocks.toOpenArray(0, 1)) == 0
dag.getBlockRange(Slot(0), blocks.toOpenArray(0, 1)) == 0
blocks[0..<2] == [dag.tail, b1Add[].bid]

dag.getBlockRange(Slot(0), 2, blocks.toOpenArray(0, 1)) == 0
blocks[0..<2] == [dag.tail, b2Add[].bid]

dag.getBlockRange(Slot(0), 3, blocks.toOpenArray(0, 1)) == 1
blocks[1..<2] == [dag.tail] # block 3 is missing!

dag.getBlockRange(Slot(2), 2, blocks.toOpenArray(0, 1)) == 0
blocks[0..<2] == [b2Add[].bid, b4Add[].bid] # block 3 is missing!

# large skip step
dag.getBlockRange(Slot(0), uint64.high, blocks.toOpenArray(0, 2)) == 2
blocks[2..2] == [dag.tail]

# large skip step
dag.getBlockRange(Slot(2), uint64.high, blocks.toOpenArray(0, 1)) == 1
blocks[1..1] == [b2Add[].bid]

# empty length
dag.getBlockRange(Slot(2), 2, blocks.toOpenArray(0, -1)) == 0

# No blocks in sight
dag.getBlockRange(Slot(5), 1, blocks.toOpenArray(0, 1)) == 2
dag.getBlockRange(Slot(5), blocks.toOpenArray(0, 1)) == 2

# No blocks in sight
dag.getBlockRange(Slot(uint64.high), 1, blocks.toOpenArray(0, 1)) == 2

# No blocks in sight either due to gaps
dag.getBlockRange(Slot(3), 2, blocks.toOpenArray(0, 1)) == 2
blocks[2..<2].len == 0
dag.getBlockRange(Slot(uint64.high), blocks.toOpenArray(0, 1)) == 2

# A fork forces the clearance state to a point where it cannot be advanced
let
Expand Down