39 changes: 33 additions & 6 deletions beacon_node/beacon_chain/src/historical_blocks.rs
@@ -1,5 +1,5 @@
use crate::data_availability_checker::{AvailableBlock, AvailableBlockData};
use crate::{BeaconChain, BeaconChainTypes, metrics};
use crate::{BeaconChain, BeaconChainTypes, WhenSlotSkipped, metrics};
use itertools::Itertools;
use state_processing::{
per_block_processing::ParallelSignatureSets,
@@ -34,6 +34,8 @@ pub enum HistoricalBlockError {
ValidatorPubkeyCacheTimeout,
/// Logic error: should never occur.
IndexOutOfBounds,
/// Logic error: should never occur.
MissingOldestBlockRoot { slot: Slot },
/// Internal store error
StoreError(StoreError),
}
@@ -56,7 +58,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// `SignatureSetError` or `InvalidSignature` will be returned.
///
/// To align with sync we allow some excess blocks with slots greater than or equal to
/// `oldest_block_slot` to be provided. They will be ignored without being checked.
/// `oldest_block_slot` to be provided. They will be re-imported to fill the columns of the
/// checkpoint sync block.
///
/// This function should not be called concurrently with any other function that mutates
/// the anchor info (including this function itself). If a concurrent mutation occurs that
@@ -72,9 +75,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let blob_info = self.store.get_blob_info();
let data_column_info = self.store.get_data_column_info();

// Take all blocks with slots less than the oldest block slot.
// Take all blocks with slots less than or equal to the oldest block slot.
//
// This allows for reimport of the blobs/columns for the finalized block after checkpoint
// sync.
let num_relevant = blocks.partition_point(|available_block| {
available_block.block().slot() < anchor_info.oldest_block_slot
available_block.block().slot() <= anchor_info.oldest_block_slot
});

let total_blocks = blocks.len();
@@ -95,6 +101,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}

let mut expected_block_root = anchor_info.oldest_block_parent;
let mut last_block_root = expected_block_root;
let mut prev_block_slot = anchor_info.oldest_block_slot;
let mut new_oldest_blob_slot = blob_info.oldest_blob_slot;
let mut new_oldest_data_column_slot = data_column_info.oldest_data_column_slot;
@@ -107,7 +114,27 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
for available_block in blocks_to_import.into_iter().rev() {
let (block_root, block, block_data) = available_block.deconstruct();

if block_root != expected_block_root {
if block.slot() == anchor_info.oldest_block_slot {
// When reimporting, verify that this is actually the same block (same block root).
let oldest_block_root = self
.block_root_at_slot(block.slot(), WhenSlotSkipped::None)
.ok()
.flatten()
.ok_or(HistoricalBlockError::MissingOldestBlockRoot { slot: block.slot() })?;
if block_root != oldest_block_root {
return Err(HistoricalBlockError::MismatchedBlockRoot {
block_root,
expected_block_root: oldest_block_root,
});
}

debug!(
?block_root,
slot = %block.slot(),
"Re-importing historic block"
);
last_block_root = block_root;
} else if block_root != expected_block_root {
return Err(HistoricalBlockError::MismatchedBlockRoot {
block_root,
expected_block_root,
@@ -198,7 +225,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.ok_or(HistoricalBlockError::IndexOutOfBounds)?
.iter()
.map(|block| block.parent_root())
.chain(iter::once(anchor_info.oldest_block_parent));
.chain(iter::once(last_block_root));
let signature_set = signed_blocks
.iter()
.zip_eq(block_roots)
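The core of the change above is the switch from `<` to `<=` in the `partition_point` call, which keeps the block sitting exactly at `oldest_block_slot` in the batch so that its blobs or data columns can be re-imported. A minimal, self-contained sketch of that difference, using plain `u64` slots as a stand-in for `AvailableBlock`s:

```rust
fn main() {
    let oldest_block_slot: u64 = 96;
    // As in the batch import above, blocks are assumed to be sorted by ascending slot.
    let block_slots: Vec<u64> = vec![93, 94, 95, 96, 97, 98];

    // Previous behaviour: strictly-less-than excludes the block at the anchor slot.
    let strictly_before = block_slots.partition_point(|slot| *slot < oldest_block_slot);
    // New behaviour: less-than-or-equal includes it, allowing its data to be re-imported.
    let up_to_anchor = block_slots.partition_point(|slot| *slot <= oldest_block_slot);

    assert_eq!(strictly_before, 3); // 93, 94, 95
    assert_eq!(up_to_anchor, 4); // 93, 94, 95, 96
    println!("would import {up_to_anchor} of {} blocks", block_slots.len());
}
```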
4 changes: 2 additions & 2 deletions beacon_node/execution_layer/src/test_utils/mock_builder.rs
@@ -842,7 +842,7 @@ impl<E: EthSpec> MockBuilder<E> {
.beacon_client
.get_beacon_blocks::<E>(BlockId::Finalized)
.await
.map_err(|_| "couldn't get finalized block".to_string())?
.map_err(|e| format!("couldn't get finalized block: {e:?}"))?
.ok_or_else(|| "missing finalized block".to_string())?
.data()
.message()
@@ -855,7 +855,7 @@ impl<E: EthSpec> MockBuilder<E> {
.beacon_client
.get_beacon_blocks::<E>(BlockId::Justified)
.await
.map_err(|_| "couldn't get justified block".to_string())?
.map_err(|e| format!("couldn't get justified block: {e:?}"))?
.ok_or_else(|| "missing justified block".to_string())?
.data()
.message()
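The `map_err` changes above keep the underlying error in the message instead of discarding it. A small standalone illustration of the pattern, with a hypothetical `FetchError` type standing in for the real client error:

```rust
// `FetchError` is a made-up error type for illustration; the real code maps whatever
// error the beacon client returns.
#[derive(Debug)]
struct FetchError(&'static str);

fn fetch_finalized_block() -> Result<(), FetchError> {
    Err(FetchError("timeout"))
}

fn main() {
    // Before: the underlying cause is discarded.
    let before: Result<(), String> =
        fetch_finalized_block().map_err(|_| "couldn't get finalized block".to_string());
    // After: the cause is preserved via its Debug representation.
    let after: Result<(), String> =
        fetch_finalized_block().map_err(|e| format!("couldn't get finalized block: {e:?}"));

    assert_eq!(before.unwrap_err(), "couldn't get finalized block");
    assert_eq!(
        after.unwrap_err(),
        "couldn't get finalized block: FetchError(\"timeout\")"
    );
}
```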
@@ -804,6 +804,16 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
// The peer is faulty if they send bad signatures.
Some(PeerAction::LowToleranceError)
}
HistoricalBlockError::MissingOldestBlockRoot { slot } => {
warn!(
%slot,
error = "missing_oldest_block_root",
"Backfill batch processing error"
);
// This is an internal error, do not penalize the peer.
None
}

HistoricalBlockError::ValidatorPubkeyCacheTimeout => {
warn!(
error = "pubkey_cache_timeout",
8 changes: 7 additions & 1 deletion beacon_node/store/src/hot_cold_store.rs
@@ -649,9 +649,15 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
.inspect(|cache| cache.lock().put_block(*block_root, full_block.clone()));

DatabaseBlock::Full(full_block)
} else if !self.config.prune_payloads {
} else if !self.config.prune_payloads || *block_root == split.block_root {
// If payload pruning is disabled there's a chance we may have the payload of
// this finalized block. Attempt to load it but don't error in case it's missing.
//
// We also allow for the split block's payload to be loaded *if it exists*. This is
// necessary on startup when syncing from an unaligned checkpoint (a checkpoint state
// at a skipped slot), and then loading the canonical head (with payload). If we modify
// payload pruning in future so that it doesn't prune the split block's payload, then
// this case could move to the case above where we error if the payload is missing.
let fork_name = blinded_block.fork_name(&self.spec)?;
if let Some(payload) = self.get_execution_payload(block_root, fork_name)? {
DatabaseBlock::Full(
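The new condition only adds one case, the split (finalized checkpoint) block, to the existing payload-pruning check. A hedged sketch of that decision in isolation, with a stand-in `Hash256` alias and a hypothetical helper name rather than the actual `HotColdDB` API:

```rust
/// Stand-in for the real 32-byte root type; illustration only.
type Hash256 = [u8; 32];

/// Sketch of the branch above: attempt to load a full payload either when payload
/// pruning is disabled, or when the block is the split block, whose payload may still
/// be present after an unaligned checkpoint sync.
fn should_try_load_payload(
    prune_payloads: bool,
    block_root: Hash256,
    split_block_root: Hash256,
) -> bool {
    !prune_payloads || block_root == split_block_root
}

fn main() {
    let split = [1u8; 32];
    let other = [2u8; 32];
    assert!(should_try_load_payload(true, split, split)); // split block: always attempt
    assert!(!should_try_load_payload(true, other, split)); // pruning on, not split: skip
    assert!(should_try_load_payload(false, other, split)); // pruning disabled: attempt
}
```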
1 change: 0 additions & 1 deletion book/book.toml
@@ -1,7 +1,6 @@
[book]
authors = ["Paul Hauner", "Age Manning"]
language = "en"
multilingual = false
src = "src"
title = "Lighthouse Book"

12 changes: 6 additions & 6 deletions book/src/advanced_blobs.md
@@ -6,12 +6,12 @@ With the [Fusaka](https://ethereum.org/roadmap/fusaka) upgrade, the main feature

The table below summarizes the role of the relevant flags in a Lighthouse beacon node:

| | Post-Deneb, Pre-Fulu || Post-Fulu ||
|-------|----------|----------|-----------|----------|
| Flag | Usage | Can serve blobs? | Usage | Can serve blobs? |
| --prune-blobs false | Does not prune blobs since using the flag | Yes, for blobs since using the flag and for the past 18 days | Does not prune data columns since using the flag | No |
| --semi-supernode | - | - | Store half data columns | Yes, for blobs since using the flag for a max of 18 days |
| --supernode | - | - | Store all data columns | Yes, for blobs since using the flag for a max of 18 days |
| | Post-Deneb, Pre-Fulu | | Post-Fulu | |
|---------------------|-------------------------------------------|--------------------------------------------------------------|--------------------------------------------------|----------------------------------------------------------|
| Flag | Usage | Can serve blobs? | Usage | Can serve blobs? |
| --prune-blobs false | Does not prune blobs since using the flag | Yes, for blobs since using the flag and for the past 18 days | Does not prune data columns since using the flag | No |
| --semi-supernode | - | - | Store half data columns | Yes, for blobs since using the flag for a max of 18 days |
| --supernode | - | - | Store all data columns | Yes, for blobs since using the flag for a max of 18 days |

While both `--supernode` and `--semi-supernode` can serve blobs, a supernode will be faster to respond to blob queries as it skips the blob reconstruction step. Running a supernode also helps the network by serving the data columns to its peers.

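The 18-day figure quoted in the table corresponds to the blob and data-column retention window. A back-of-envelope check, assuming the usual mainnet parameters (a 4096-epoch window, 32 slots per epoch, 12-second slots), none of which are stated in this diff:

```rust
fn main() {
    // Assumed mainnet parameters, not taken from this change.
    let retention_epochs: u64 = 4096;
    let slots_per_epoch: u64 = 32;
    let seconds_per_slot: u64 = 12;

    let seconds = retention_epochs * slots_per_epoch * seconds_per_slot;
    let days = seconds as f64 / 86_400.0;
    println!("retention window ≈ {days:.1} days"); // prints roughly 18.2 days
}
```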
10 changes: 5 additions & 5 deletions book/src/advanced_database.md
@@ -63,11 +63,11 @@ that we have observed are:

The following table lists the data for different configurations. Note that the disk space requirement is for the `chain_db` and `freezer_db`, excluding the `blobs_db`.

| Hierarchy Exponents | Storage Requirement | Sequential Slot Query | Uncached Query | Time to Sync |
|---|---|---|---|---|
| 5,9,11,13,16,18,21 (default) | 418 GiB | 250-700 ms | up to 10 s | 1 week |
| 5,7,11 (frequent snapshots) | 589 GiB | 250-700 ms | up to 6 s | 1 week |
| 0,5,7,11 (per-slot diffs) | 2500 GiB | 250-700 ms | up to 4 s | 7 weeks |
| Hierarchy Exponents | Storage Requirement | Sequential Slot Query | Uncached Query | Time to Sync |
|------------------------------|---------------------|-----------------------|----------------|--------------|
| 5,9,11,13,16,18,21 (default) | 418 GiB | 250-700 ms | up to 10 s | 1 week |
| 5,7,11 (frequent snapshots) | 589 GiB | 250-700 ms | up to 6 s | 1 week |
| 0,5,7,11 (per-slot diffs) | 2500 GiB | 250-700 ms | up to 4 s | 7 weeks |

[Jim](https://github.com/mcdee) has done some experiments to study the response time of querying random slots (uncached query) for `--hierarchy-exponents 0,5,7,11` (per-slot diffs) and `--hierarchy-exponents 5,9,11,13,17,21` (per-epoch diffs), as shown in the figures below. From the figures, two points can be concluded:

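The hierarchy exponents in the table can be read as diff intervals in slots. A hedged sketch under the assumption that each exponent `e` corresponds to a layer of state diffs applied every 2^e slots, with the largest exponent giving the spacing of full snapshots; this interpretation is not stated in the diff above:

```rust
fn main() {
    // Default exponents from the table above; the per-layer interval interpretation is
    // an assumption made for illustration.
    let default_exponents: [u32; 7] = [5, 9, 11, 13, 16, 18, 21];
    for e in default_exponents {
        let interval_slots: u64 = 1u64 << e;
        println!("layer 2^{e}: a diff every {interval_slots} slots");
    }
    // With these defaults the outermost layer spans 2^21 = 2,097,152 slots.
}
```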
4 changes: 2 additions & 2 deletions book/src/api_vc_endpoints.md
@@ -132,7 +132,7 @@ Returns information regarding the health of the host machine.

| Property | Specification |
|-------------------|--------------------------------------------|
| Path | `/lighthouse/ui/health` |
| Path | `/lighthouse/ui/health` |
| Method | GET |
| Required Headers | [`Authorization`](./api_vc_auth_header.md) |
| Typical Responses | 200 |
@@ -178,7 +178,7 @@ Returns the graffiti that will be used for the next block proposal of each valid

| Property | Specification |
|-------------------|--------------------------------------------|
| Path | `/lighthouse/ui/graffiti` |
| Path | `/lighthouse/ui/graffiti` |
| Method | GET |
| Required Headers | [`Authorization`](./api_vc_auth_header.md) |
| Typical Responses | 200 |
16 changes: 8 additions & 8 deletions book/src/archived_merge_migration.md
@@ -25,14 +25,14 @@ All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln

<div align="center">

| Network | Bellatrix | The Merge | Remark |
|---------|-------------------------------|-------------------------------| -----------|
| Ropsten | 2<sup>nd</sup> June 2022 | 8<sup>th</sup> June 2022 | Deprecated |
| Sepolia | 20<sup>th</sup> June 2022 | 6<sup>th</sup> July 2022 | |
| Goerli | 4<sup>th</sup> August 2022 | 10<sup>th</sup> August 2022 | Previously named `Prater`|
| Mainnet | 6<sup>th</sup> September 2022| 15<sup>th</sup> September 2022| |
| Chiado | 10<sup>th</sup> October 2022 | 4<sup>th</sup> November 2022 | |
| Gnosis | 30<sup>th</sup> November 2022| 8<sup>th</sup> December 2022 | |
| Network | Bellatrix | The Merge | Remark |
|---------|-------------------------------|--------------------------------|---------------------------|
| Ropsten | 2<sup>nd</sup> June 2022 | 8<sup>th</sup> June 2022 | Deprecated |
| Sepolia | 20<sup>th</sup> June 2022 | 6<sup>th</sup> July 2022 | |
| Goerli | 4<sup>th</sup> August 2022 | 10<sup>th</sup> August 2022 | Previously named `Prater` |
| Mainnet | 6<sup>th</sup> September 2022 | 15<sup>th</sup> September 2022 | |
| Chiado | 10<sup>th</sup> October 2022 | 4<sup>th</sup> November 2022 | |
| Gnosis | 30<sup>th</sup> November 2022 | 8<sup>th</sup> December 2022 | |

</div>
