9 changes: 7 additions & 2 deletions http_api/src/error.rs
@@ -17,7 +17,9 @@ use ssz::{ReadError, H256};
use thiserror::Error;
use tokio::task::JoinError;
use types::{
-    deneb::primitives::BlobIndex, fulu::primitives::ColumnIndex, phase0::primitives::Slot,
+    deneb::primitives::{BlobIndex, VersionedHash},
+    fulu::primitives::ColumnIndex,
+    phase0::primitives::Slot,
};

#[derive(Debug, Error)]
@@ -150,6 +152,8 @@ pub enum Error {
UnableToProduceBlindedBlock,
#[error("validator not found")]
ValidatorNotFound,
#[error("versioned hash not in block: {versioned_hash:?}")]
VersionedHashNotInBlock { versioned_hash: VersionedHash },
// TODO(Grandine Team): Some API clients do not set `validator_index`.
// See <https://github.com/attestantio/vouch/issues/75>.
// #[error("validator not in committee: {validator_index}")]
@@ -236,7 +240,8 @@ impl Error {
| Self::StatePreCapella
| Self::StatePreElectra
| Self::StatePreFulu
-            | Self::UnableToPublishBlock => StatusCode::BAD_REQUEST,
+            | Self::UnableToPublishBlock
+            | Self::VersionedHashNotInBlock { .. } => StatusCode::BAD_REQUEST,
// | Self::ValidatorNotInCommittee { .. }
Self::Internal(_)
| Self::Canceled(_)
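As an aside, here is a minimal sketch (not from the PR) of how the `thiserror` attribute pattern used in the new variant formats its message: the `{versioned_hash:?}` placeholder Debug-formats the named field. The `VersionedHash` alias and the hash value below are stand-ins for the project's types.

```rust
use thiserror::Error;

// Stand-in for the project's `VersionedHash` type.
type VersionedHash = [u8; 4];

#[derive(Debug, Error)]
enum Error {
    // Same pattern as the new variant in the PR: the named field is
    // interpolated into the error message with Debug formatting.
    #[error("versioned hash not in block: {versioned_hash:?}")]
    VersionedHashNotInBlock { versioned_hash: VersionedHash },
}

fn main() {
    let error = Error::VersionedHashNotInBlock {
        versioned_hash: [0xde, 0xad, 0xbe, 0xef],
    };

    // The generated Display impl produces the message declared above.
    assert_eq!(
        error.to_string(),
        "versioned hash not in block: [222, 173, 190, 239]"
    );
}
```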
66 changes: 52 additions & 14 deletions http_api/src/standard.rs
@@ -4,7 +4,7 @@

use core::time::Duration;
use std::{
-    collections::{BTreeMap, HashSet},
+    collections::{BTreeMap, BTreeSet, HashSet},
sync::Arc,
};

@@ -71,7 +71,7 @@ use types::{
config::Config as ChainConfig,
deneb::{
containers::{BlobIdentifier, BlobSidecar},
-        primitives::{Blob, BlobIndex, KzgProof},
+        primitives::{Blob, BlobIndex, KzgProof, VersionedHash},
},
fulu::{
containers::{DataColumnIdentifier, DataColumnSidecar, MatrixEntry},
@@ -93,7 +93,9 @@ use types::{
},
},
preset::{Preset, SyncSubcommitteeSize},
-    traits::{BeaconBlock as _, BeaconState as _, SignedBeaconBlock as _},
+    traits::{
+        BeaconBlock as _, BeaconState as _, PostDenebBeaconBlockBody, SignedBeaconBlock as _,
+    },
};
use validator::{ApiToValidator, ValidatorConfig};

@@ -121,6 +123,12 @@ pub struct BlobSidecarsQuery {
indices: Option<Vec<BlobIndex>>,
}

+#[derive(Deserialize)]
+#[serde(deny_unknown_fields)]
+pub struct BlobsQuery {
+    versioned_hashes: Option<Vec<VersionedHash>>,
+}
+
#[derive(Deserialize)]
#[serde(deny_unknown_fields)]
pub struct DataColumnSidecarsQuery {
@@ -1271,7 +1279,7 @@ pub async fn blobs<P: Preset, W: Wait>(
State(metrics): State<Option<Arc<Metrics>>>,
State(anchor_checkpoint_provider): State<AnchorCheckpointProvider<P>>,
EthPath(block_id): EthPath<BlockId>,
-    EthQuery(query): EthQuery<BlobSidecarsQuery>,
+    EthQuery(query): EthQuery<BlobsQuery>,
headers: HeaderMap,
) -> Result<EthResponse<DynamicList<Blob<P>>, (), JsonOrSsz>, Error> {
let WithStatus {
@@ -1285,6 +1293,40 @@
let epoch = misc::compute_epoch_at_slot::<P>(block.message().slot());
let max_blobs_per_block = controller.chain_config().max_blobs_per_block(epoch);

+    let mut requested_indices = None;
+
+    if let Some(versioned_hashes) = query.versioned_hashes {
+        let Some(kzg_commitments) = block
+            .message()
+            .body()
+            .post_deneb()
+            .map(PostDenebBeaconBlockBody::blob_kzg_commitments)
+        else {
+            return Ok(EthResponse::json_or_ssz(DynamicList::empty(), &headers)?
+                .execution_optimistic(status.is_optimistic())
+                .finalized(finalized));
+        };
+
+        let block_versioned_hashes = kzg_commitments
+            .iter()
+            .copied()
+            .map(misc::kzg_commitment_to_versioned_hash)
+            .collect::<Vec<_>>();
+
+        let mut indices = BTreeSet::new();
+
+        for versioned_hash in versioned_hashes {
+            let index = block_versioned_hashes
+                .iter()
+                .position(|block_versioned_hash| *block_versioned_hash == versioned_hash)
+                .ok_or(Error::VersionedHashNotInBlock { versioned_hash })?;
+
+            indices.insert(u64::try_from(index).expect("position should fit in u64"));
+        }
+
+        requested_indices = Some(indices);
+    }
+
let blobs = if version.is_peerdas_activated() {
let data_column_sidecars = controller.data_column_sidecars_by_root(block_root)?;
let blobs = construct_blobs_from_data_column_sidecars(
@@ -1294,7 +1336,7 @@
metrics.as_ref(),
)?;

-        if let Some(indices) = query.indices {
+        if let Some(indices) = requested_indices {
blobs
.into_iter()
.zip(0..)
@@ -1304,18 +1346,14 @@
blobs
}
} else {
-        let blob_identifiers = query
-            .indices
+        let blob_identifiers = requested_indices
.unwrap_or_else(|| (0..max_blobs_per_block).collect())
.into_iter()
-            .map(|index| {
-                ensure!(index < max_blobs_per_block, Error::InvalidBlobIndex(index));
-                Ok(BlobIdentifier { block_root, index })
-            })
-            .collect::<Result<Vec<_>>>()?;
-        let blob_sidecars = controller.blob_sidecars_by_ids(blob_identifiers)?;
+            .map(|index| BlobIdentifier { block_root, index })
+            .collect::<Vec<_>>();

-        blob_sidecars
+        controller
+            .blob_sidecars_by_ids(blob_identifiers)?
.into_iter()
.map(|blob_sidecar| blob_sidecar.blob.clone())
.collect()
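For reference, a small self-contained sketch (not part of the PR) of the lookup the new handler code performs: each requested versioned hash is resolved to its blob index within the block, and a hash the block does not contain is reported as an error, mirroring `Error::VersionedHashNotInBlock`. The `VersionedHash` alias and the hash values below are stand-ins for the project's types.

```rust
use std::collections::BTreeSet;

// Stand-in for the project's `VersionedHash` type.
type VersionedHash = [u8; 32];

// Resolves each requested versioned hash to its blob index within the block.
// A hash that is not in the block is returned as the error value, which the
// real handler maps to `Error::VersionedHashNotInBlock` (HTTP 400).
fn requested_blob_indices(
    block_versioned_hashes: &[VersionedHash],
    requested: &[VersionedHash],
) -> Result<BTreeSet<u64>, VersionedHash> {
    let mut indices = BTreeSet::new();

    for versioned_hash in requested {
        let index = block_versioned_hashes
            .iter()
            .position(|block_hash| block_hash == versioned_hash)
            .ok_or(*versioned_hash)?;

        indices.insert(index as u64);
    }

    Ok(indices)
}

fn main() {
    let in_block = [[0x01; 32], [0x02; 32], [0x03; 32]];

    // Requesting the third and first blob by hash yields the index set {0, 2}.
    assert_eq!(
        requested_blob_indices(&in_block, &[[0x03; 32], [0x01; 32]]),
        Ok(BTreeSet::from([0, 2]))
    );

    // A hash the block does not contain is rejected.
    assert_eq!(
        requested_blob_indices(&in_block, &[[0xff; 32]]),
        Err([0xff; 32])
    );
}
```

As the diff shows, when no `versioned_hashes` query parameter is given the handler instead falls back to all indices up to `max_blobs_per_block`.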