diff --git a/beacon-chain/blockchain/kzg/BUILD.bazel b/beacon-chain/blockchain/kzg/BUILD.bazel index 0dcecc80992a..e2cfa3d5c4e0 100644 --- a/beacon-chain/blockchain/kzg/BUILD.bazel +++ b/beacon-chain/blockchain/kzg/BUILD.bazel @@ -30,7 +30,8 @@ go_test( deps = [ "//consensus-types/blocks:go_default_library", "//testing/require:go_default_library", - "//testing/util:go_default_library", + "@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library", "@com_github_crate_crypto_go_kzg_4844//:go_default_library", + "@com_github_sirupsen_logrus//:go_default_library", ], ) diff --git a/beacon-chain/blockchain/kzg/validation_test.go b/beacon-chain/blockchain/kzg/validation_test.go index a44c3676b714..aeb879806b87 100644 --- a/beacon-chain/blockchain/kzg/validation_test.go +++ b/beacon-chain/blockchain/kzg/validation_test.go @@ -1,12 +1,16 @@ package kzg import ( + "bytes" + "crypto/sha256" + "encoding/binary" "testing" "github.com/OffchainLabs/prysm/v6/consensus-types/blocks" "github.com/OffchainLabs/prysm/v6/testing/require" - "github.com/OffchainLabs/prysm/v6/testing/util" + "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" GoKZG "github.com/crate-crypto/go-kzg-4844" + "github.com/sirupsen/logrus" ) func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZGProof, error) { @@ -37,7 +41,7 @@ func TestBytesToAny(t *testing.T) { } func TestGenerateCommitmentAndProof(t *testing.T) { - blob := util.GetRandBlob(123) + blob := getRandBlob(123) commitment, proof, err := GenerateCommitmentAndProof(blob) require.NoError(t, err) expectedCommitment := GoKZG.KZGCommitment{180, 218, 156, 194, 59, 20, 10, 189, 186, 254, 132, 93, 7, 127, 104, 172, 238, 240, 237, 70, 83, 89, 1, 152, 99, 0, 165, 65, 143, 62, 20, 215, 230, 14, 205, 95, 28, 245, 54, 25, 160, 16, 178, 31, 232, 207, 38, 85} @@ -45,3 +49,36 @@ func TestGenerateCommitmentAndProof(t *testing.T) { require.Equal(t, expectedCommitment, commitment) require.Equal(t, expectedProof, proof) } + +func 
deterministicRandomness(seed int64) [32]byte { + // Converts an int64 to a byte slice + buf := new(bytes.Buffer) + err := binary.Write(buf, binary.BigEndian, seed) + if err != nil { + logrus.WithError(err).Error("Failed to write int64 to bytes buffer") + return [32]byte{} + } + bytes := buf.Bytes() + + return sha256.Sum256(bytes) +} + +// Returns a serialized random field element in big-endian +func getRandFieldElement(seed int64) [32]byte { + bytes := deterministicRandomness(seed) + var r fr.Element + r.SetBytes(bytes[:]) + + return GoKZG.SerializeScalar(r) +} + +// Returns a random blob using the passed seed as entropy +func getRandBlob(seed int64) GoKZG.Blob { + var blob GoKZG.Blob + bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize + for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize { + fieldElementBytes := getRandFieldElement(seed + int64(i)) + copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:]) + } + return blob +} diff --git a/beacon-chain/core/peerdas/BUILD.bazel b/beacon-chain/core/peerdas/BUILD.bazel new file mode 100644 index 000000000000..8d431c3b9126 --- /dev/null +++ b/beacon-chain/core/peerdas/BUILD.bazel @@ -0,0 +1,71 @@ +load("@prysm//tools/go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "das_core.go", + "info.go", + "metrics.go", + "p2p_interface.go", + "reconstruction.go", + "util.go", + "validator.go", + ], + importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas", + visibility = ["//visibility:public"], + deps = [ + "//beacon-chain/blockchain/kzg:go_default_library", + "//beacon-chain/state:go_default_library", + "//cmd/beacon-chain/flags:go_default_library", + "//config/fieldparams:go_default_library", + "//config/params:go_default_library", + "//consensus-types/blocks:go_default_library", + "//consensus-types/interfaces:go_default_library", + "//consensus-types/primitives:go_default_library", + "//container/trie:go_default_library", + 
"//crypto/hash:go_default_library", + "//encoding/bytesutil:go_default_library", + "//proto/prysm/v1alpha1:go_default_library", + "//runtime/version:go_default_library", + "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library", + "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library", + "@com_github_hashicorp_golang_lru//:go_default_library", + "@com_github_holiman_uint256//:go_default_library", + "@com_github_pkg_errors//:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", + "@org_golang_x_sync//errgroup:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "das_core_test.go", + "info_test.go", + "p2p_interface_test.go", + "reconstruction_test.go", + "utils_test.go", + "validator_test.go", + ], + deps = [ + ":go_default_library", + "//beacon-chain/blockchain/kzg:go_default_library", + "//beacon-chain/state/state-native:go_default_library", + "//cmd/beacon-chain/flags:go_default_library", + "//config/fieldparams:go_default_library", + "//config/params:go_default_library", + "//consensus-types/blocks:go_default_library", + "//consensus-types/primitives:go_default_library", + "//proto/engine/v1:go_default_library", + "//proto/prysm/v1alpha1:go_default_library", + "//testing/require:go_default_library", + "//testing/util:go_default_library", + "@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library", + "@com_github_crate_crypto_go_kzg_4844//:go_default_library", + "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library", + "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library", + "@com_github_pkg_errors//:go_default_library", + "@com_github_sirupsen_logrus//:go_default_library", + ], +) diff --git a/beacon-chain/core/peerdas/das_core.go b/beacon-chain/core/peerdas/das_core.go new file mode 100644 index 000000000000..e8acf851f92a --- /dev/null +++ 
b/beacon-chain/core/peerdas/das_core.go @@ -0,0 +1,402 @@ +package peerdas + +import ( + "encoding/binary" + "math" + "slices" + "time" + + "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg" + "github.com/OffchainLabs/prysm/v6/config/params" + "github.com/OffchainLabs/prysm/v6/consensus-types/blocks" + "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces" + "github.com/OffchainLabs/prysm/v6/crypto/hash" + "github.com/OffchainLabs/prysm/v6/encoding/bytesutil" + ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/holiman/uint256" + "github.com/pkg/errors" +) + +var ( + // Custom errors + ErrCustodyGroupTooLarge = errors.New("custody group too large") + ErrCustodyGroupCountTooLarge = errors.New("custody group count too large") + ErrMismatchSize = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs") + errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen") + + // maxUint256 is the maximum value of an uint256. + maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64} +) + +type CustodyType int + +const ( + Target CustodyType = iota + Actual +) + +// CustodyGroups computes the custody groups the node should participate in for custody. +// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#get_custody_groups +func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error) { + numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups + + // Check if the custody group count is larger than the number of custody groups. + if custodyGroupCount > numberOfCustodyGroup { + return nil, ErrCustodyGroupCountTooLarge + } + + // Shortcut if all custody groups are needed. 
+ if custodyGroupCount == numberOfCustodyGroup { + custodyGroups := make([]uint64, 0, numberOfCustodyGroup) + for i := range numberOfCustodyGroup { + custodyGroups = append(custodyGroups, i) + } + + return custodyGroups, nil + } + + one := uint256.NewInt(1) + + custodyGroupsMap := make(map[uint64]bool, custodyGroupCount) + custodyGroups := make([]uint64, 0, custodyGroupCount) + for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(custodyGroups)) < custodyGroupCount; { + // Convert to big endian bytes. + currentIdBytesBigEndian := currentId.Bytes32() + + // Convert to little endian. + currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:]) + + // Hash the result. + hashedCurrentId := hash.Hash(currentIdBytesLittleEndian) + + // Get the custody group ID. + custodyGroup := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % numberOfCustodyGroup + + // Add the custody group to the map. + if !custodyGroupsMap[custodyGroup] { + custodyGroupsMap[custodyGroup] = true + custodyGroups = append(custodyGroups, custodyGroup) + } + + if currentId.Cmp(maxUint256) == 0 { + // Overflow prevention. + currentId = uint256.NewInt(0) + } else { + // Increment the current ID. + currentId.Add(currentId, one) + } + + // Sort the custody groups. + slices.Sort[[]uint64](custodyGroups) + } + + // Final check. + if uint64(len(custodyGroups)) != custodyGroupCount { + return nil, errWrongComputedCustodyGroupCount + } + + return custodyGroups, nil +} + +// ComputeColumnsForCustodyGroup computes the columns for a given custody group. 
+// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#compute_columns_for_custody_group +func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) { + beaconConfig := params.BeaconConfig() + numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups + + if custodyGroup >= numberOfCustodyGroup { + return nil, ErrCustodyGroupTooLarge + } + + numberOfColumns := beaconConfig.NumberOfColumns + + columnsPerGroup := numberOfColumns / numberOfCustodyGroup + + columns := make([]uint64, 0, columnsPerGroup) + for i := range columnsPerGroup { + column := numberOfCustodyGroup*i + custodyGroup + columns = append(columns, column) + } + + return columns, nil +} + +// DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs. +// The returned value contains pointers to function parameters. +// (If the caller alterates `cellsAndProofs` afterwards, the returned value will be modified as well.) +// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#get_data_column_sidecars +func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) { + if signedBlock == nil || signedBlock.IsNil() || len(cellsAndProofs) == 0 { + return nil, nil + } + + block := signedBlock.Block() + blockBody := block.Body() + blobKzgCommitments, err := blockBody.BlobKzgCommitments() + if err != nil { + return nil, errors.Wrap(err, "blob KZG commitments") + } + + if len(blobKzgCommitments) != len(cellsAndProofs) { + return nil, ErrMismatchSize + } + + signedBlockHeader, err := signedBlock.Header() + if err != nil { + return nil, errors.Wrap(err, "signed block header") + } + + kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody) + if err != nil { + return nil, errors.Wrap(err, "merkle proof ZKG commitments") + } + + dataColumnSidecars, err := 
DataColumnsSidecarsFromItems(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs) + if err != nil { + return nil, errors.Wrap(err, "data column sidecars from items") + } + + return dataColumnSidecars, nil +} + +// DataColumnsSidecarsFromItems computes the data column sidecars from the signed block header, the blob KZG commiments, +// the KZG commitment includion proofs and cells and cell proofs. +// The returned value contains pointers to function parameters. +// (If the caller alterates input parameters afterwards, the returned value will be modified as well.) +func DataColumnsSidecarsFromItems( + signedBlockHeader *ethpb.SignedBeaconBlockHeader, + blobKzgCommitments [][]byte, + kzgCommitmentsInclusionProof [][]byte, + cellsAndProofs []kzg.CellsAndProofs, +) ([]*ethpb.DataColumnSidecar, error) { + start := time.Now() + if len(blobKzgCommitments) != len(cellsAndProofs) { + return nil, ErrMismatchSize + } + + numberOfColumns := params.BeaconConfig().NumberOfColumns + + blobsCount := len(cellsAndProofs) + sidecars := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns) + for columnIndex := range numberOfColumns { + column := make([]kzg.Cell, 0, blobsCount) + kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount) + + for rowIndex := range blobsCount { + cellsForRow := cellsAndProofs[rowIndex].Cells + proofsForRow := cellsAndProofs[rowIndex].Proofs + + cell := cellsForRow[columnIndex] + column = append(column, cell) + + kzgProof := proofsForRow[columnIndex] + kzgProofOfColumn = append(kzgProofOfColumn, kzgProof) + } + + columnBytes := make([][]byte, 0, blobsCount) + for i := range column { + columnBytes = append(columnBytes, column[i][:]) + } + + kzgProofOfColumnBytes := make([][]byte, 0, blobsCount) + for _, kzgProof := range kzgProofOfColumn { + kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, kzgProof[:]) + } + + sidecar := ðpb.DataColumnSidecar{ + Index: columnIndex, + Column: columnBytes, + KzgCommitments: blobKzgCommitments, + 
KzgProofs: kzgProofOfColumnBytes, + SignedBlockHeader: signedBlockHeader, + KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof, + } + + sidecars = append(sidecars, sidecar) + } + + dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds())) + return sidecars, nil +} + +// ComputeCustodyGroupForColumn computes the custody group for a given column. +// It is the reciprocal function of ComputeColumnsForCustodyGroup. +func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) { + beaconConfig := params.BeaconConfig() + numberOfColumns := beaconConfig.NumberOfColumns + numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups + + if columnIndex >= numberOfColumns { + return 0, ErrIndexTooLarge + } + + return columnIndex % numberOfCustodyGroups, nil +} + +// Blobs extract blobs from `dataColumnsSidecar`. +// This can be seen as the reciprocal function of DataColumnSidecars. +// `dataColumnsSidecar` needs to contain the datacolumns corresponding to the non-extended matrix, +// else an error will be returned. +// (`dataColumnsSidecar` can contain extra columns, but they will be ignored.) +func Blobs(indices map[uint64]bool, dataColumnsSidecar []*ethpb.DataColumnSidecar) ([]*blocks.VerifiedROBlob, error) { + numberOfColumns := params.BeaconConfig().NumberOfColumns + + // Compute the number of needed columns, including the number of columns is odd case. + neededColumnCount := (numberOfColumns + 1) / 2 + + // Check if all needed columns are present. + sliceIndexFromColumnIndex := make(map[uint64]int, len(dataColumnsSidecar)) + for i := range dataColumnsSidecar { + dataColumnSideCar := dataColumnsSidecar[i] + index := dataColumnSideCar.Index + + if index < neededColumnCount { + sliceIndexFromColumnIndex[index] = i + } + } + + actualColumnCount := uint64(len(sliceIndexFromColumnIndex)) + + // Get missing columns. 
+ if actualColumnCount < neededColumnCount { + var missingColumnsSlice []uint64 + + for i := range neededColumnCount { + if _, ok := sliceIndexFromColumnIndex[i]; !ok { + missingColumnsSlice = append(missingColumnsSlice, i) + } + } + + slices.Sort[[]uint64](missingColumnsSlice) + return nil, errors.Errorf("some columns are missing: %v", missingColumnsSlice) + } + + // It is safe to retrieve the first column since we already checked that `dataColumnsSidecar` is not empty. + firstDataColumnSidecar := dataColumnsSidecar[0] + + blobCount := uint64(len(firstDataColumnSidecar.Column)) + + // Check all colums have te same length. + for i := range dataColumnsSidecar { + if uint64(len(dataColumnsSidecar[i].Column)) != blobCount { + return nil, errors.Errorf("mismatch in the length of the data columns, expected %d, got %d", blobCount, len(dataColumnsSidecar[i].Column)) + } + } + + // Reconstruct verified RO blobs from columns. + verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount) + + // Populate and filter indices. + indicesSlice := populateAndFilterIndices(indices, blobCount) + + for _, blobIndex := range indicesSlice { + var blob kzg.Blob + + // Compute the content of the blob. + for columnIndex := range neededColumnCount { + sliceIndex, ok := sliceIndexFromColumnIndex[columnIndex] + if !ok { + return nil, errors.Errorf("missing column %d, this should never happen", columnIndex) + } + + dataColumnSideCar := dataColumnsSidecar[sliceIndex] + cell := dataColumnSideCar.Column[blobIndex] + + for i := range cell { + blob[columnIndex*kzg.BytesPerCell+uint64(i)] = cell[i] + } + } + + // Retrieve the blob KZG commitment. + blobKZGCommitment := kzg.Commitment(firstDataColumnSidecar.KzgCommitments[blobIndex]) + + // Compute the blob KZG proof. 
+ blobKzgProof, err := kzg.ComputeBlobKZGProof(&blob, blobKZGCommitment) + if err != nil { + return nil, errors.Wrap(err, "compute blob KZG proof") + } + + blobSidecar := ðpb.BlobSidecar{ + Index: blobIndex, + Blob: blob[:], + KzgCommitment: blobKZGCommitment[:], + KzgProof: blobKzgProof[:], + SignedBlockHeader: firstDataColumnSidecar.SignedBlockHeader, + CommitmentInclusionProof: firstDataColumnSidecar.KzgCommitmentsInclusionProof, + } + + roBlob, err := blocks.NewROBlob(blobSidecar) + if err != nil { + return nil, errors.Wrap(err, "new RO blob") + } + + verifiedROBlob := blocks.NewVerifiedROBlob(roBlob) + verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob) + } + + return verifiedROBlobs, nil +} + +// CustodyGroupSamplingSize returns the number of custody groups the node should sample from. +// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#custody-sampling +func (custodyInfo *CustodyInfo) CustodyGroupSamplingSize(ct CustodyType) uint64 { + custodyGroupCount := custodyInfo.TargetGroupCount.Get() + + if ct == Actual { + custodyGroupCount = custodyInfo.ActualGroupCount() + } + + samplesPerSlot := params.BeaconConfig().SamplesPerSlot + return max(samplesPerSlot, custodyGroupCount) +} + +// CustodyColumns computes the custody columns from the custody groups. +func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) { + numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups + + custodyGroupCount := len(custodyGroups) + + // Compute the columns for each custody group. 
+ columns := make(map[uint64]bool, custodyGroupCount) + for _, group := range custodyGroups { + if group >= numberOfCustodyGroups { + return nil, ErrCustodyGroupTooLarge + } + + groupColumns, err := ComputeColumnsForCustodyGroup(group) + if err != nil { + return nil, errors.Wrap(err, "compute columns for custody group") + } + + for _, column := range groupColumns { + columns[column] = true + } + } + + return columns, nil +} + +// populateAndFilterIndices returns a sorted slices of indices, setting all indices if none are provided, +// and filtering out indices higher than the blob count. +func populateAndFilterIndices(indices map[uint64]bool, blobCount uint64) []uint64 { + // If no indices are provided, provide all blobs. + if len(indices) == 0 { + for i := range blobCount { + indices[i] = true + } + } + + // Filter blobs index higher than the blob count. + indicesSlice := make([]uint64, 0, len(indices)) + for i := range indices { + if i < blobCount { + indicesSlice = append(indicesSlice, i) + } + } + + // Sort the indices. + slices.Sort[[]uint64](indicesSlice) + + return indicesSlice +} diff --git a/beacon-chain/core/peerdas/das_core_test.go b/beacon-chain/core/peerdas/das_core_test.go new file mode 100644 index 000000000000..69914718d1f1 --- /dev/null +++ b/beacon-chain/core/peerdas/das_core_test.go @@ -0,0 +1,311 @@ +package peerdas_test + +import ( + "testing" + + "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg" + "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas" + "github.com/OffchainLabs/prysm/v6/config/params" + "github.com/OffchainLabs/prysm/v6/consensus-types/blocks" + ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1" + "github.com/OffchainLabs/prysm/v6/testing/require" + "github.com/OffchainLabs/prysm/v6/testing/util" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/pkg/errors" +) + +func TestCustodyGroups(t *testing.T) { + // The happy path is unit tested in spec tests. 
+ numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups + _, err := peerdas.CustodyGroups(enode.ID{}, numberOfCustodyGroup+1) + require.ErrorIs(t, err, peerdas.ErrCustodyGroupCountTooLarge) +} + +func TestComputeColumnsForCustodyGroup(t *testing.T) { + // The happy path is unit tested in spec tests. + numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups + _, err := peerdas.ComputeColumnsForCustodyGroup(numberOfCustodyGroup) + require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge) +} + +func TestDataColumnSidecars(t *testing.T) { + t.Run("nil signed block", func(t *testing.T) { + var expected []*ethpb.DataColumnSidecar = nil + actual, err := peerdas.DataColumnSidecars(nil, []kzg.CellsAndProofs{}) + require.NoError(t, err) + + require.DeepSSZEqual(t, expected, actual) + }) + + t.Run("empty cells and proofs", func(t *testing.T) { + // Create a protobuf signed beacon block. + signedBeaconBlockPb := util.NewBeaconBlockDeneb() + + // Create a signed beacon block from the protobuf. + signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb) + require.NoError(t, err) + + actual, err := peerdas.DataColumnSidecars(signedBeaconBlock, []kzg.CellsAndProofs{}) + require.NoError(t, err) + require.IsNil(t, actual) + }) + + t.Run("sizes mismatch", func(t *testing.T) { + // Create a protobuf signed beacon block. + signedBeaconBlockPb := util.NewBeaconBlockDeneb() + + // Create a signed beacon block from the protobuf. + signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb) + require.NoError(t, err) + + // Create cells and proofs. 
+ cellsAndProofs := make([]kzg.CellsAndProofs, 1) + + _, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs) + require.ErrorIs(t, err, peerdas.ErrMismatchSize) + }) +} + +// -------------------------------------------------------------------------------------------------------------------------------------- +// DataColumnsSidecarsFromItems is tested as part of the DataColumnSidecars tests, in the TestDataColumnsSidecarsBlobsRoundtrip function. +// -------------------------------------------------------------------------------------------------------------------------------------- + +func TestComputeCustodyGroupForColumn(t *testing.T) { + params.SetupTestConfigCleanup(t) + config := params.BeaconConfig() + config.NumberOfColumns = 128 + config.NumberOfCustodyGroups = 64 + params.OverrideBeaconConfig(config) + + t.Run("index too large", func(t *testing.T) { + _, err := peerdas.ComputeCustodyGroupForColumn(1_000_000) + require.ErrorIs(t, err, peerdas.ErrIndexTooLarge) + }) + + t.Run("nominal", func(t *testing.T) { + expected := uint64(2) + actual, err := peerdas.ComputeCustodyGroupForColumn(2) + require.NoError(t, err) + require.Equal(t, expected, actual) + + expected = uint64(3) + actual, err = peerdas.ComputeCustodyGroupForColumn(3) + require.NoError(t, err) + require.Equal(t, expected, actual) + + expected = uint64(2) + actual, err = peerdas.ComputeCustodyGroupForColumn(66) + require.NoError(t, err) + require.Equal(t, expected, actual) + + expected = uint64(3) + actual, err = peerdas.ComputeCustodyGroupForColumn(67) + require.NoError(t, err) + require.Equal(t, expected, actual) + }) +} + +func TestBlobs(t *testing.T) { + blobsIndice := map[uint64]bool{} + + numberOfColumns := params.BeaconConfig().NumberOfColumns + + almostAllColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2) + for i := uint64(2); i < numberOfColumns/2+2; i++ { + almostAllColumns = append(almostAllColumns, ðpb.DataColumnSidecar{ + Index: i, + }) + } + + testCases := 
[]struct { + name string + input []*ethpb.DataColumnSidecar + expected []*blocks.VerifiedROBlob + err error + }{ + { + name: "empty input", + input: []*ethpb.DataColumnSidecar{}, + expected: nil, + err: errors.New("some columns are missing: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63]"), + }, + { + name: "missing columns", + input: almostAllColumns, + expected: nil, + err: errors.New("some columns are missing: [0 1]"), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + actual, err := peerdas.Blobs(blobsIndice, tc.input) + if tc.err != nil { + require.Equal(t, tc.err.Error(), err.Error()) + } else { + require.NoError(t, err) + } + require.DeepSSZEqual(t, tc.expected, actual) + }) + } +} + +func TestDataColumnsSidecarsBlobsRoundtrip(t *testing.T) { + const blobCount = 5 + blobsIndex := map[uint64]bool{} + + // Start the trusted setup. + err := kzg.Start() + require.NoError(t, err) + + // Create a protobuf signed beacon block. + signedBeaconBlockPb := util.NewBeaconBlockDeneb() + + // Generate random blobs and their corresponding commitments and proofs. + blobs := make([]kzg.Blob, 0, blobCount) + blobKzgCommitments := make([]*kzg.Commitment, 0, blobCount) + blobKzgProofs := make([]*kzg.Proof, 0, blobCount) + + for blobIndex := range blobCount { + // Create a random blob. + blob := getRandBlob(int64(blobIndex)) + blobs = append(blobs, blob) + + // Generate a blobKZGCommitment for the blob. + blobKZGCommitment, proof, err := generateCommitmentAndProof(&blob) + require.NoError(t, err) + + blobKzgCommitments = append(blobKzgCommitments, blobKZGCommitment) + blobKzgProofs = append(blobKzgProofs, proof) + } + + // Set the commitments into the block. 
+ blobZkgCommitmentsBytes := make([][]byte, 0, blobCount) + for _, blobKZGCommitment := range blobKzgCommitments { + blobZkgCommitmentsBytes = append(blobZkgCommitmentsBytes, blobKZGCommitment[:]) + } + + signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobZkgCommitmentsBytes + + // Generate verified RO blobs. + verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount) + + // Create a signed beacon block from the protobuf. + signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb) + require.NoError(t, err) + + commitmentInclusionProof, err := blocks.MerkleProofKZGCommitments(signedBeaconBlock.Block().Body()) + require.NoError(t, err) + + for blobIndex := range blobCount { + blob := blobs[blobIndex] + blobKZGCommitment := blobKzgCommitments[blobIndex] + blobKzgProof := blobKzgProofs[blobIndex] + + // Get the signed beacon block header. + signedBeaconBlockHeader, err := signedBeaconBlock.Header() + require.NoError(t, err) + + blobSidecar := ðpb.BlobSidecar{ + Index: uint64(blobIndex), + Blob: blob[:], + KzgCommitment: blobKZGCommitment[:], + KzgProof: blobKzgProof[:], + SignedBlockHeader: signedBeaconBlockHeader, + CommitmentInclusionProof: commitmentInclusionProof, + } + + roBlob, err := blocks.NewROBlob(blobSidecar) + require.NoError(t, err) + + verifiedROBlob := blocks.NewVerifiedROBlob(roBlob) + verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob) + } + + // Compute data columns sidecars from the signed beacon block and from the blobs. + cellsAndProofs := util.GenerateCellsAndProofs(t, blobs) + dataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs) + require.NoError(t, err) + + // Compute the blobs from the data columns sidecar. + roundtripBlobs, err := peerdas.Blobs(blobsIndex, dataColumnsSidecar) + require.NoError(t, err) + + // Check that the blobs are the same. 
+ require.DeepSSZEqual(t, verifiedROBlobs, roundtripBlobs) +} + +func TestCustodyGroupSamplingSize(t *testing.T) { + testCases := []struct { + name string + custodyType peerdas.CustodyType + validatorsCustodyRequirement uint64 + toAdvertiseCustodyGroupCount uint64 + expected uint64 + }{ + { + name: "target, lower than samples per slot", + custodyType: peerdas.Target, + validatorsCustodyRequirement: 2, + expected: 8, + }, + { + name: "target, higher than samples per slot", + custodyType: peerdas.Target, + validatorsCustodyRequirement: 100, + expected: 100, + }, + { + name: "actual, lower than samples per slot", + custodyType: peerdas.Actual, + validatorsCustodyRequirement: 3, + toAdvertiseCustodyGroupCount: 4, + expected: 8, + }, + { + name: "actual, higher than samples per slot", + custodyType: peerdas.Actual, + validatorsCustodyRequirement: 100, + toAdvertiseCustodyGroupCount: 101, + expected: 100, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create a custody info. + custodyInfo := peerdas.CustodyInfo{} + + // Set the validators custody requirement for target custody group count. + custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement) + + // Set the to advertise custody group count. + custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount) + + // Compute the custody group sampling size. + actual := custodyInfo.CustodyGroupSamplingSize(tc.custodyType) + + // Check the result. 
+ require.Equal(t, tc.expected, actual) + }) + } +} + +func TestCustodyColumns(t *testing.T) { + t.Run("group too large", func(t *testing.T) { + _, err := peerdas.CustodyColumns([]uint64{1_000_000}) + require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge) + }) + + t.Run("nominal", func(t *testing.T) { + input := []uint64{1, 2} + expected := map[uint64]bool{1: true, 2: true} + + actual, err := peerdas.CustodyColumns(input) + require.NoError(t, err) + require.Equal(t, len(expected), len(actual)) + for i := range actual { + require.Equal(t, expected[i], actual[i]) + } + }) +} diff --git a/beacon-chain/core/peerdas/info.go b/beacon-chain/core/peerdas/info.go new file mode 100644 index 000000000000..c00cd4b1f2f4 --- /dev/null +++ b/beacon-chain/core/peerdas/info.go @@ -0,0 +1,192 @@ +package peerdas + +import ( + "encoding/binary" + "sync" + + "github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags" + "github.com/OffchainLabs/prysm/v6/config/params" + "github.com/ethereum/go-ethereum/p2p/enode" + lru "github.com/hashicorp/golang-lru" + "github.com/pkg/errors" +) + +// info contains all useful peerDAS related information regarding a peer. +type ( + info struct { + CustodyGroups map[uint64]bool + CustodyColumns map[uint64]bool + DataColumnsSubnets map[uint64]bool + } + + targetCustodyGroupCount struct { + mut sync.RWMutex + validatorsCustodyRequirement uint64 + } + + toAdverstiseCustodyGroupCount struct { + mut sync.RWMutex + value uint64 + } + + CustodyInfo struct { + // Mut is a mutex to be used by caller to ensure neither + // TargetCustodyGroupCount nor ToAdvertiseCustodyGroupCount are being modified. + // (This is not necessary to use this mutex for any data protection.) + Mut sync.RWMutex + + // TargetGroupCount represents the target number of custody groups we should custody + // regarding the validators we are tracking. + TargetGroupCount targetCustodyGroupCount + + // ToAdvertiseGroupCount represents the number of custody groups to advertise to the network. 
+ ToAdvertiseGroupCount toAdverstiseCustodyGroupCount + } +) + +const ( + nodeInfoCacheSize = 200 + nodeInfoCachKeySize = 32 + 8 +) + +var ( + nodeInfoCacheMut sync.Mutex + nodeInfoCache *lru.Cache +) + +// Info returns the peerDAS information for a given nodeID and custodyGroupCount. +// It returns a boolean indicating if the peer info was already in the cache and an error if any. +func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) { + // Create a new cache if it doesn't exist. + if err := createInfoCacheIfNeeded(); err != nil { + return nil, false, errors.Wrap(err, "create cache if needed") + } + + // Compute the key. + key := computeInfoCacheKey(nodeID, custodyGroupCount) + + // If the value is already in the cache, return it. + if value, ok := nodeInfoCache.Get(key); ok { + peerInfo, ok := value.(*info) + if !ok { + return nil, false, errors.New("failed to cast peer info (should never happen)") + } + + return peerInfo, true, nil + } + + // The peer info is not in the cache, compute it. + // Compute custody groups. + custodyGroups, err := CustodyGroups(nodeID, custodyGroupCount) + if err != nil { + return nil, false, errors.Wrap(err, "custody groups") + } + + // Compute custody columns. + custodyColumns, err := CustodyColumns(custodyGroups) + if err != nil { + return nil, false, errors.Wrap(err, "custody columns") + } + + // Compute data columns subnets. + dataColumnsSubnets := DataColumnSubnets(custodyColumns) + + // Convert the custody groups to a map. + custodyGroupsMap := make(map[uint64]bool, len(custodyGroups)) + for _, group := range custodyGroups { + custodyGroupsMap[group] = true + } + + result := &info{ + CustodyGroups: custodyGroupsMap, + CustodyColumns: custodyColumns, + DataColumnsSubnets: dataColumnsSubnets, + } + + // Add the result to the cache. + nodeInfoCache.Add(key, result) + + return result, false, nil +} + +// ActualGroupCount returns the actual custody group count. 
+func (custodyInfo *CustodyInfo) ActualGroupCount() uint64 { + return min(custodyInfo.TargetGroupCount.Get(), custodyInfo.ToAdvertiseGroupCount.Get()) +} + +// CustodyGroupCount returns the number of groups we should participate in for custody. +func (tcgc *targetCustodyGroupCount) Get() uint64 { + // If subscribed to all subnets, return the number of custody groups. + if flags.Get().SubscribeToAllSubnets { + return params.BeaconConfig().NumberOfCustodyGroups + } + + tcgc.mut.RLock() + defer tcgc.mut.RUnlock() + + // If no validators are tracked, return the default custody requirement. + if tcgc.validatorsCustodyRequirement == 0 { + return params.BeaconConfig().CustodyRequirement + } + + // Return the validators custody requirement. + return tcgc.validatorsCustodyRequirement +} + +// setValidatorsCustodyRequirement sets the validators custody requirement. +func (tcgc *targetCustodyGroupCount) SetValidatorsCustodyRequirement(value uint64) { + tcgc.mut.Lock() + defer tcgc.mut.Unlock() + + tcgc.validatorsCustodyRequirement = value +} + +// Get returns the to advertise custody group count. +func (tacgc *toAdverstiseCustodyGroupCount) Get() uint64 { + // If subscribed to all subnets, return the number of custody groups. + if flags.Get().SubscribeToAllSubnets { + return params.BeaconConfig().NumberOfCustodyGroups + } + + custodyRequirement := params.BeaconConfig().CustodyRequirement + + tacgc.mut.RLock() + defer tacgc.mut.RUnlock() + + return max(tacgc.value, custodyRequirement) +} + +// Set sets the to advertise custody group count. +func (tacgc *toAdverstiseCustodyGroupCount) Set(value uint64) { + tacgc.mut.Lock() + defer tacgc.mut.Unlock() + + tacgc.value = value +} + +// createInfoCacheIfNeeded creates a new cache if it doesn't exist. 
+func createInfoCacheIfNeeded() error { + nodeInfoCacheMut.Lock() + defer nodeInfoCacheMut.Unlock() + + if nodeInfoCache == nil { + c, err := lru.New(nodeInfoCacheSize) + if err != nil { + return errors.Wrap(err, "lru new") + } + + nodeInfoCache = c + } + + return nil +} + +// computeInfoCacheKey returns a unique key for a node and its custodyGroupCount. +func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCachKeySize]byte { + var key [nodeInfoCachKeySize]byte + + copy(key[:32], nodeID[:]) + binary.BigEndian.PutUint64(key[32:], custodyGroupCount) + + return key +} diff --git a/beacon-chain/core/peerdas/info_test.go b/beacon-chain/core/peerdas/info_test.go new file mode 100644 index 000000000000..8dc0eaa48579 --- /dev/null +++ b/beacon-chain/core/peerdas/info_test.go @@ -0,0 +1,133 @@ +package peerdas_test + +import ( + "testing" + + "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas" + "github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags" + "github.com/OffchainLabs/prysm/v6/testing/require" + "github.com/ethereum/go-ethereum/p2p/enode" +) + +func TestInfo(t *testing.T) { + nodeID := enode.ID{} + custodyGroupCount := uint64(7) + + expectedCustodyGroup := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true} + expectedCustodyColumns := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true} + expectedDataColumnsSubnets := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true} + + for _, cached := range []bool{false, true} { + actual, ok, err := peerdas.Info(nodeID, custodyGroupCount) + require.NoError(t, err) + require.Equal(t, cached, ok) + require.DeepEqual(t, expectedCustodyGroup, actual.CustodyGroups) + require.DeepEqual(t, expectedCustodyColumns, actual.CustodyColumns) + require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets) + } +} + +func TestTargetCustodyGroupCount(t *testing.T) { + testCases := []struct { + 
name string + subscribeToAllSubnets bool + validatorsCustodyRequirement uint64 + expected uint64 + }{ + { + name: "subscribed to all subnets", + subscribeToAllSubnets: true, + validatorsCustodyRequirement: 100, + expected: 128, + }, + { + name: "no validators attached", + subscribeToAllSubnets: false, + validatorsCustodyRequirement: 0, + expected: 4, + }, + { + name: "some validators attached", + subscribeToAllSubnets: false, + validatorsCustodyRequirement: 100, + expected: 100, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Subscribe to all subnets if needed. + if tc.subscribeToAllSubnets { + resetFlags := flags.Get() + gFlags := new(flags.GlobalFlags) + gFlags.SubscribeToAllSubnets = true + flags.Init(gFlags) + defer flags.Init(resetFlags) + } + + var custodyInfo peerdas.CustodyInfo + + // Set the validators custody requirement. + custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement) + + // Get the target custody group count. + actual := custodyInfo.TargetGroupCount.Get() + + // Compare the expected and actual values. + require.Equal(t, tc.expected, actual) + }) + } +} + +func TestToAdvertiseCustodyGroupCount(t *testing.T) { + testCases := []struct { + name string + subscribeToAllSubnets bool + toAdvertiseCustodyGroupCount uint64 + expected uint64 + }{ + { + name: "subscribed to all subnets", + subscribeToAllSubnets: true, + toAdvertiseCustodyGroupCount: 100, + expected: 128, + }, + { + name: "higher than custody requirement", + subscribeToAllSubnets: false, + toAdvertiseCustodyGroupCount: 100, + expected: 100, + }, + { + name: "lower than custody requirement", + subscribeToAllSubnets: false, + toAdvertiseCustodyGroupCount: 1, + expected: 4, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Subscribe to all subnets if needed. 
+ if tc.subscribeToAllSubnets { + resetFlags := flags.Get() + gFlags := new(flags.GlobalFlags) + gFlags.SubscribeToAllSubnets = true + flags.Init(gFlags) + defer flags.Init(resetFlags) + } + + // Create a custody info. + var custodyInfo peerdas.CustodyInfo + + // Set the to advertise custody group count. + custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount) + + // Get the to advertise custody group count. + actual := custodyInfo.ToAdvertiseGroupCount.Get() + + // Compare the expected and actual values. + require.Equal(t, tc.expected, actual) + }) + } +} diff --git a/beacon-chain/core/peerdas/metrics.go b/beacon-chain/core/peerdas/metrics.go new file mode 100644 index 000000000000..cf8d73254b33 --- /dev/null +++ b/beacon-chain/core/peerdas/metrics.go @@ -0,0 +1,14 @@ +package peerdas + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var dataColumnComputationTime = promauto.NewHistogram( + prometheus.HistogramOpts{ + Name: "beacon_data_column_sidecar_computation_milliseconds", + Help: "Captures the time taken to compute data column sidecars from blobs.", + Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000}, + }, +) diff --git a/beacon-chain/core/peerdas/p2p_interface.go b/beacon-chain/core/peerdas/p2p_interface.go new file mode 100644 index 000000000000..1175e94c1554 --- /dev/null +++ b/beacon-chain/core/peerdas/p2p_interface.go @@ -0,0 +1,162 @@ +package peerdas + +import ( + "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg" + fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" + "github.com/OffchainLabs/prysm/v6/config/params" + "github.com/OffchainLabs/prysm/v6/consensus-types/blocks" + "github.com/OffchainLabs/prysm/v6/container/trie" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/pkg/errors" +) + +const ( + CustodyGroupCountEnrKey = "cgc" + kzgPosition = 11 // The index of the KZG commitment list 
in the Body +) + +var ( + ErrIndexTooLarge = errors.New("column index is larger than the specified columns count") + ErrNoKzgCommitments = errors.New("no KZG commitments found") + ErrMismatchLength = errors.New("mismatch in the length of the column, commitments or proofs") + ErrInvalidKZGProof = errors.New("invalid KZG proof") + ErrBadRootLength = errors.New("bad root length") + ErrInvalidInclusionProof = errors.New("invalid inclusion proof") + ErrRecordNil = errors.New("record is nil") + ErrNilBlockHeader = errors.New("nil beacon block header") + ErrCannotLoadCustodyGroupCount = errors.New("cannot load the custody group count from peer") +) + +// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#custody-group-count +type Cgc uint64 + +func (Cgc) ENRKey() string { return CustodyGroupCountEnrKey } + +// VerifyDataColumnSidecar verifies if the data column sidecar is valid. +// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar +func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error { + // The sidecar index must be within the valid range. + numberOfColumns := params.BeaconConfig().NumberOfColumns + if sidecar.Index >= numberOfColumns { + return ErrIndexTooLarge + } + + // A sidecar for zero blobs is invalid. + if len(sidecar.KzgCommitments) == 0 { + return ErrNoKzgCommitments + } + + // The column length must be equal to the number of commitments/proofs. + if len(sidecar.Column) != len(sidecar.KzgCommitments) || len(sidecar.Column) != len(sidecar.KzgProofs) { + return ErrMismatchLength + } + + return nil +} + +// VerifyDataColumnsSidecarKZGProofs verifies if the KZG proofs are correct. +// Note: We are slightly deviating from the specification here: +// The specification verifies the KZG proofs for each sidecar separately, +// while we are verifying all the KZG proofs from multiple sidecars in a batch. 
+// This is done to improve performance since the internal KZG library is way more +// efficient when verifying in batch. +// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs +func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error { + // Compute the total count. + count := 0 + for _, sidecar := range sidecars { + count += len(sidecar.Column) + } + + commitments := make([]kzg.Bytes48, 0, count) + indices := make([]uint64, 0, count) + cells := make([]kzg.Cell, 0, count) + proofs := make([]kzg.Bytes48, 0, count) + + for _, sidecar := range sidecars { + for i := range sidecar.Column { + commitments = append(commitments, kzg.Bytes48(sidecar.KzgCommitments[i])) + indices = append(indices, sidecar.Index) + cells = append(cells, kzg.Cell(sidecar.Column[i])) + proofs = append(proofs, kzg.Bytes48(sidecar.KzgProofs[i])) + } + } + + // Batch verify that the cells match the corresponding commitments and proofs. + verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs) + if err != nil { + return errors.Wrap(err, "verify cell KZG proof batch") + } + + if !verified { + return ErrInvalidKZGProof + } + + return nil +} + +// VerifyDataColumnSidecarInclusionProof verifies if the given KZG commitments included in the given beacon block. 
+// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof +func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error { + if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil { + return ErrNilBlockHeader + } + + root := sidecar.SignedBlockHeader.Header.BodyRoot + if len(root) != fieldparams.RootLength { + return ErrBadRootLength + } + + leaves := blocks.LeavesFromCommitments(sidecar.KzgCommitments) + + sparse, err := trie.GenerateTrieFromItems(leaves, fieldparams.LogMaxBlobCommitments) + if err != nil { + return errors.Wrap(err, "generate trie from items") + } + + hashTreeRoot, err := sparse.HashTreeRoot() + if err != nil { + return errors.Wrap(err, "hash tree root") + } + + verified := trie.VerifyMerkleProof(root, hashTreeRoot[:], kzgPosition, sidecar.KzgCommitmentsInclusionProof) + if !verified { + return ErrInvalidInclusionProof + } + + return nil +} + +// ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar. +// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar +func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 { + dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount + return columnIndex % dataColumnSidecarSubnetCount +} + +// DataColumnSubnets computes the subnets for the data columns. +func DataColumnSubnets(dataColumns map[uint64]bool) map[uint64]bool { + subnets := make(map[uint64]bool, len(dataColumns)) + + for column := range dataColumns { + subnet := ComputeSubnetForDataColumnSidecar(column) + subnets[subnet] = true + } + + return subnets +} + +// CustodyGroupCountFromRecord extracts the custody group count from an ENR record. 
+func CustodyGroupCountFromRecord(record *enr.Record) (uint64, error) { + if record == nil { + return 0, ErrRecordNil + } + + // Load the `cgc` + var cgc Cgc + if err := record.Load(&cgc); err != nil { + return 0, ErrCannotLoadCustodyGroupCount + } + + return uint64(cgc), nil +} diff --git a/beacon-chain/core/peerdas/p2p_interface_test.go b/beacon-chain/core/peerdas/p2p_interface_test.go new file mode 100644 index 000000000000..8790c347d09c --- /dev/null +++ b/beacon-chain/core/peerdas/p2p_interface_test.go @@ -0,0 +1,304 @@ +package peerdas_test + +import ( + "crypto/rand" + "testing" + + "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg" + "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas" + fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" + "github.com/OffchainLabs/prysm/v6/config/params" + "github.com/OffchainLabs/prysm/v6/consensus-types/blocks" + enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1" + ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1" + "github.com/OffchainLabs/prysm/v6/testing/require" + "github.com/OffchainLabs/prysm/v6/testing/util" + "github.com/ethereum/go-ethereum/p2p/enr" +) + +func TestVerifyDataColumnSidecar(t *testing.T) { + t.Run("index too large", func(t *testing.T) { + roSidecar := createTestSidecar(t, 1_000_000, nil, nil, nil) + err := peerdas.VerifyDataColumnSidecar(roSidecar) + require.ErrorIs(t, err, peerdas.ErrIndexTooLarge) + }) + + t.Run("no commitments", func(t *testing.T) { + roSidecar := createTestSidecar(t, 0, nil, nil, nil) + err := peerdas.VerifyDataColumnSidecar(roSidecar) + require.ErrorIs(t, err, peerdas.ErrNoKzgCommitments) + }) + + t.Run("KZG commitments size mismatch", func(t *testing.T) { + kzgCommitments := make([][]byte, 1) + roSidecar := createTestSidecar(t, 0, nil, kzgCommitments, nil) + err := peerdas.VerifyDataColumnSidecar(roSidecar) + require.ErrorIs(t, err, peerdas.ErrMismatchLength) + }) + + t.Run("KZG proofs size mismatch", func(t *testing.T) { 
+ column, kzgCommitments := make([][]byte, 1), make([][]byte, 1) + roSidecar := createTestSidecar(t, 0, column, kzgCommitments, nil) + err := peerdas.VerifyDataColumnSidecar(roSidecar) + require.ErrorIs(t, err, peerdas.ErrMismatchLength) + }) + + t.Run("nominal", func(t *testing.T) { + column, kzgCommitments, kzgProofs := make([][]byte, 1), make([][]byte, 1), make([][]byte, 1) + roSidecar := createTestSidecar(t, 0, column, kzgCommitments, kzgProofs) + err := peerdas.VerifyDataColumnSidecar(roSidecar) + require.NoError(t, err) + }) +} + +func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) { + err := kzg.Start() + require.NoError(t, err) + + generateSidecars := func(t *testing.T) []*ethpb.DataColumnSidecar { + const blobCount = int64(6) + + dbBlock := util.NewBeaconBlockDeneb() + + commitments := make([][]byte, 0, blobCount) + blobs := make([]kzg.Blob, 0, blobCount) + + for i := range blobCount { + blob := getRandBlob(i) + commitment, _, err := generateCommitmentAndProof(&blob) + require.NoError(t, err) + + commitments = append(commitments, commitment[:]) + blobs = append(blobs, blob) + } + + dbBlock.Block.Body.BlobKzgCommitments = commitments + sBlock, err := blocks.NewSignedBeaconBlock(dbBlock) + require.NoError(t, err) + + cellsAndProofs := util.GenerateCellsAndProofs(t, blobs) + sidecars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs) + require.NoError(t, err) + + return sidecars + } + + generateRODataColumnSidecars := func(t *testing.T, sidecars []*ethpb.DataColumnSidecar) []blocks.RODataColumn { + roDataColumnSidecars := make([]blocks.RODataColumn, 0, len(sidecars)) + for _, sidecar := range sidecars { + roCol, err := blocks.NewRODataColumn(sidecar) + require.NoError(t, err) + + roDataColumnSidecars = append(roDataColumnSidecars, roCol) + } + + return roDataColumnSidecars + } + + t.Run("invalid proof", func(t *testing.T) { + sidecars := generateSidecars(t) + sidecars[0].Column[0][0]++ // It is OK to overflow + roDataColumnSidecars := 
generateRODataColumnSidecars(t, sidecars) + + err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars) + require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof) + }) + + t.Run("nominal", func(t *testing.T) { + sidecars := generateSidecars(t) + roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars) + + err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars) + require.NoError(t, err) + }) +} + +func Test_VerifyKZGInclusionProofColumn(t *testing.T) { + const ( + blobCount = 3 + columnIndex = 0 + ) + + // Generate random KZG commitments `blobCount` blobs. + kzgCommitments := make([][]byte, blobCount) + + for i := 0; i < blobCount; i++ { + kzgCommitments[i] = make([]byte, 48) + _, err := rand.Read(kzgCommitments[i]) + require.NoError(t, err) + } + + pbBody := ðpb.BeaconBlockBodyDeneb{ + RandaoReveal: make([]byte, 96), + Eth1Data: ðpb.Eth1Data{ + DepositRoot: make([]byte, fieldparams.RootLength), + BlockHash: make([]byte, fieldparams.RootLength), + }, + Graffiti: make([]byte, 32), + SyncAggregate: ðpb.SyncAggregate{ + SyncCommitteeBits: make([]byte, fieldparams.SyncAggregateSyncCommitteeBytesLength), + SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength), + }, + ExecutionPayload: &enginev1.ExecutionPayloadDeneb{ + ParentHash: make([]byte, fieldparams.RootLength), + FeeRecipient: make([]byte, 20), + StateRoot: make([]byte, fieldparams.RootLength), + ReceiptsRoot: make([]byte, fieldparams.RootLength), + LogsBloom: make([]byte, 256), + PrevRandao: make([]byte, fieldparams.RootLength), + BaseFeePerGas: make([]byte, fieldparams.RootLength), + BlockHash: make([]byte, fieldparams.RootLength), + Transactions: make([][]byte, 0), + ExtraData: make([]byte, 0), + }, + BlobKzgCommitments: kzgCommitments, + } + + root, err := pbBody.HashTreeRoot() + require.NoError(t, err) + + body, err := blocks.NewBeaconBlockBody(pbBody) + require.NoError(t, err) + + kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(body) + 
require.NoError(t, err) + + testCases := []struct { + name string + expectedError error + dataColumnSidecar *ethpb.DataColumnSidecar + }{ + { + name: "nilSignedBlockHeader", + expectedError: peerdas.ErrNilBlockHeader, + dataColumnSidecar: ðpb.DataColumnSidecar{}, + }, + { + name: "nilHeader", + expectedError: peerdas.ErrNilBlockHeader, + dataColumnSidecar: ðpb.DataColumnSidecar{ + SignedBlockHeader: ðpb.SignedBeaconBlockHeader{}, + }, + }, + { + name: "invalidBodyRoot", + expectedError: peerdas.ErrBadRootLength, + dataColumnSidecar: ðpb.DataColumnSidecar{ + SignedBlockHeader: ðpb.SignedBeaconBlockHeader{ + Header: ðpb.BeaconBlockHeader{}, + }, + }, + }, + { + name: "unverifiedMerkleProof", + expectedError: peerdas.ErrInvalidInclusionProof, + dataColumnSidecar: ðpb.DataColumnSidecar{ + SignedBlockHeader: ðpb.SignedBeaconBlockHeader{ + Header: ðpb.BeaconBlockHeader{ + BodyRoot: make([]byte, 32), + }, + }, + KzgCommitments: kzgCommitments, + }, + }, + { + name: "nominal", + expectedError: nil, + dataColumnSidecar: ðpb.DataColumnSidecar{ + KzgCommitments: kzgCommitments, + SignedBlockHeader: ðpb.SignedBeaconBlockHeader{ + Header: ðpb.BeaconBlockHeader{ + BodyRoot: root[:], + }, + }, + KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + roDataColumn := blocks.RODataColumn{DataColumnSidecar: tc.dataColumnSidecar} + err = peerdas.VerifyDataColumnSidecarInclusionProof(roDataColumn) + if tc.expectedError == nil { + require.NoError(t, err) + return + } + + require.ErrorIs(t, tc.expectedError, err) + }) + } +} + +func TestComputeSubnetForDataColumnSidecar(t *testing.T) { + params.SetupTestConfigCleanup(t) + config := params.BeaconConfig() + config.DataColumnSidecarSubnetCount = 128 + params.OverrideBeaconConfig(config) + + require.Equal(t, uint64(0), peerdas.ComputeSubnetForDataColumnSidecar(0)) + require.Equal(t, uint64(1), peerdas.ComputeSubnetForDataColumnSidecar(1)) + 
require.Equal(t, uint64(0), peerdas.ComputeSubnetForDataColumnSidecar(128)) + require.Equal(t, uint64(1), peerdas.ComputeSubnetForDataColumnSidecar(129)) +} + +func TestDataColumnSubnets(t *testing.T) { + params.SetupTestConfigCleanup(t) + config := params.BeaconConfig() + config.DataColumnSidecarSubnetCount = 128 + params.OverrideBeaconConfig(config) + + input := map[uint64]bool{0: true, 1: true, 128: true, 129: true, 131: true} + expected := map[uint64]bool{0: true, 1: true, 3: true} + actual := peerdas.DataColumnSubnets(input) + + require.Equal(t, len(expected), len(actual)) + for k, v := range expected { + require.Equal(t, v, actual[k]) + } +} + +func TestCustodyGroupCountFromRecord(t *testing.T) { + t.Run("nil record", func(t *testing.T) { + _, err := peerdas.CustodyGroupCountFromRecord(nil) + require.ErrorIs(t, err, peerdas.ErrRecordNil) + }) + + t.Run("no cgc", func(t *testing.T) { + _, err := peerdas.CustodyGroupCountFromRecord(&enr.Record{}) + require.ErrorIs(t, err, peerdas.ErrCannotLoadCustodyGroupCount) + }) + + t.Run("nominal", func(t *testing.T) { + const expected uint64 = 7 + + record := &enr.Record{} + record.Set(peerdas.Cgc(expected)) + + actual, err := peerdas.CustodyGroupCountFromRecord(record) + require.NoError(t, err) + require.Equal(t, expected, actual) + }) +} + +func createTestSidecar(t *testing.T, index uint64, column, kzgCommitments, kzgProofs [][]byte) blocks.RODataColumn { + pbSignedBeaconBlock := util.NewBeaconBlockDeneb() + signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbSignedBeaconBlock) + require.NoError(t, err) + + signedBlockHeader, err := signedBeaconBlock.Header() + require.NoError(t, err) + + sidecar := ðpb.DataColumnSidecar{ + Index: index, + Column: column, + KzgCommitments: kzgCommitments, + KzgProofs: kzgProofs, + SignedBlockHeader: signedBlockHeader, + } + + roSidecar, err := blocks.NewRODataColumn(sidecar) + require.NoError(t, err) + + return roSidecar +} diff --git a/beacon-chain/core/peerdas/reconstruction.go 
b/beacon-chain/core/peerdas/reconstruction.go new file mode 100644 index 000000000000..7f38dc684400 --- /dev/null +++ b/beacon-chain/core/peerdas/reconstruction.go @@ -0,0 +1,76 @@ +package peerdas + +import ( + "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg" + "github.com/OffchainLabs/prysm/v6/config/params" + ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +// CanSelfReconstruct returns true if the node can self-reconstruct all the data columns from its custody group count. +func CanSelfReconstruct(custodyGroupCount uint64) bool { + total := params.BeaconConfig().NumberOfCustodyGroups + // If total is odd, then we need total / 2 + 1 columns to reconstruct. + // If total is even, then we need total / 2 columns to reconstruct. + return custodyGroupCount >= (total+1)/2 +} + +// RecoverCellsAndProofs recovers the cells and proofs from the data column sidecars. +func RecoverCellsAndProofs(dataColumnSideCars []*ethpb.DataColumnSidecar) ([]kzg.CellsAndProofs, error) { + var wg errgroup.Group + + dataColumnSideCarsCount := len(dataColumnSideCars) + + if dataColumnSideCarsCount == 0 { + return nil, errors.New("no data column sidecars") + } + + // Check if all columns have the same length. + blobCount := len(dataColumnSideCars[0].Column) + for _, sidecar := range dataColumnSideCars { + length := len(sidecar.Column) + + if length != blobCount { + return nil, errors.New("columns do not have the same length") + } + } + + // Recover cells and compute proofs in parallel. + recoveredCellsAndProofs := make([]kzg.CellsAndProofs, blobCount) + + for blobIndex := 0; blobIndex < blobCount; blobIndex++ { + bIndex := blobIndex + wg.Go(func() error { + cellsIndices := make([]uint64, 0, dataColumnSideCarsCount) + cells := make([]kzg.Cell, 0, dataColumnSideCarsCount) + + for _, sidecar := range dataColumnSideCars { + // Build the cell indices. 
+ cellsIndices = append(cellsIndices, sidecar.Index) + + // Get the cell. + column := sidecar.Column + cell := column[bIndex] + + cells = append(cells, kzg.Cell(cell)) + } + + // Recover the cells and proofs for the corresponding blob + cellsAndProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells) + + if err != nil { + return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", bIndex) + } + + recoveredCellsAndProofs[bIndex] = cellsAndProofs + return nil + }) + } + + if err := wg.Wait(); err != nil { + return nil, err + } + + return recoveredCellsAndProofs, nil +} diff --git a/beacon-chain/core/peerdas/reconstruction_test.go b/beacon-chain/core/peerdas/reconstruction_test.go new file mode 100644 index 000000000000..c80eb2ac9a55 --- /dev/null +++ b/beacon-chain/core/peerdas/reconstruction_test.go @@ -0,0 +1,57 @@ +package peerdas_test + +import ( + "testing" + + "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas" + "github.com/OffchainLabs/prysm/v6/config/params" + "github.com/OffchainLabs/prysm/v6/testing/require" +) + +func TestCanSelfReconstruct(t *testing.T) { + testCases := []struct { + name string + totalNumberOfCustodyGroups uint64 + custodyNumberOfGroups uint64 + expected bool + }{ + { + name: "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=31", + totalNumberOfCustodyGroups: 64, + custodyNumberOfGroups: 31, + expected: false, + }, + { + name: "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=32", + totalNumberOfCustodyGroups: 64, + custodyNumberOfGroups: 32, + expected: true, + }, + { + name: "totalNumberOfCustodyGroups=65, custodyNumberOfGroups=32", + totalNumberOfCustodyGroups: 65, + custodyNumberOfGroups: 32, + expected: false, + }, + { + name: "totalNumberOfCustodyGroups=63, custodyNumberOfGroups=33", + totalNumberOfCustodyGroups: 65, + custodyNumberOfGroups: 33, + expected: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Set the total number of columns. 
+ params.SetupTestConfigCleanup(t) + cfg := params.BeaconConfig().Copy() + cfg.NumberOfCustodyGroups = tc.totalNumberOfCustodyGroups + params.OverrideBeaconConfig(cfg) + + // Check if reconstuction is possible. + actual := peerdas.CanSelfReconstruct(tc.custodyNumberOfGroups) + require.Equal(t, tc.expected, actual) + }) + } +} diff --git a/beacon-chain/core/peerdas/util.go b/beacon-chain/core/peerdas/util.go new file mode 100644 index 000000000000..373ad34a5e78 --- /dev/null +++ b/beacon-chain/core/peerdas/util.go @@ -0,0 +1,54 @@ +package peerdas + +import ( + "fmt" + + "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg" + "github.com/OffchainLabs/prysm/v6/config/params" + "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces" + ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1" + "github.com/OffchainLabs/prysm/v6/runtime/version" + "github.com/pkg/errors" +) + +// ConstructDataColumnSidecars constructs data column sidecars from a block, blobs and their cell proofs. +// This is a convenience method as blob and cell proofs are common inputs. +func ConstructDataColumnSidecars(block interfaces.SignedBeaconBlock, blobs [][]byte, cellProofs [][]byte) ([]*ethpb.DataColumnSidecar, error) { + // Check if the block is at least a Fulu block. 
+ if block.Version() < version.Fulu { + return nil, nil + } + + numberOfColumns := params.BeaconConfig().NumberOfColumns + if uint64(len(blobs))*numberOfColumns != uint64(len(cellProofs)) { + return nil, fmt.Errorf("number of blobs and cell proofs do not match: %d * %d != %d", len(blobs), numberOfColumns, len(cellProofs)) + } + + cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobs)) + + for i, blob := range blobs { + var b kzg.Blob + copy(b[:], blob) + cells, err := kzg.ComputeCells(&b) + if err != nil { + return nil, err + } + + var proofs []kzg.Proof + for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ { + proofs = append(proofs, kzg.Proof(cellProofs[idx])) + } + + cellsAndProofs = append(cellsAndProofs, kzg.CellsAndProofs{ + Cells: cells, + Proofs: proofs, + }) + } + + dataColumnSidecars, err := DataColumnSidecars(block, cellsAndProofs) + if err != nil { + return nil, errors.Wrap(err, "data column sidcars") + } + + return dataColumnSidecars, nil +} diff --git a/beacon-chain/core/peerdas/utils_test.go b/beacon-chain/core/peerdas/utils_test.go new file mode 100644 index 000000000000..89c4bffb5bec --- /dev/null +++ b/beacon-chain/core/peerdas/utils_test.go @@ -0,0 +1,57 @@ +package peerdas_test + +import ( + "bytes" + "crypto/sha256" + "encoding/binary" + + "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg" + "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" + GoKZG "github.com/crate-crypto/go-kzg-4844" + "github.com/sirupsen/logrus" +) + +func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) { + commitment, err := kzg.BlobToKZGCommitment(blob) + if err != nil { + return nil, nil, err + } + proof, err := kzg.ComputeBlobKZGProof(blob, commitment) + if err != nil { + return nil, nil, err + } + return &commitment, &proof, err +} + +// Returns a random blob using the passed seed as entropy +func getRandBlob(seed int64) kzg.Blob { + var blob kzg.Blob + bytesPerBlob := 
GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize + for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize { + fieldElementBytes := getRandFieldElement(seed + int64(i)) + copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:]) + } + return blob +} + +// Returns a serialized random field element in big-endian +func getRandFieldElement(seed int64) [32]byte { + bytes := deterministicRandomness(seed) + var r fr.Element + r.SetBytes(bytes[:]) + + return GoKZG.SerializeScalar(r) +} + +func deterministicRandomness(seed int64) [32]byte { + // Converts an int64 to a byte slice + buf := new(bytes.Buffer) + err := binary.Write(buf, binary.BigEndian, seed) + if err != nil { + logrus.WithError(err).Error("Failed to write int64 to bytes buffer") + return [32]byte{} + } + bytes := buf.Bytes() + + return sha256.Sum256(bytes) +} diff --git a/beacon-chain/core/peerdas/validator.go b/beacon-chain/core/peerdas/validator.go new file mode 100644 index 000000000000..a3af8d31a16d --- /dev/null +++ b/beacon-chain/core/peerdas/validator.go @@ -0,0 +1,30 @@ +package peerdas + +import ( + beaconState "github.com/OffchainLabs/prysm/v6/beacon-chain/state" + "github.com/OffchainLabs/prysm/v6/config/params" + "github.com/OffchainLabs/prysm/v6/consensus-types/primitives" + "github.com/pkg/errors" +) + +// ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node. 
+// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/validator.md#validator-custody +func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) { + totalNodeBalance := uint64(0) + for index := range validatorsIndex { + validator, err := state.ValidatorAtIndexReadOnly(index) + if err != nil { + return 0, errors.Wrapf(err, "validator at index %v", index) + } + + totalNodeBalance += validator.EffectiveBalance() + } + + beaconConfig := params.BeaconConfig() + numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups + validatorCustodyRequirement := beaconConfig.ValidatorCustodyRequirement + balancePerAdditionalCustodyGroup := beaconConfig.BalancePerAdditionalCustodyGroup + + count := totalNodeBalance / balancePerAdditionalCustodyGroup + return min(max(count, validatorCustodyRequirement), numberOfCustodyGroup), nil +} diff --git a/beacon-chain/core/peerdas/validator_test.go b/beacon-chain/core/peerdas/validator_test.go new file mode 100644 index 000000000000..eae30a055431 --- /dev/null +++ b/beacon-chain/core/peerdas/validator_test.go @@ -0,0 +1,55 @@ +package peerdas_test + +import ( + "testing" + + "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas" + state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native" + "github.com/OffchainLabs/prysm/v6/consensus-types/primitives" + ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1" + "github.com/OffchainLabs/prysm/v6/testing/require" +) + +func TestValidatorsCustodyRequirement(t *testing.T) { + testCases := []struct { + name string + count uint64 + expected uint64 + }{ + {name: "0 validators", count: 0, expected: 8}, + {name: "1 validator", count: 1, expected: 8}, + {name: "8 validators", count: 8, expected: 8}, + {name: "9 validators", count: 9, expected: 9}, + {name: "100 validators", count: 100, expected: 100}, + {name: "128 validators", count: 128, expected: 128}, + {name: 
"129 validators", count: 129, expected: 128}, +{name: "1000 validators", count: 1000, expected: 128}, + } + + const balance = uint64(32_000_000_000) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + validators := make([]*ethpb.Validator, 0, tc.count) + for range tc.count { + validator := &ethpb.Validator{ + EffectiveBalance: balance, + } + + validators = append(validators, validator) + } + + validatorsIndex := make(map[primitives.ValidatorIndex]bool) + for i := range tc.count { + validatorsIndex[primitives.ValidatorIndex(i)] = true + } + + beaconState, err := state_native.InitializeFromProtoFulu(&ethpb.BeaconStateElectra{Validators: validators}) + require.NoError(t, err) + + actual, err := peerdas.ValidatorsCustodyRequirement(beaconState, validatorsIndex) + require.NoError(t, err) + require.Equal(t, tc.expected, actual) + }) + } +} diff --git a/beacon-chain/p2p/utils.go b/beacon-chain/p2p/utils.go index 6d83a31962b9..abc2e811c225 100644 --- a/beacon-chain/p2p/utils.go +++ b/beacon-chain/p2p/utils.go @@ -77,8 +77,9 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) { return nil, err } - // If the StaticPeerID flag is not set and if peerDAS is not enabled, return the private key. - if !(cfg.StaticPeerID || params.PeerDASEnabled()) { + // If the StaticPeerID flag is not set and the Fulu epoch is not set, return the private key. + // Starting at Fulu, we don't want to generate a new key every time, to avoid custody columns changes.
+ if !(cfg.StaticPeerID || params.FuluEnabled()) { return ecdsaprysm.ConvertFromInterfacePrivKey(priv) } diff --git a/beacon-chain/rpc/eth/config/handlers_test.go b/beacon-chain/rpc/eth/config/handlers_test.go index abb3b99527f0..3abc588e5698 100644 --- a/beacon-chain/rpc/eth/config/handlers_test.go +++ b/beacon-chain/rpc/eth/config/handlers_test.go @@ -200,7 +200,7 @@ func TestGetSpec(t *testing.T) { data, ok := resp.Data.(map[string]interface{}) require.Equal(t, true, ok) - assert.Equal(t, 169, len(data)) + assert.Equal(t, 175, len(data)) for k, v := range data { t.Run(k, func(t *testing.T) { switch k { @@ -545,6 +545,18 @@ func TestGetSpec(t *testing.T) { assert.Equal(t, "9", v) case "MAX_REQUEST_BLOB_SIDECARS_ELECTRA": assert.Equal(t, "1152", v) + case "NUMBER_OF_CUSTODY_GROUPS": + assert.Equal(t, "128", v) + case "BALANCE_PER_ADDITIONAL_CUSTODY_GROUP": + assert.Equal(t, "32000000000", v) + case "CUSTODY_REQUIREMENT": + assert.Equal(t, "4", v) + case "SAMPLES_PER_SLOT": + assert.Equal(t, "8", v) + case "VALIDATOR_CUSTODY_REQUIREMENT": + assert.Equal(t, "8", v) + case "MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS": + assert.Equal(t, "4096", v) case "MAX_BLOB_COMMITMENTS_PER_BLOCK": assert.Equal(t, "95", v) case "MAX_BYTES_PER_TRANSACTION": @@ -559,6 +571,8 @@ func TestGetSpec(t *testing.T) { assert.Equal(t, "100", v) case "KZG_COMMITMENT_INCLUSION_PROOF_DEPTH": assert.Equal(t, "101", v) + case "MAX_BLOBS_PER_BLOCK_FULU": + assert.Equal(t, "12", v) case "BLOB_SIDECAR_SUBNET_COUNT": assert.Equal(t, "102", v) case "BLOB_SIDECAR_SUBNET_COUNT_ELECTRA": diff --git a/changelog/manu-peerdas-core.md b/changelog/manu-peerdas-core.md new file mode 100644 index 000000000000..7ef89d4c4e44 --- /dev/null +++ b/changelog/manu-peerdas-core.md @@ -0,0 +1,2 @@ +### Added +- Implement peerDAS core functions. 
diff --git a/config/params/config.go b/config/params/config.go index b9aa332b9d7e..eb861625055c 100644 --- a/config/params/config.go +++ b/config/params/config.go @@ -242,7 +242,7 @@ type BeaconChainConfig struct { MaxPerEpochActivationChurnLimit uint64 `yaml:"MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT" spec:"true"` // MaxPerEpochActivationChurnLimit is the maximum amount of churn allotted for validator activation. MinEpochsForBlobsSidecarsRequest primitives.Epoch `yaml:"MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS" spec:"true"` // MinEpochsForBlobsSidecarsRequest is the minimum number of epochs the node will keep the blobs for. MaxRequestBlobSidecars uint64 `yaml:"MAX_REQUEST_BLOB_SIDECARS" spec:"true"` // MaxRequestBlobSidecars is the maximum number of blobs to request in a single request. - MaxRequestBlobSidecarsElectra uint64 `yaml:"MAX_REQUEST_BLOB_SIDECARS_ELECTRA" spec:"true"` // MaxRequestBlobSidecarsElectra is the maximum number of blobs to request in a single request. + MaxRequestBlobSidecarsElectra uint64 `yaml:"MAX_REQUEST_BLOB_SIDECARS_ELECTRA" spec:"true"` // MaxRequestBlobSidecarsElectra is the maximum number of blobs to request in a single request after the electra epoch. MaxRequestBlocksDeneb uint64 `yaml:"MAX_REQUEST_BLOCKS_DENEB" spec:"true"` // MaxRequestBlocksDeneb is the maximum number of blocks in a single request after the deneb epoch. FieldElementsPerBlob uint64 `yaml:"FIELD_ELEMENTS_PER_BLOB" spec:"true"` // FieldElementsPerBlob is the number of field elements that constitute a single blob. MaxBlobCommitmentsPerBlock uint64 `yaml:"MAX_BLOB_COMMITMENTS_PER_BLOCK" spec:"true"` // MaxBlobCommitmentsPerBlock is the maximum number of KZG commitments that a block can have. 
@@ -265,14 +265,17 @@ type BeaconChainConfig struct { MaxDepositRequestsPerPayload uint64 `yaml:"MAX_DEPOSIT_REQUESTS_PER_PAYLOAD" spec:"true"` // MaxDepositRequestsPerPayload is the maximum number of execution layer deposits in each payload UnsetDepositRequestsStartIndex uint64 `yaml:"UNSET_DEPOSIT_REQUESTS_START_INDEX" spec:"true"` // UnsetDepositRequestsStartIndex is used to check the start index for eip6110 - // PeerDAS Values - SamplesPerSlot uint64 `yaml:"SAMPLES_PER_SLOT"` // SamplesPerSlot refers to the number of random samples a node queries per slot. - CustodyRequirement uint64 `yaml:"CUSTODY_REQUIREMENT"` // CustodyRequirement refers to the minimum amount of subnets a peer must custody and serve samples from. - MinEpochsForDataColumnSidecarsRequest primitives.Epoch `yaml:"MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS"` // MinEpochsForDataColumnSidecarsRequest is the minimum number of epochs the node will keep the data columns for. - MaxRequestDataColumnSidecars uint64 `yaml:"MAX_REQUEST_DATA_COLUMN_SIDECARS" spec:"true"` // MaxRequestDataColumnSidecars is the maximum number of data column sidecars in a single request - MaxCellsInExtendedMatrix uint64 `yaml:"MAX_CELLS_IN_EXTENDED_MATRIX" spec:"true"` // MaxCellsInExtendedMatrix is the full data of one-dimensional erasure coding extended blobs (in row major format). - NumberOfColumns uint64 `yaml:"NUMBER_OF_COLUMNS" spec:"true"` // NumberOfColumns in the extended data matrix. - DataColumnSidecarSubnetCount uint64 `yaml:"DATA_COLUMN_SIDECAR_SUBNET_COUNT" spec:"true"` // DataColumnSidecarSubnetCount is the number of data column sidecar subnets used in the gossipsub protocol + // Values introduced in Fulu upgrade + NumberOfColumns uint64 `yaml:"NUMBER_OF_COLUMNS" spec:"true"` // NumberOfColumns in the extended data matrix. + SamplesPerSlot uint64 `yaml:"SAMPLES_PER_SLOT" spec:"true"` // SamplesPerSlot refers to the number of random samples a node queries per slot. 
+ NumberOfCustodyGroups uint64 `yaml:"NUMBER_OF_CUSTODY_GROUPS" spec:"true"` // NumberOfCustodyGroups available for nodes to custody. + CustodyRequirement uint64 `yaml:"CUSTODY_REQUIREMENT" spec:"true"` // CustodyRequirement refers to the minimum amount of subnets a peer must custody and serve samples from. + MinEpochsForDataColumnSidecarsRequest primitives.Epoch `yaml:"MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS" spec:"true"` // MinEpochsForDataColumnSidecarsRequest is the minimum number of epochs the node will keep the data columns for. + MaxCellsInExtendedMatrix uint64 `yaml:"MAX_CELLS_IN_EXTENDED_MATRIX"` // MaxCellsInExtendedMatrix is the full data of one-dimensional erasure coding extended blobs (in row major format). + DataColumnSidecarSubnetCount uint64 `yaml:"DATA_COLUMN_SIDECAR_SUBNET_COUNT" spec:"true"` // DataColumnSidecarSubnetCount is the number of data column sidecar subnets used in the gossipsub protocol + MaxRequestDataColumnSidecars uint64 `yaml:"MAX_REQUEST_DATA_COLUMN_SIDECARS" spec:"true"` // MaxRequestDataColumnSidecars is the maximum number of data column sidecars in a single request + ValidatorCustodyRequirement uint64 `yaml:"VALIDATOR_CUSTODY_REQUIREMENT" spec:"true"` // ValidatorCustodyRequirement is the minimum number of custody groups an honest node with validators attached custodies and serves samples from + BalancePerAdditionalCustodyGroup uint64 `yaml:"BALANCE_PER_ADDITIONAL_CUSTODY_GROUP" spec:"true"` // BalancePerAdditionalCustodyGroup is the balance increment corresponding to one additional group to custody. // Networking Specific Parameters MaxPayloadSize uint64 `yaml:"MAX_PAYLOAD_SIZE" spec:"true"` // MAX_PAYLOAD_SIZE is the maximum allowed size of uncompressed payload in gossip messages and rpc chunks. @@ -304,6 +307,10 @@ type BeaconChainConfig struct { // DeprecatedTargetBlobsPerBlockElectra defines the target number of blobs per block post Electra hard fork. // Deprecated: This field is no longer supported. Avoid using it. 
DeprecatedTargetBlobsPerBlockElectra int `yaml:"TARGET_BLOBS_PER_BLOCK_ELECTRA" spec:"true"` + + // DeprecatedMaxBlobsPerBlockFulu defines the max blobs that could exist in a block post Fulu hard fork. + // Deprecated: This field is no longer supported. Avoid using it. + DeprecatedMaxBlobsPerBlockFulu int `yaml:"MAX_BLOBS_PER_BLOCK_FULU" spec:"true"` } // InitializeForkSchedule initializes the schedules forks baked into the config. @@ -389,32 +396,49 @@ func (b *BeaconChainConfig) TargetBlobsPerBlock(slot primitives.Slot) int { if primitives.Epoch(slot.DivSlot(b.SlotsPerEpoch)) >= b.ElectraForkEpoch { return b.DeprecatedTargetBlobsPerBlockElectra } + return b.DeprecatedMaxBlobsPerBlock / 2 } -// MaxBlobsPerBlock returns the maximum number of blobs per block for the given slot, -// adjusting for the Electra fork. +// MaxBlobsPerBlock returns the maximum number of blobs per block for the given slot. func (b *BeaconChainConfig) MaxBlobsPerBlock(slot primitives.Slot) int { - if primitives.Epoch(slot.DivSlot(b.SlotsPerEpoch)) >= b.ElectraForkEpoch { + epoch := primitives.Epoch(slot.DivSlot(b.SlotsPerEpoch)) + + if epoch >= b.FuluForkEpoch { + return b.DeprecatedMaxBlobsPerBlockFulu + } + + if epoch >= b.ElectraForkEpoch { return b.DeprecatedMaxBlobsPerBlockElectra } + return b.DeprecatedMaxBlobsPerBlock } // MaxBlobsPerBlockByVersion returns the maximum number of blobs per block for the given fork version func (b *BeaconChainConfig) MaxBlobsPerBlockByVersion(v int) int { + if v >= version.Fulu { + return b.DeprecatedMaxBlobsPerBlockFulu + } + if v >= version.Electra { return b.DeprecatedMaxBlobsPerBlockElectra } + return b.DeprecatedMaxBlobsPerBlock } // MaxBlobsPerBlockByEpoch returns the maximum number of blobs per block for the given epoch, // adjusting for the Electra fork. 
func (b *BeaconChainConfig) MaxBlobsPerBlockAtEpoch(epoch primitives.Epoch) int { + if epoch >= b.FuluForkEpoch { + return b.DeprecatedMaxBlobsPerBlockFulu + } + if epoch >= b.ElectraForkEpoch { return b.DeprecatedMaxBlobsPerBlockElectra } + return b.DeprecatedMaxBlobsPerBlock } @@ -432,9 +456,9 @@ func ElectraEnabled() bool { return BeaconConfig().ElectraForkEpoch < math.MaxUint64 } -// PeerDASEnabled centralizes the check to determine if code paths -// that are specific to peerdas should be allowed to execute. -func PeerDASEnabled() bool { +// FuluEnabled centralizes the check to determine if code paths that are specific to Fulu should be allowed to execute. +// This will make it easier to find call sites that do this kind of check and remove them post-fulu. +func FuluEnabled() bool { return BeaconConfig().FuluForkEpoch < math.MaxUint64 } diff --git a/config/params/config_test.go b/config/params/config_test.go index bb8ee1e591cf..bad2bc289c92 100644 --- a/config/params/config_test.go +++ b/config/params/config_test.go @@ -141,9 +141,14 @@ func TestMaxBlobsPerBlockByVersion(t *testing.T) { want: params.BeaconConfig().DeprecatedMaxBlobsPerBlockElectra, }, { - name: "Version above Electra", - v: version.Electra + 1, - want: params.BeaconConfig().DeprecatedMaxBlobsPerBlockElectra, + name: "Version equal to Fulu", + v: version.Fulu, + want: params.BeaconConfig().DeprecatedMaxBlobsPerBlockFulu, + }, + { + name: "Version above Fulu", + v: version.Fulu + 1, + want: params.BeaconConfig().DeprecatedMaxBlobsPerBlockFulu, }, } diff --git a/config/params/loader.go b/config/params/loader.go index 563a1020ecda..9878dc6e44c2 100644 --- a/config/params/loader.go +++ b/config/params/loader.go @@ -241,6 +241,7 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte { fmt.Sprintf("MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: %d", cfg.MinPerEpochChurnLimitElectra), fmt.Sprintf("MAX_BLOBS_PER_BLOCK: %d", cfg.DeprecatedMaxBlobsPerBlock), fmt.Sprintf("MAX_BLOBS_PER_BLOCK_ELECTRA: %d", 
cfg.DeprecatedMaxBlobsPerBlockElectra), + fmt.Sprintf("MAX_BLOBS_PER_BLOCK_FULU: %d", cfg.DeprecatedMaxBlobsPerBlockFulu), } yamlFile := []byte(strings.Join(lines, "\n")) diff --git a/config/params/loader_test.go b/config/params/loader_test.go index 5bbf8e7e48d8..6f0d73f6a14f 100644 --- a/config/params/loader_test.go +++ b/config/params/loader_test.go @@ -25,7 +25,6 @@ import ( // IMPORTANT: Use one field per line and sort these alphabetically to reduce conflicts. var placeholderFields = []string{ "ATTESTATION_DEADLINE", - "BALANCE_PER_ADDITIONAL_CUSTODY_GROUP", "BLOB_SIDECAR_SUBNET_COUNT_FULU", "EIP6110_FORK_EPOCH", "EIP6110_FORK_VERSION", @@ -38,15 +37,12 @@ var placeholderFields = []string{ "EIP7805_FORK_EPOCH", "EIP7805_FORK_VERSION", "EPOCHS_PER_SHUFFLING_PHASE", - "MAX_BLOBS_PER_BLOCK_FULU", "MAX_REQUEST_BLOB_SIDECARS_FULU", "MAX_REQUEST_PAYLOADS", // Compile time constant on BeaconBlockBody.ExecutionRequests - "NUMBER_OF_CUSTODY_GROUPS", "PROPOSER_INCLUSION_LIST_CUT_OFF", "PROPOSER_SELECTION_GAP", "TARGET_NUMBER_OF_PEERS", "UPDATE_TIMEOUT", - "VALIDATOR_CUSTODY_REQUIREMENT", "VIEW_FREEZE_DEADLINE", "WHISK_EPOCHS_PER_SHUFFLING_PHASE", "WHISK_FORK_EPOCH", diff --git a/config/params/mainnet_config.go b/config/params/mainnet_config.go index 9ef092e09d0f..048aec5a3e94 100644 --- a/config/params/mainnet_config.go +++ b/config/params/mainnet_config.go @@ -37,6 +37,7 @@ var mainnetNetworkConfig = &NetworkConfig{ ETH2Key: "eth2", AttSubnetKey: "attnets", SyncCommsSubnetKey: "syncnets", + CustodyGroupCountKey: "cgc", MinimumPeersInSubnetSearch: 20, ContractDeploymentBlock: 11184524, // Note: contract was deployed in block 11052984 but no transactions were sent until 11184524. 
BootstrapNodes: []string{ @@ -286,10 +287,9 @@ var mainnetBeaconConfig = &BeaconChainConfig{ FieldElementsPerBlob: 4096, MaxBlobCommitmentsPerBlock: 4096, KzgCommitmentInclusionProofDepth: 17, + DeprecatedMaxBlobsPerBlock: 6, // Values related to electra - MaxRequestDataColumnSidecars: 16384, - DataColumnSidecarSubnetCount: 128, MinPerEpochChurnLimitElectra: 128_000_000_000, MaxPerEpochActivationExitChurnLimit: 256_000_000_000, MaxEffectiveBalanceElectra: 2048_000_000_000, @@ -306,13 +306,22 @@ var mainnetBeaconConfig = &BeaconChainConfig{ MaxWithdrawalRequestsPerPayload: 16, MaxDepositRequestsPerPayload: 8192, // 2**13 (= 8192) UnsetDepositRequestsStartIndex: math.MaxUint64, + DeprecatedMaxBlobsPerBlockElectra: 9, + DeprecatedTargetBlobsPerBlockElectra: 6, + MaxRequestBlobSidecarsElectra: 1152, - // PeerDAS + // Values related to fulu + MaxRequestDataColumnSidecars: 16384, + DataColumnSidecarSubnetCount: 128, NumberOfColumns: 128, - MaxCellsInExtendedMatrix: 768, SamplesPerSlot: 8, + NumberOfCustodyGroups: 128, CustodyRequirement: 4, MinEpochsForDataColumnSidecarsRequest: 4096, + MaxCellsInExtendedMatrix: 768, + ValidatorCustodyRequirement: 8, + BalancePerAdditionalCustodyGroup: 32_000_000_000, + DeprecatedMaxBlobsPerBlockFulu: 12, // Values related to networking parameters. MaxPayloadSize: 10 * 1 << 20, // 10 MiB @@ -330,11 +339,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{ AttestationSubnetPrefixBits: 6, SubnetsPerNode: 2, NodeIdBits: 256, - - DeprecatedMaxBlobsPerBlock: 6, - DeprecatedMaxBlobsPerBlockElectra: 9, - DeprecatedTargetBlobsPerBlockElectra: 6, - MaxRequestBlobSidecarsElectra: 1152, } // MainnetTestConfig provides a version of the mainnet config that has a different name diff --git a/config/params/network_config.go b/config/params/network_config.go index 53b9e933c073..f03e55870977 100644 --- a/config/params/network_config.go +++ b/config/params/network_config.go @@ -8,9 +8,10 @@ import ( // NetworkConfig defines the spec based network parameters. 
type NetworkConfig struct { // DiscoveryV5 Config - ETH2Key string // ETH2Key is the ENR key of the Ethereum consensus object in an enr. - AttSubnetKey string // AttSubnetKey is the ENR key of the subnet bitfield in the enr. - SyncCommsSubnetKey string // SyncCommsSubnetKey is the ENR key of the sync committee subnet bitfield in the enr. + ETH2Key string // ETH2Key is the ENR key of the Ethereum consensus object. + AttSubnetKey string // AttSubnetKey is the ENR key of the subnet bitfield. + SyncCommsSubnetKey string // SyncCommsSubnetKey is the ENR key of the sync committee subnet bitfield. + CustodyGroupCountKey string // CustodyGroupCountKey is the ENR key of the custody group count. MinimumPeersInSubnetSearch uint64 // PeersInSubnetSearch is the required amount of peers that we need to be able to lookup in a subnet search. // Chain Network Config diff --git a/consensus-types/blocks/BUILD.bazel b/consensus-types/blocks/BUILD.bazel index 33064e7893d2..1db754610a8c 100644 --- a/consensus-types/blocks/BUILD.bazel +++ b/consensus-types/blocks/BUILD.bazel @@ -12,6 +12,7 @@ go_library( "proto.go", "roblob.go", "roblock.go", + "rodatacolumn.go", "setters.go", "types.go", ], @@ -51,6 +52,7 @@ go_test( "proto_test.go", "roblob_test.go", "roblock_test.go", + "rodatacolumn_test.go", ], embed = [":go_default_library"], deps = [ diff --git a/consensus-types/blocks/kzg.go b/consensus-types/blocks/kzg.go index 3939d81b9b83..8d04a29e9a47 100644 --- a/consensus-types/blocks/kzg.go +++ b/consensus-types/blocks/kzg.go @@ -80,8 +80,38 @@ func MerkleProofKZGCommitment(body interfaces.ReadOnlyBeaconBlockBody, index int return proof, nil } -// leavesFromCommitments hashes each commitment to construct a slice of roots -func leavesFromCommitments(commitments [][]byte) [][]byte { +// MerkleProofKZGCommitments constructs a Merkle proof of inclusion of the KZG +// commitments into the Beacon Block with the given `body` +func MerkleProofKZGCommitments(body
interfaces.ReadOnlyBeaconBlockBody) ([][]byte, error) { + bodyVersion := body.Version() + if bodyVersion < version.Deneb { + return nil, errUnsupportedBeaconBlockBody + } + + membersRoots, err := topLevelRoots(body) + if err != nil { + return nil, errors.Wrap(err, "top level roots") + } + + sparse, err := trie.GenerateTrieFromItems(membersRoots, logBodyLength) + if err != nil { + return nil, errors.Wrap(err, "generate trie from items") + } + + proof, err := sparse.MerkleProof(kzgPosition) + if err != nil { + return nil, errors.Wrap(err, "merkle proof") + } + + // Remove the last element as it is a mix in with the number of + // elements in the trie. + proof = proof[:len(proof)-1] + + return proof, nil +} + +// LeavesFromCommitments hashes each commitment to construct a slice of roots +func LeavesFromCommitments(commitments [][]byte) [][]byte { leaves := make([][]byte, len(commitments)) for i, kzg := range commitments { chunk := makeChunk(kzg) @@ -105,7 +135,7 @@ func bodyProof(commitments [][]byte, index int) ([][]byte, error) { if index < 0 || index >= len(commitments) { return nil, errInvalidIndex } - leaves := leavesFromCommitments(commitments) + leaves := LeavesFromCommitments(commitments) sparse, err := trie.GenerateTrieFromItems(leaves, field_params.LogMaxBlobCommitments) if err != nil { return nil, err diff --git a/consensus-types/blocks/kzg_test.go b/consensus-types/blocks/kzg_test.go index 61fd4441d4d5..f4486015f458 100644 --- a/consensus-types/blocks/kzg_test.go +++ b/consensus-types/blocks/kzg_test.go @@ -6,6 +6,7 @@ import ( "testing" fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" + "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces" "github.com/OffchainLabs/prysm/v6/container/trie" enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1" ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1" @@ -32,7 +33,7 @@ func Test_MerkleProofKZGCommitment_Altair(t *testing.T) { require.ErrorIs(t, 
errUnsupportedBeaconBlockBody, err) } -func Test_MerkleProofKZGCommitment(t *testing.T) { +func buildTestKzgsAndBody(t *testing.T) ([][]byte, interfaces.ReadOnlyBeaconBlockBody) { kzgs := make([][]byte, 3) kzgs[0] = make([]byte, 48) _, err := rand.Read(kzgs[0]) @@ -69,8 +70,15 @@ func Test_MerkleProofKZGCommitment(t *testing.T) { body, err := NewBeaconBlockBody(pbBody) require.NoError(t, err) - index := 1 - _, err = MerkleProofKZGCommitment(body, 10) + + return kzgs, body +} + +func Test_MerkleProofKZGCommitment(t *testing.T) { + const index = 1 + + kzgs, body := buildTestKzgsAndBody(t) + _, err := MerkleProofKZGCommitment(body, 10) require.ErrorIs(t, errInvalidIndex, err) proof, err := MerkleProofKZGCommitment(body, index) require.NoError(t, err) @@ -104,6 +112,40 @@ func Test_MerkleProofKZGCommitment(t *testing.T) { require.Equal(t, true, trie.VerifyMerkleProof(root[:], chunk[0][:], uint64(index+KZGOffset), proof)) } +func TestMerkleProofKZGCommitments(t *testing.T) { + t.Run("invalid version", func(t *testing.T) { + pbBody := &ethpb.BeaconBlockBodyAltair{} + + body, err := NewBeaconBlockBody(pbBody) + require.NoError(t, err) + _, err = MerkleProofKZGCommitments(body) + require.ErrorIs(t, errUnsupportedBeaconBlockBody, err) + }) + + t.Run("nominal", func(t *testing.T) { + kzgs, body := buildTestKzgsAndBody(t) + + proof, err := MerkleProofKZGCommitments(body) + require.NoError(t, err) + + commitmentsRoot, err := getBlobKzgCommitmentsRoot(kzgs) + require.NoError(t, err) + + bodyMembersRoots, err := topLevelRoots(body) + require.NoError(t, err, "Failed to get top level roots") + + bodySparse, err := trie.GenerateTrieFromItems(bodyMembersRoots, logBodyLength) + require.NoError(t, err, "Failed to generate trie from member roots") + + require.Equal(t, bodyLength, bodySparse.NumOfItems()) + + root, err := body.HashTreeRoot() + require.NoError(t, err) + + require.Equal(t, true, trie.VerifyMerkleProof(root[:], commitmentsRoot[:], kzgPosition, proof)) + }) +} + // This test
explains the calculation of the KZG commitment root's Merkle index // in the Body's Merkle tree based on the index of the KZG commitment list in the Body. func Test_KZGRootIndex(t *testing.T) { @@ -139,7 +181,7 @@ func ceilLog2(x uint32) (uint32, error) { } func getBlobKzgCommitmentsRoot(commitments [][]byte) ([32]byte, error) { - commitmentsLeaves := leavesFromCommitments(commitments) + commitmentsLeaves := LeavesFromCommitments(commitments) commitmentsSparse, err := trie.GenerateTrieFromItems( commitmentsLeaves, fieldparams.LogMaxBlobCommitments, diff --git a/consensus-types/blocks/rodatacolumn.go b/consensus-types/blocks/rodatacolumn.go new file mode 100644 index 000000000000..14cace6d7aeb --- /dev/null +++ b/consensus-types/blocks/rodatacolumn.go @@ -0,0 +1,68 @@ +package blocks + +import ( + fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" + ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1" +) + +// RODataColumn represents a read-only data column sidecar with its block root. +type RODataColumn struct { + *ethpb.DataColumnSidecar + root [fieldparams.RootLength]byte +} + +func roDataColumnNilCheck(dc *ethpb.DataColumnSidecar) error { + // Check if the data column is nil. + if dc == nil { + return errNilDataColumn + } + + // Check if the data column header is nil. + if dc.SignedBlockHeader == nil || dc.SignedBlockHeader.Header == nil { + return errNilBlockHeader + } + + // Check if the data column signature is nil. + if len(dc.SignedBlockHeader.Signature) == 0 { + return errMissingBlockSignature + } + + return nil +} + +// NewRODataColumn creates a new RODataColumn by computing the HashTreeRoot of the header. 
+func NewRODataColumn(dc *ethpb.DataColumnSidecar) (RODataColumn, error) { + if err := roDataColumnNilCheck(dc); err != nil { + return RODataColumn{}, err + } + root, err := dc.SignedBlockHeader.Header.HashTreeRoot() + if err != nil { + return RODataColumn{}, err + } + return RODataColumn{DataColumnSidecar: dc, root: root}, nil +} + +// NewRODataColumnWithRoot creates a new RODataColumn with a given root. +func NewRODataColumnWithRoot(dc *ethpb.DataColumnSidecar, root [fieldparams.RootLength]byte) (RODataColumn, error) { + // Check if the data column is nil. + if err := roDataColumnNilCheck(dc); err != nil { + return RODataColumn{}, err + } + + return RODataColumn{DataColumnSidecar: dc, root: root}, nil +} + +// BlockRoot returns the root of the block. +func (dc *RODataColumn) BlockRoot() [fieldparams.RootLength]byte { + return dc.root +} + +// VerifiedRODataColumn represents an RODataColumn that has undergone full verification (eg block sig, inclusion proof, commitment check). +type VerifiedRODataColumn struct { + RODataColumn +} + +// NewVerifiedRODataColumn "upgrades" an RODataColumn to a VerifiedRODataColumn. This method should only be used by the verification package. 
+func NewVerifiedRODataColumn(roDataColumn RODataColumn) VerifiedRODataColumn { + return VerifiedRODataColumn{RODataColumn: roDataColumn} +} diff --git a/consensus-types/blocks/rodatacolumn_test.go b/consensus-types/blocks/rodatacolumn_test.go new file mode 100644 index 000000000000..5dca64cc77f9 --- /dev/null +++ b/consensus-types/blocks/rodatacolumn_test.go @@ -0,0 +1,125 @@ +package blocks + +import ( + "testing" + + fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" + "github.com/OffchainLabs/prysm/v6/encoding/bytesutil" + ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1" + "github.com/OffchainLabs/prysm/v6/testing/assert" + "github.com/OffchainLabs/prysm/v6/testing/require" +) + +func TestNewRODataColumnWithAndWithoutRoot(t *testing.T) { + cases := []struct { + name string + dcFunc func(t *testing.T) *ethpb.DataColumnSidecar + err error + root []byte + }{ + { + name: "nil signed data column", + dcFunc: func(t *testing.T) *ethpb.DataColumnSidecar { + return nil + }, + err: errNilDataColumn, + root: bytesutil.PadTo([]byte("sup"), fieldparams.RootLength), + }, + { + name: "nil signed block header", + dcFunc: func(t *testing.T) *ethpb.DataColumnSidecar { + return &ethpb.DataColumnSidecar{ + SignedBlockHeader: nil, + } + }, + err: errNilBlockHeader, + root: bytesutil.PadTo([]byte("sup"), fieldparams.RootLength), + }, + { + name: "nil inner header", + dcFunc: func(t *testing.T) *ethpb.DataColumnSidecar { + return &ethpb.DataColumnSidecar{ + SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{ + Header: nil, + }, + } + }, + err: errNilBlockHeader, + root: bytesutil.PadTo([]byte("sup"), fieldparams.RootLength), + }, + { + name: "nil signature", + dcFunc: func(t *testing.T) *ethpb.DataColumnSidecar { + return &ethpb.DataColumnSidecar{ + SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{ + Header: &ethpb.BeaconBlockHeader{ + ParentRoot: make([]byte, fieldparams.RootLength), + StateRoot: make([]byte, fieldparams.RootLength), + BodyRoot: make([]byte,
fieldparams.RootLength), + }, + Signature: nil, + }, + } + }, + err: errMissingBlockSignature, + root: bytesutil.PadTo([]byte("sup"), fieldparams.RootLength), + }, + { + name: "nominal", + dcFunc: func(t *testing.T) *ethpb.DataColumnSidecar { + return &ethpb.DataColumnSidecar{ + SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{ + Header: &ethpb.BeaconBlockHeader{ + ParentRoot: make([]byte, fieldparams.RootLength), + StateRoot: make([]byte, fieldparams.RootLength), + BodyRoot: make([]byte, fieldparams.RootLength), + }, + Signature: make([]byte, fieldparams.BLSSignatureLength), + }, + } + }, + root: bytesutil.PadTo([]byte("sup"), fieldparams.RootLength), + }, + } + for _, c := range cases { + t.Run(c.name+" NewRODataColumn", func(t *testing.T) { + dataColumnSidecar := c.dcFunc(t) + roDataColumnSidecar, err := NewRODataColumn(dataColumnSidecar) + + if c.err != nil { + require.ErrorIs(t, err, c.err) + return + } + + require.NoError(t, err) + hr, err := dataColumnSidecar.SignedBlockHeader.Header.HashTreeRoot() + require.NoError(t, err) + require.Equal(t, hr, roDataColumnSidecar.BlockRoot()) + }) + + if len(c.root) == 0 { + continue + } + + t.Run(c.name+" NewRODataColumnWithRoot", func(t *testing.T) { + b := c.dcFunc(t) + + // We want the same validation when specifying a root. + bl, err := NewRODataColumnWithRoot(b, bytesutil.ToBytes32(c.root)) + if c.err != nil { + require.ErrorIs(t, err, c.err) + return + } + + assert.Equal(t, bytesutil.ToBytes32(c.root), bl.BlockRoot()) + }) + } +} + +func TestDataColumn_BlockRoot(t *testing.T) { + root := [fieldparams.RootLength]byte{1} + dataColumn := &RODataColumn{ + root: root, + } + assert.Equal(t, root, dataColumn.BlockRoot()) +} diff --git a/consensus-types/blocks/types.go b/consensus-types/blocks/types.go index eb033364f078..26601720250e 100644 --- a/consensus-types/blocks/types.go +++ b/consensus-types/blocks/types.go @@ -29,6 +29,7 @@ var ( // ErrUnsupportedVersion for beacon block methods.
ErrUnsupportedVersion = errors.New("unsupported beacon block version") errNilBlob = errors.New("received nil blob sidecar") + errNilDataColumn = errors.New("received nil data column sidecar") errNilBlock = errors.New("received nil beacon block") errNilBlockBody = errors.New("received nil beacon block body") errIncorrectBlockVersion = errors.New(incorrectBlockVersion) diff --git a/testing/spectest/mainnet/fulu/networking/BUILD.bazel b/testing/spectest/mainnet/fulu/networking/BUILD.bazel new file mode 100644 index 000000000000..0a7200017c18 --- /dev/null +++ b/testing/spectest/mainnet/fulu/networking/BUILD.bazel @@ -0,0 +1,12 @@ +load("@prysm//tools/go:def.bzl", "go_test") + +go_test( + name = "go_default_test", + size = "small", + srcs = ["custody_groups_test.go"], + data = glob(["*.yaml"]) + [ + "@consensus_spec_tests_mainnet//:test_data", + ], + tags = ["spectest"], + deps = ["//testing/spectest/shared/fulu/networking:go_default_library"], +) diff --git a/testing/spectest/mainnet/fulu/networking/custody_groups_test.go b/testing/spectest/mainnet/fulu/networking/custody_groups_test.go new file mode 100644 index 000000000000..3b36376453e9 --- /dev/null +++ b/testing/spectest/mainnet/fulu/networking/custody_groups_test.go @@ -0,0 +1,15 @@ +package networking + +import ( + "testing" + + "github.com/OffchainLabs/prysm/v6/testing/spectest/shared/fulu/networking" +) + +func TestMainnet_Fulu_Networking_CustodyGroups(t *testing.T) { + networking.RunCustodyGroupsTest(t, "mainnet") +} + +func TestMainnet_Fulu_Networking_ComputeCustodyColumnsForCustodyGroup(t *testing.T) { + networking.RunComputeColumnsForCustodyGroupTest(t, "mainnet") +} diff --git a/testing/spectest/minimal/fulu/networking/BUILD.bazel b/testing/spectest/minimal/fulu/networking/BUILD.bazel new file mode 100644 index 000000000000..0da30acdc574 --- /dev/null +++ b/testing/spectest/minimal/fulu/networking/BUILD.bazel @@ -0,0 +1,12 @@ +load("@prysm//tools/go:def.bzl", "go_test") + +go_test( + name = 
"go_default_test", + size = "small", + srcs = ["custody_columns_test.go"], + data = glob(["*.yaml"]) + [ + "@consensus_spec_tests_minimal//:test_data", + ], + tags = ["spectest"], + deps = ["//testing/spectest/shared/fulu/networking:go_default_library"], +) diff --git a/testing/spectest/minimal/fulu/networking/custody_columns_test.go b/testing/spectest/minimal/fulu/networking/custody_columns_test.go new file mode 100644 index 000000000000..43419b86c249 --- /dev/null +++ b/testing/spectest/minimal/fulu/networking/custody_columns_test.go @@ -0,0 +1,15 @@ +package networking + +import ( + "testing" + + "github.com/OffchainLabs/prysm/v6/testing/spectest/shared/fulu/networking" +) + +func TestMinimal_Fulu_Networking_CustodyGroups(t *testing.T) { + networking.RunCustodyGroupsTest(t, "minimal") +} + +func TestMinimal_Fulu_Networking_ComputeCustodyColumnsForCustodyGroup(t *testing.T) { + networking.RunComputeColumnsForCustodyGroupTest(t, "minimal") +} diff --git a/testing/spectest/shared/fulu/networking/BUILD.bazel b/testing/spectest/shared/fulu/networking/BUILD.bazel new file mode 100644 index 000000000000..bec4d77f4852 --- /dev/null +++ b/testing/spectest/shared/fulu/networking/BUILD.bazel @@ -0,0 +1,17 @@ +load("@prysm//tools/go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + testonly = True, + srcs = ["custody_groups.go"], + importpath = "github.com/OffchainLabs/prysm/v6/testing/spectest/shared/fulu/networking", + visibility = ["//visibility:public"], + deps = [ + "//beacon-chain/core/peerdas:go_default_library", + "//testing/require:go_default_library", + "//testing/spectest/utils:go_default_library", + "//testing/util:go_default_library", + "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library", + "@in_gopkg_yaml_v3//:go_default_library", + ], +) diff --git a/testing/spectest/shared/fulu/networking/custody_groups.go b/testing/spectest/shared/fulu/networking/custody_groups.go new file mode 100644 index 000000000000..91cd52536604 ---
/dev/null +++ b/testing/spectest/shared/fulu/networking/custody_groups.go @@ -0,0 +1,107 @@ +package networking + +import ( + "math/big" + "testing" + + "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas" + "github.com/OffchainLabs/prysm/v6/testing/require" + "github.com/OffchainLabs/prysm/v6/testing/spectest/utils" + "github.com/OffchainLabs/prysm/v6/testing/util" + "github.com/ethereum/go-ethereum/p2p/enode" + "gopkg.in/yaml.v3" +) + +// RunCustodyGroupsTest executes custody groups spec tests. +func RunCustodyGroupsTest(t *testing.T, config string) { + type configuration struct { + NodeId *big.Int `yaml:"node_id"` + CustodyGroupCount uint64 `yaml:"custody_group_count"` + Expected []uint64 `yaml:"result"` + } + + err := utils.SetConfig(t, config) + require.NoError(t, err, "failed to set config") + + // Retrieve the test vector folders. + testFolders, testsFolderPath := utils.TestFolders(t, config, "fulu", "networking/get_custody_groups/pyspec_tests") + if len(testFolders) == 0 { + t.Fatalf("no test folders found for %s", testsFolderPath) + } + + for _, folder := range testFolders { + t.Run(folder.Name(), func(t *testing.T) { + var ( + config configuration + nodeIdBytes32 [32]byte + ) + + // Load the test vector. + file, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "meta.yaml") + require.NoError(t, err, "failed to retrieve the `meta.yaml` YAML file") + + // Unmarshal the test vector. + err = yaml.Unmarshal(file, &config) + require.NoError(t, err, "failed to unmarshal the YAML file") + + // Get the node ID. + nodeIdBytes := make([]byte, 32) + config.NodeId.FillBytes(nodeIdBytes) + copy(nodeIdBytes32[:], nodeIdBytes) + nodeId := enode.ID(nodeIdBytes32) + + // Compute the custody groups. + actual, err := peerdas.CustodyGroups(nodeId, config.CustodyGroupCount) + require.NoError(t, err, "failed to compute the custody groups") + + // Compare the results. 
+ require.Equal(t, len(config.Expected), len(actual)) + + for i := range config.Expected { + require.Equal(t, config.Expected[i], actual[i], "at position %d", i) + } + }) + } +} + +// RunComputeColumnsForCustodyGroupTest executes compute columns for custody group spec tests. +func RunComputeColumnsForCustodyGroupTest(t *testing.T, config string) { + type configuration struct { + CustodyGroup uint64 `yaml:"custody_group"` + Expected []uint64 `yaml:"result"` + } + + err := utils.SetConfig(t, config) + require.NoError(t, err, "failed to set config") + + // Retrieve the test vector folders. + testFolders, testsFolderPath := utils.TestFolders(t, config, "fulu", "networking/compute_columns_for_custody_group/pyspec_tests") + if len(testFolders) == 0 { + t.Fatalf("no test folders found for %s", testsFolderPath) + } + + for _, folder := range testFolders { + t.Run(folder.Name(), func(t *testing.T) { + var config configuration + + // Load the test vector. + file, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "meta.yaml") + require.NoError(t, err, "failed to retrieve the `meta.yaml` YAML file") + + // Unmarshal the test vector. + err = yaml.Unmarshal(file, &config) + require.NoError(t, err, "failed to unmarshal the YAML file") + + // Compute the custody columns. + actual, err := peerdas.ComputeColumnsForCustodyGroup(config.CustodyGroup) + require.NoError(t, err, "failed to compute the custody columns") + + // Compare the results. 
+ require.Equal(t, len(config.Expected), len(actual), "expected %d custody columns, got %d", len(config.Expected), len(actual)) + + for i := range config.Expected { + require.Equal(t, config.Expected[i], actual[i], "expected column at index %d differs from actual column", i) + } + }) + } +} diff --git a/testing/util/BUILD.bazel b/testing/util/BUILD.bazel index 2db1a866f5ce..9f806bb283fe 100644 --- a/testing/util/BUILD.bazel +++ b/testing/util/BUILD.bazel @@ -19,6 +19,7 @@ go_library( "electra.go", "electra_block.go", "electra_state.go", + "fulu.go", "helpers.go", "lightclient.go", "logging.go", @@ -31,6 +32,7 @@ go_library( importpath = "github.com/OffchainLabs/prysm/v6/testing/util", visibility = ["//visibility:public"], deps = [ + "//beacon-chain/blockchain/kzg:go_default_library", "//beacon-chain/core/altair:go_default_library", "//beacon-chain/core/blocks:go_default_library", "//beacon-chain/core/helpers:go_default_library", diff --git a/testing/util/fulu.go b/testing/util/fulu.go new file mode 100644 index 000000000000..3818c7fb1a75 --- /dev/null +++ b/testing/util/fulu.go @@ -0,0 +1,18 @@ +package util + +import ( + "testing" + + "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg" + "github.com/OffchainLabs/prysm/v6/testing/require" +) + +func GenerateCellsAndProofs(t *testing.T, blobs []kzg.Blob) []kzg.CellsAndProofs { + cellsAndProofs := make([]kzg.CellsAndProofs, len(blobs)) + for i := range blobs { + cp, err := kzg.ComputeCellsAndKZGProofs(&blobs[i]) + require.NoError(t, err) + cellsAndProofs[i] = cp + } + return cellsAndProofs +}