diff --git a/encoding/codecv1/codecv1.go b/encoding/codecv1/codecv1.go
index 205f257..4ed048b 100644
--- a/encoding/codecv1/codecv1.go
+++ b/encoding/codecv1/codecv1.go
@@ -8,9 +8,7 @@ import (
     "fmt"
     "math/big"
     "strings"
-    "sync"

-    "github.com/scroll-tech/go-ethereum/accounts/abi"
     "github.com/scroll-tech/go-ethereum/common"
     "github.com/scroll-tech/go-ethereum/core/types"
     "github.com/scroll-tech/go-ethereum/crypto"
@@ -260,7 +258,7 @@ func constructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
     copy(challengePreimage[0:], hash[:])

     // convert raw data to BLSFieldElements
-    blob, err := MakeBlobCanonical(blobBytes)
+    blob, err := encoding.MakeBlobCanonical(blobBytes)
     if err != nil {
         return nil, common.Hash{}, nil, err
     }
@@ -288,31 +286,6 @@ func constructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
     return blob, blobVersionedHash, &z, nil
 }

-// MakeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
-func MakeBlobCanonical(blobBytes []byte) (*kzg4844.Blob, error) {
-    // blob contains 131072 bytes but we can only utilize 31/32 of these
-    if len(blobBytes) > 126976 {
-        return nil, fmt.Errorf("oversized batch payload, blob bytes length: %v, max length: %v", len(blobBytes), 126976)
-    }
-
-    // the canonical (padded) blob payload
-    var blob kzg4844.Blob
-
-    // encode blob payload by prepending every 31 bytes with 1 zero byte
-    index := 0
-
-    for from := 0; from < len(blobBytes); from += 31 {
-        to := from + 31
-        if to > len(blobBytes) {
-            to = len(blobBytes)
-        }
-        copy(blob[index+1:], blobBytes[from:to])
-        index += 32
-    }
-
-    return &blob, nil
-}
-
 // NewDABatchFromBytes decodes the given byte slice into a DABatch.
 // Note: This function only populates the batch header, it leaves the blob-related fields empty.
 func NewDABatchFromBytes(data []byte) (*DABatch, error) {
@@ -379,7 +352,7 @@ func (b *DABatch) BlobDataProof() ([]byte, error) {
     // | bytes32 | bytes32 | bytes48 | bytes48 |
     values := []interface{}{*b.z, y, commitment, proof}

-    blobDataProofArgs, err := GetBlobDataProofArgs()
+    blobDataProofArgs, err := encoding.GetBlobDataProofArgs()
     if err != nil {
         return nil, fmt.Errorf("failed to get blob data proof args, err: %w", err)
     }
@@ -398,7 +371,7 @@ func EstimateChunkL1CommitBlobSize(c *encoding.Chunk) (uint64, error) {
     if err != nil {
         return 0, err
     }
-    return CalculatePaddedBlobSize(metadataSize + chunkDataSize), nil
+    return encoding.CalculatePaddedBlobSize(metadataSize + chunkDataSize), nil
 }

 // EstimateBatchL1CommitBlobSize estimates the total size of the L1 commit blob for a batch.
@@ -412,7 +385,7 @@ func EstimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, error) {
         }
         batchDataSize += chunkDataSize
     }
-    return CalculatePaddedBlobSize(metadataSize + batchDataSize), nil
+    return encoding.CalculatePaddedBlobSize(metadataSize + batchDataSize), nil
 }

 func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
@@ -550,55 +523,3 @@ func EstimateBatchL1CommitCalldataSize(b *encoding.Batch) uint64 {
     }
     return totalL1CommitCalldataSize
 }
-
-// CalculatePaddedBlobSize calculates the required size on blob storage
-// where every 32 bytes can store only 31 bytes of actual data, with the first byte being zero.
-func CalculatePaddedBlobSize(dataSize uint64) uint64 {
-    paddedSize := (dataSize / 31) * 32
-
-    if dataSize%31 != 0 {
-        paddedSize += 1 + dataSize%31 // Add 1 byte for the first empty byte plus the remainder bytes
-    }
-
-    return paddedSize
-}
-
-var (
-    blobDataProofArgs         *abi.Arguments
-    initBlobDataProofArgsOnce sync.Once
-)
-
-// GetBlobDataProofArgs gets the blob data proof arguments for batch commitment and returns error if initialization fails.
-func GetBlobDataProofArgs() (*abi.Arguments, error) {
-    var initError error
-
-    initBlobDataProofArgsOnce.Do(func() {
-        // Initialize bytes32 type
-        bytes32Type, err := abi.NewType("bytes32", "bytes32", nil)
-        if err != nil {
-            initError = fmt.Errorf("failed to initialize abi type bytes32: %w", err)
-            return
-        }
-
-        // Initialize bytes48 type
-        bytes48Type, err := abi.NewType("bytes48", "bytes48", nil)
-        if err != nil {
-            initError = fmt.Errorf("failed to initialize abi type bytes48: %w", err)
-            return
-        }
-
-        // Successfully create the argument list
-        blobDataProofArgs = &abi.Arguments{
-            {Type: bytes32Type, Name: "z"},
-            {Type: bytes32Type, Name: "y"},
-            {Type: bytes48Type, Name: "kzg_commitment"},
-            {Type: bytes48Type, Name: "kzg_proof"},
-        }
-    })
-
-    if initError != nil {
-        return nil, initError
-    }
-
-    return blobDataProofArgs, nil
-}
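The helpers deleted here move verbatim into `encoding/da.go` (see the last file in this diff). A minimal sketch of how the two now-shared functions behave together, assuming the post-move import path:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/da-codec/encoding"
)

func main() {
	// 62 payload bytes occupy exactly two 32-byte field elements.
	payload := make([]byte, 62)
	blob, err := encoding.MakeBlobCanonical(payload) // *kzg4844.Blob, a [131072]byte array
	if err != nil {
		panic(err)
	}
	// Every field element keeps its first byte zero so its big-endian value
	// stays below the BLS modulus; only 31 of every 32 blob bytes are payload.
	fmt.Println(blob[0], blob[32]) // 0 0

	fmt.Println(encoding.CalculatePaddedBlobSize(62)) // 64: two full field elements
	fmt.Println(encoding.CalculatePaddedBlobSize(63)) // 66: 64 + 1 zero pad byte + 1 remainder byte
}
```

This 31/32 packing is also where the 126976-byte cap comes from: 4096 field elements times 31 usable bytes each.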
diff --git a/encoding/codecv2/codecv2.go b/encoding/codecv2/codecv2.go
index b5ed267..7588394 100644
--- a/encoding/codecv2/codecv2.go
+++ b/encoding/codecv2/codecv2.go
@@ -1,11 +1,5 @@
 package codecv2

-/*
-#include <stdint.h>
-char* compress_scroll_batch_bytes(uint8_t* src, uint64_t src_size, uint8_t* output_buf, uint64_t *output_buf_size);
-*/
-import "C"
-
 import (
     "crypto/sha256"
     "encoding/binary"
@@ -13,9 +7,7 @@ import (
     "errors"
     "fmt"
     "math/big"
-    "unsafe"

-    "github.com/scroll-tech/go-ethereum/accounts/abi"
     "github.com/scroll-tech/go-ethereum/common"
     "github.com/scroll-tech/go-ethereum/core/types"
     "github.com/scroll-tech/go-ethereum/crypto"
@@ -24,6 +16,7 @@ import (

     "github.com/scroll-tech/da-codec/encoding"
     "github.com/scroll-tech/da-codec/encoding/codecv1"
+    "github.com/scroll-tech/da-codec/encoding/zstd"
 )

 // MaxNumChunks is the maximum number of chunks that a batch can contain.
@@ -176,7 +169,7 @@ func ConstructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
     copy(challengePreimage[0:], hash[:])

     // blobBytes represents the compressed blob payload (batchBytes)
-    blobBytes, err := compressScrollBatchBytes(batchBytes)
+    blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
     if err != nil {
         return nil, common.Hash{}, nil, nil, err
     }
@@ -196,7 +189,7 @@ func ConstructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
     }

     // convert raw data to BLSFieldElements
-    blob, err := MakeBlobCanonical(blobBytes)
+    blob, err := encoding.MakeBlobCanonical(blobBytes)
     if err != nil {
         return nil, common.Hash{}, nil, nil, err
     }
@@ -224,11 +217,6 @@ func ConstructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
     return blob, blobVersionedHash, &z, blobBytes, nil
 }

-// MakeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
-func MakeBlobCanonical(blobBytes []byte) (*kzg4844.Blob, error) {
-    return codecv1.MakeBlobCanonical(blobBytes)
-}
-
 // NewDABatchFromBytes decodes the given byte slice into a DABatch.
 // Note: This function only populates the batch header, it leaves the blob-related fields empty.
 func NewDABatchFromBytes(data []byte) (*DABatch, error) {
@@ -295,7 +283,7 @@ func (b *DABatch) BlobDataProof() ([]byte, error) {
     // | bytes32 | bytes32 | bytes48 | bytes48 |
     values := []interface{}{*b.z, y, commitment, proof}

-    blobDataProofArgs, err := GetBlobDataProofArgs()
+    blobDataProofArgs, err := encoding.GetBlobDataProofArgs()
     if err != nil {
         return nil, fmt.Errorf("failed to get blob data proof args, err: %w", err)
     }
@@ -309,38 +297,38 @@ func (b *DABatch) Blob() *kzg4844.Blob {

 // EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
 func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk) (uint64, uint64, error) {
-    batchBytes, err := constructBatchPayload([]*encoding.Chunk{c})
+    batchBytes, err := encoding.ConstructBatchPayloadInBlob([]*encoding.Chunk{c}, MaxNumChunks)
     if err != nil {
         return 0, 0, err
     }
-    blobBytes, err := compressScrollBatchBytes(batchBytes)
+    blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
     if err != nil {
         return 0, 0, err
     }
-    return uint64(len(batchBytes)), CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
+    return uint64(len(batchBytes)), encoding.CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
 }

 // EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
 func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch) (uint64, uint64, error) {
-    batchBytes, err := constructBatchPayload(b.Chunks)
+    batchBytes, err := encoding.ConstructBatchPayloadInBlob(b.Chunks, MaxNumChunks)
     if err != nil {
         return 0, 0, err
     }
-    blobBytes, err := compressScrollBatchBytes(batchBytes)
+    blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
     if err != nil {
         return 0, 0, err
     }
-    return uint64(len(batchBytes)), CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
+    return uint64(len(batchBytes)), encoding.CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
 }

 // CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
 // It constructs a batch payload, compresses the data, and checks the compressed data compatibility if the uncompressed data exceeds 128 KiB.
 func CheckChunkCompressedDataCompatibility(c *encoding.Chunk) (bool, error) {
-    batchBytes, err := constructBatchPayload([]*encoding.Chunk{c})
+    batchBytes, err := encoding.ConstructBatchPayloadInBlob([]*encoding.Chunk{c}, MaxNumChunks)
     if err != nil {
         return false, err
     }
-    blobBytes, err := compressScrollBatchBytes(batchBytes)
+    blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
     if err != nil {
         return false, err
     }
@@ -358,11 +346,11 @@ func CheckChunkCompressedDataCompatibility(c *encoding.Chunk) (bool, error) {

 // CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
 // It constructs a batch payload, compresses the data, and checks the compressed data compatibility if the uncompressed data exceeds 128 KiB.
 func CheckBatchCompressedDataCompatibility(b *encoding.Batch) (bool, error) {
-    batchBytes, err := constructBatchPayload(b.Chunks)
+    batchBytes, err := encoding.ConstructBatchPayloadInBlob(b.Chunks, MaxNumChunks)
     if err != nil {
         return false, err
     }
-    blobBytes, err := compressScrollBatchBytes(batchBytes)
+    blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
     if err != nil {
         return false, err
     }
@@ -401,68 +389,3 @@ func EstimateChunkL1CommitGas(c *encoding.Chunk) uint64 {
 func EstimateBatchL1CommitGas(b *encoding.Batch) uint64 {
     return codecv1.EstimateBatchL1CommitGas(b)
 }
-
-// constructBatchPayload constructs the batch payload.
-// This function is only used in compressed batch payload length estimation.
-func constructBatchPayload(chunks []*encoding.Chunk) ([]byte, error) {
-    // metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
-    metadataLength := 2 + MaxNumChunks*4
-
-    // batchBytes represents the raw (un-compressed and un-padded) blob payload
-    batchBytes := make([]byte, metadataLength)
-
-    // batch metadata: num_chunks
-    binary.BigEndian.PutUint16(batchBytes[0:], uint16(len(chunks)))
-
-    // encode batch metadata and L2 transactions,
-    for chunkID, chunk := range chunks {
-        currentChunkStartIndex := len(batchBytes)
-
-        for _, block := range chunk.Blocks {
-            for _, tx := range block.Transactions {
-                if tx.Type == types.L1MessageTxType {
-                    continue
-                }
-
-                // encode L2 txs into batch payload
-                rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx, false /* no mock */)
-                if err != nil {
-                    return nil, err
-                }
-                batchBytes = append(batchBytes, rlpTxData...)
-            }
-        }
-
-        // batch metadata: chunki_size
-        if chunkSize := len(batchBytes) - currentChunkStartIndex; chunkSize != 0 {
-            binary.BigEndian.PutUint32(batchBytes[2+4*chunkID:], uint32(chunkSize))
-        }
-    }
-    return batchBytes, nil
-}
-
-// compressScrollBatchBytes compresses the given batch of bytes.
-// The output buffer is allocated with an extra 128 bytes to accommodate metadata overhead or error message.
-func compressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
-    srcSize := C.uint64_t(len(batchBytes))
-    outbufSize := C.uint64_t(len(batchBytes) + 128) // Allocate output buffer with extra 128 bytes
-    outbuf := make([]byte, outbufSize)
-
-    if err := C.compress_scroll_batch_bytes((*C.uchar)(unsafe.Pointer(&batchBytes[0])), srcSize,
-        (*C.uchar)(unsafe.Pointer(&outbuf[0])), &outbufSize); err != nil {
-        return nil, fmt.Errorf("failed to compress scroll batch bytes: %s", C.GoString(err))
-    }
-
-    return outbuf[:int(outbufSize)], nil
-}
-
-// CalculatePaddedBlobSize calculates the required size on blob storage
-// where every 32 bytes can store only 31 bytes of actual data, with the first byte being zero.
-func CalculatePaddedBlobSize(dataSize uint64) uint64 {
-    return codecv1.CalculatePaddedBlobSize(dataSize)
-}
-
-// GetBlobDataProofArgs gets the blob data proof arguments for batch commitment and returns error if initialization fails.
-func GetBlobDataProofArgs() (*abi.Arguments, error) {
-    return codecv1.GetBlobDataProofArgs()
-}
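After this change every codecv2 call site composes the same three relocated helpers. A sketch of that flow, mirroring `EstimateBatchL1CommitBatchSizeAndBlobSize` above (the wrapper `estimateSizes` is illustrative, not new API):

```go
package sketch

import (
	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/da-codec/encoding/zstd"
)

// estimateSizes mirrors the codecv2 estimation path: build the raw batch
// payload, zstd-compress it, then charge the 31/32 blob-packing overhead
// on the compressed length.
func estimateSizes(chunks []*encoding.Chunk, maxNumChunks uint64) (uint64, uint64, error) {
	batchBytes, err := encoding.ConstructBatchPayloadInBlob(chunks, maxNumChunks)
	if err != nil {
		return 0, 0, err
	}
	blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
	if err != nil {
		return 0, 0, err
	}
	return uint64(len(batchBytes)), encoding.CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
}
```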
diff --git a/encoding/codecv3/codecv3.go b/encoding/codecv3/codecv3.go
index 5c82d10..0a85efa 100644
--- a/encoding/codecv3/codecv3.go
+++ b/encoding/codecv3/codecv3.go
@@ -6,7 +6,6 @@ import (
     "errors"
     "fmt"

-    "github.com/scroll-tech/go-ethereum/accounts/abi"
     "github.com/scroll-tech/go-ethereum/common"
     "github.com/scroll-tech/go-ethereum/crypto"
     "github.com/scroll-tech/go-ethereum/crypto/kzg4844"
@@ -223,7 +222,7 @@ func (b *DABatch) BlobDataProofForPointEvaluation() ([]byte, error) {
     // | bytes32 | bytes32 | bytes48 | bytes48 |
     values := []interface{}{*b.z, y, commitment, proof}

-    blobDataProofArgs, err := GetBlobDataProofArgs()
+    blobDataProofArgs, err := encoding.GetBlobDataProofArgs()
     if err != nil {
         return nil, fmt.Errorf("failed to get blob data proof args, err: %w", err)
     }
@@ -279,8 +278,3 @@ func EstimateChunkL1CommitGas(c *encoding.Chunk) uint64 {
 func EstimateBatchL1CommitGas(b *encoding.Batch) uint64 {
     return codecv2.EstimateBatchL1CommitGas(b) + 50000 // plus 50000 for the point-evaluation precompile call.
 }
-
-// GetBlobDataProofArgs gets the blob data proof arguments for batch commitment and returns error if initialization fails.
-func GetBlobDataProofArgs() (*abi.Arguments, error) {
-    return codecv2.GetBlobDataProofArgs()
-}
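`BlobDataProofForPointEvaluation` now pulls the shared argument schema from `encoding`. A hedged sketch of the packing it performs; `packBlobDataProof` is a hypothetical wrapper around the same `Pack` call each codec makes inline, producing the 32 + 32 + 48 + 48 = 160-byte `z ‖ y ‖ kzg_commitment ‖ kzg_proof` layout noted in the inline comment:

```go
package sketch

import (
	"fmt"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

// packBlobDataProof ABI-packs the four blob data proof fields using the
// shared schema: two bytes32 values followed by two bytes48 values.
func packBlobDataProof(z, y [32]byte, commitment kzg4844.Commitment, proof kzg4844.Proof) ([]byte, error) {
	blobDataProofArgs, err := encoding.GetBlobDataProofArgs()
	if err != nil {
		return nil, fmt.Errorf("failed to get blob data proof args, err: %w", err)
	}
	values := []interface{}{z, y, commitment, proof}
	return blobDataProofArgs.Pack(values...)
}
```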
diff --git a/encoding/codecv4/codecv4.go b/encoding/codecv4/codecv4.go
index 4402b6a..b07e2be 100644
--- a/encoding/codecv4/codecv4.go
+++ b/encoding/codecv4/codecv4.go
@@ -1,11 +1,5 @@
 package codecv4

-/*
-#include <stdint.h>
-char* compress_scroll_batch_bytes(uint8_t* src, uint64_t src_size, uint8_t* output_buf, uint64_t *output_buf_size);
-*/
-import "C"
-
 import (
     "crypto/sha256"
     "encoding/binary"
@@ -13,9 +7,7 @@ import (
     "errors"
     "fmt"
     "math/big"
-    "unsafe"

-    "github.com/scroll-tech/go-ethereum/accounts/abi"
     "github.com/scroll-tech/go-ethereum/common"
     "github.com/scroll-tech/go-ethereum/core/types"
     "github.com/scroll-tech/go-ethereum/crypto"
@@ -23,8 +15,8 @@ import (
     "github.com/scroll-tech/go-ethereum/log"

     "github.com/scroll-tech/da-codec/encoding"
-    "github.com/scroll-tech/da-codec/encoding/codecv1"
     "github.com/scroll-tech/da-codec/encoding/codecv3"
+    "github.com/scroll-tech/da-codec/encoding/zstd"
 )

 // MaxNumChunks is the maximum number of chunks that a batch can contain.
@@ -197,7 +189,7 @@ func ConstructBlobPayload(chunks []*encoding.Chunk, enableCompress bool, useMock
     if enableCompress {
         // blobBytes represents the compressed blob payload (batchBytes)
         var err error
-        blobBytes, err = compressScrollBatchBytes(batchBytes)
+        blobBytes, err = zstd.CompressScrollBatchBytes(batchBytes)
         if err != nil {
             return nil, common.Hash{}, nil, nil, err
         }
@@ -210,7 +202,6 @@ func ConstructBlobPayload(chunks []*encoding.Chunk, enableCompress bool, useMock
         }
         blobBytes = append([]byte{1}, blobBytes...)
     } else {
-        blobBytes = batchBytes
         blobBytes = append([]byte{0}, batchBytes...)
     }
@@ -220,7 +211,7 @@ func ConstructBlobPayload(chunks []*encoding.Chunk, enableCompress bool, useMock
     }

     // convert raw data to BLSFieldElements
-    blob, err := MakeBlobCanonical(blobBytes)
+    blob, err := encoding.MakeBlobCanonical(blobBytes)
     if err != nil {
         return nil, common.Hash{}, nil, nil, err
     }
@@ -345,7 +336,7 @@ func (b *DABatch) BlobDataProofForPointEvaluation() ([]byte, error) {
     // | bytes32 | bytes32 | bytes48 | bytes48 |
     values := []interface{}{*b.z, y, commitment, proof}

-    blobDataProofArgs, err := GetBlobDataProofArgs()
+    blobDataProofArgs, err := encoding.GetBlobDataProofArgs()
     if err != nil {
         return nil, fmt.Errorf("failed to get blob data proof args, err: %w", err)
     }
@@ -364,13 +355,13 @@ func (b *DABatch) BlobBytes() []byte {

 // EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
 func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk, enableCompress bool) (uint64, uint64, error) {
-    batchBytes, err := constructBatchPayload([]*encoding.Chunk{c})
+    batchBytes, err := encoding.ConstructBatchPayloadInBlob([]*encoding.Chunk{c}, MaxNumChunks)
     if err != nil {
         return 0, 0, err
     }
     var blobBytesLength uint64
     if enableCompress {
-        blobBytes, err := compressScrollBatchBytes(batchBytes)
+        blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
         if err != nil {
             return 0, 0, err
         }
@@ -378,18 +369,18 @@ func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk, enableCompress
     } else {
         blobBytesLength = 1 + uint64(len(batchBytes))
     }
-    return uint64(len(batchBytes)), CalculatePaddedBlobSize(blobBytesLength), nil
+    return uint64(len(batchBytes)), encoding.CalculatePaddedBlobSize(blobBytesLength), nil
 }

 // EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
 func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch, enableCompress bool) (uint64, uint64, error) {
-    batchBytes, err := constructBatchPayload(b.Chunks)
+    batchBytes, err := encoding.ConstructBatchPayloadInBlob(b.Chunks, MaxNumChunks)
     if err != nil {
         return 0, 0, err
     }
     var blobBytesLength uint64
     if enableCompress {
-        blobBytes, err := compressScrollBatchBytes(batchBytes)
+        blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
         if err != nil {
             return 0, 0, err
         }
@@ -397,16 +388,16 @@ func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch, enableCompress
     } else {
         blobBytesLength = 1 + uint64(len(batchBytes))
     }
-    return uint64(len(batchBytes)), CalculatePaddedBlobSize(blobBytesLength), nil
+    return uint64(len(batchBytes)), encoding.CalculatePaddedBlobSize(blobBytesLength), nil
 }

 // CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
 func CheckChunkCompressedDataCompatibility(c *encoding.Chunk) (bool, error) {
-    batchBytes, err := constructBatchPayload([]*encoding.Chunk{c})
+    batchBytes, err := encoding.ConstructBatchPayloadInBlob([]*encoding.Chunk{c}, MaxNumChunks)
     if err != nil {
         return false, err
     }
-    blobBytes, err := compressScrollBatchBytes(batchBytes)
+    blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
     if err != nil {
         return false, err
     }
@@ -419,11 +410,11 @@ func CheckChunkCompressedDataCompatibility(c *encoding.Chunk) (bool, error) {

 // CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
 func CheckBatchCompressedDataCompatibility(b *encoding.Batch) (bool, error) {
-    batchBytes, err := constructBatchPayload(b.Chunks)
+    batchBytes, err := encoding.ConstructBatchPayloadInBlob(b.Chunks, MaxNumChunks)
     if err != nil {
         return false, err
     }
-    blobBytes, err := compressScrollBatchBytes(batchBytes)
+    blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
     if err != nil {
         return false, err
     }
@@ -453,91 +444,3 @@ func EstimateChunkL1CommitGas(c *encoding.Chunk) uint64 {
 func EstimateBatchL1CommitGas(b *encoding.Batch) uint64 {
     return codecv3.EstimateBatchL1CommitGas(b)
 }
-
-// GetBlobDataProofArgs gets the blob data proof arguments for batch commitment and returns error if initialization fails.
-func GetBlobDataProofArgs() (*abi.Arguments, error) {
-    return codecv3.GetBlobDataProofArgs()
-}
-
-// checkBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
-// It constructs a batch payload, compresses the data, and checks the compressed data compatibility if the uncompressed data exceeds 128 KiB.
-func checkBatchCompressedDataCompatibility(b *encoding.Batch) (bool, error) {
-    batchBytes, err := constructBatchPayload(b.Chunks)
-    if err != nil {
-        return false, err
-    }
-    blobBytes, err := compressScrollBatchBytes(batchBytes)
-    if err != nil {
-        return false, err
-    }
-    if err = encoding.CheckCompressedDataCompatibility(blobBytes); err != nil {
-        log.Warn("CheckBatchCompressedDataCompatibility: compressed data compatibility check failed", "err", err, "batchBytes", hex.EncodeToString(batchBytes), "blobBytes", hex.EncodeToString(blobBytes))
-        return false, nil
-    }
-    return true, nil
-}
-
-// constructBatchPayload constructs the batch payload.
-// This function is only used in compressed batch payload length estimation.
-func constructBatchPayload(chunks []*encoding.Chunk) ([]byte, error) {
-    // metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
-    metadataLength := 2 + MaxNumChunks*4
-
-    // batchBytes represents the raw (un-compressed and un-padded) blob payload
-    batchBytes := make([]byte, metadataLength)
-
-    // batch metadata: num_chunks
-    binary.BigEndian.PutUint16(batchBytes[0:], uint16(len(chunks)))
-
-    // encode batch metadata and L2 transactions,
-    for chunkID, chunk := range chunks {
-        currentChunkStartIndex := len(batchBytes)
-
-        for _, block := range chunk.Blocks {
-            for _, tx := range block.Transactions {
-                if tx.Type == types.L1MessageTxType {
-                    continue
-                }
-
-                // encode L2 txs into batch payload
-                rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx, false /* no mock */)
-                if err != nil {
-                    return nil, err
-                }
-                batchBytes = append(batchBytes, rlpTxData...)
-            }
-        }
-
-        // batch metadata: chunki_size
-        if chunkSize := len(batchBytes) - currentChunkStartIndex; chunkSize != 0 {
-            binary.BigEndian.PutUint32(batchBytes[2+4*chunkID:], uint32(chunkSize))
-        }
-    }
-    return batchBytes, nil
-}
-
-// compressScrollBatchBytes compresses the given batch of bytes.
-// The output buffer is allocated with an extra 128 bytes to accommodate metadata overhead or error message.
-func compressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
-    srcSize := C.uint64_t(len(batchBytes))
-    outbufSize := C.uint64_t(len(batchBytes) + 128) // Allocate output buffer with extra 128 bytes
-    outbuf := make([]byte, outbufSize)
-
-    if err := C.compress_scroll_batch_bytes((*C.uchar)(unsafe.Pointer(&batchBytes[0])), srcSize,
-        (*C.uchar)(unsafe.Pointer(&outbuf[0])), &outbufSize); err != nil {
-        return nil, fmt.Errorf("failed to compress scroll batch bytes: %s", C.GoString(err))
-    }
-
-    return outbuf[:int(outbufSize)], nil
-}
-
-// MakeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
-func MakeBlobCanonical(blobBytes []byte) (*kzg4844.Blob, error) {
-    return codecv1.MakeBlobCanonical(blobBytes)
-}
-
-// CalculatePaddedBlobSize calculates the required size on blob storage
-// where every 32 bytes can store only 31 bytes of actual data, with the first byte being zero.
-func CalculatePaddedBlobSize(dataSize uint64) uint64 {
-    return codecv1.CalculatePaddedBlobSize(dataSize)
-}
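codecv4 frames the blob payload with a one-byte envelope flag, `1` for zstd-compressed batch bytes and `0` for raw bytes; the `ConstructBlobPayload` hunk above also drops a dead `blobBytes = batchBytes` assignment that was immediately overwritten by the prepend. A hypothetical reader-side sketch of that framing (not part of this diff):

```go
package sketch

import (
	"errors"
	"fmt"
)

// decodeEnvelope is a hypothetical inverse of the codecv4 framing: one flag
// byte, then either zstd-compressed or raw batch bytes.
func decodeEnvelope(blobBytes []byte) (compressed bool, payload []byte, err error) {
	if len(blobBytes) == 0 {
		return false, nil, errors.New("empty blob payload")
	}
	switch blobBytes[0] {
	case 1:
		return true, blobBytes[1:], nil // zstd-compressed batch payload
	case 0:
		return false, blobBytes[1:], nil // uncompressed batch payload
	default:
		return false, nil, fmt.Errorf("unknown envelope flag: %d", blobBytes[0])
	}
}
```

This flag byte is also why the uncompressed estimation branch above uses `blobBytesLength = 1 + uint64(len(batchBytes))`.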
diff --git a/encoding/da.go b/encoding/da.go
index 4e88635..b085351 100644
--- a/encoding/da.go
+++ b/encoding/da.go
@@ -1,12 +1,16 @@
 package encoding

 import (
+    "encoding/binary"
     "fmt"
     "math/big"
+    "sync"

+    "github.com/scroll-tech/go-ethereum/accounts/abi"
     "github.com/scroll-tech/go-ethereum/common"
     "github.com/scroll-tech/go-ethereum/common/hexutil"
     "github.com/scroll-tech/go-ethereum/core/types"
+    "github.com/scroll-tech/go-ethereum/crypto/kzg4844"
 )

 // BLSModulus is the BLS modulus defined in EIP-4844.
@@ -326,3 +330,119 @@ func CheckCompressedDataCompatibility(data []byte) error {

     return nil
 }
+
+// MakeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
+func MakeBlobCanonical(blobBytes []byte) (*kzg4844.Blob, error) {
+    // blob contains 131072 bytes but we can only utilize 31/32 of these
+    if len(blobBytes) > 126976 {
+        return nil, fmt.Errorf("oversized batch payload, blob bytes length: %v, max length: %v", len(blobBytes), 126976)
+    }
+
+    // the canonical (padded) blob payload
+    var blob kzg4844.Blob
+
+    // encode blob payload by prepending every 31 bytes with 1 zero byte
+    index := 0
+
+    for from := 0; from < len(blobBytes); from += 31 {
+        to := from + 31
+        if to > len(blobBytes) {
+            to = len(blobBytes)
+        }
+        copy(blob[index+1:], blobBytes[from:to])
+        index += 32
+    }
+
+    return &blob, nil
+}
+
+var (
+    blobDataProofArgs         *abi.Arguments
+    initBlobDataProofArgsOnce sync.Once
+)
+
+// GetBlobDataProofArgs gets the blob data proof arguments for batch commitment and returns error if initialization fails.
+func GetBlobDataProofArgs() (*abi.Arguments, error) {
+    var initError error
+
+    initBlobDataProofArgsOnce.Do(func() {
+        // Initialize bytes32 type
+        bytes32Type, err := abi.NewType("bytes32", "bytes32", nil)
+        if err != nil {
+            initError = fmt.Errorf("failed to initialize abi type bytes32: %w", err)
+            return
+        }
+
+        // Initialize bytes48 type
+        bytes48Type, err := abi.NewType("bytes48", "bytes48", nil)
+        if err != nil {
+            initError = fmt.Errorf("failed to initialize abi type bytes48: %w", err)
+            return
+        }
+
+        // Successfully create the argument list
+        blobDataProofArgs = &abi.Arguments{
+            {Type: bytes32Type, Name: "z"},
+            {Type: bytes32Type, Name: "y"},
+            {Type: bytes48Type, Name: "kzg_commitment"},
+            {Type: bytes48Type, Name: "kzg_proof"},
+        }
+    })
+
+    if initError != nil {
+        return nil, initError
+    }
+
+    return blobDataProofArgs, nil
+}
+
+// CalculatePaddedBlobSize calculates the required size on blob storage
+// where every 32 bytes can store only 31 bytes of actual data, with the first byte being zero.
+func CalculatePaddedBlobSize(dataSize uint64) uint64 {
+    paddedSize := (dataSize / 31) * 32
+
+    if dataSize%31 != 0 {
+        paddedSize += 1 + dataSize%31 // Add 1 byte for the first empty byte plus the remainder bytes
+    }
+
+    return paddedSize
+}
+
+// ConstructBatchPayloadInBlob constructs the batch payload.
+// This function is only used in compressed batch payload length estimation.
+func ConstructBatchPayloadInBlob(chunks []*Chunk, MaxNumChunks uint64) ([]byte, error) {
+    // metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
+    metadataLength := 2 + MaxNumChunks*4
+
+    // batchBytes represents the raw (un-compressed and un-padded) blob payload
+    batchBytes := make([]byte, metadataLength)
+
+    // batch metadata: num_chunks
+    binary.BigEndian.PutUint16(batchBytes[0:], uint16(len(chunks)))
+
+    // encode batch metadata and L2 transactions,
+    for chunkID, chunk := range chunks {
+        currentChunkStartIndex := len(batchBytes)
+
+        for _, block := range chunk.Blocks {
+            for _, tx := range block.Transactions {
+                if tx.Type == types.L1MessageTxType {
+                    continue
+                }
+
+                // encode L2 txs into batch payload
+                rlpTxData, err := ConvertTxDataToRLPEncoding(tx, false /* no mock */)
+                if err != nil {
+                    return nil, err
+                }
+                batchBytes = append(batchBytes, rlpTxData...)
+            }
+        }
+
+        // batch metadata: chunki_size
+        if chunkSize := len(batchBytes) - currentChunkStartIndex; chunkSize != 0 {
+            binary.BigEndian.PutUint32(batchBytes[2+4*chunkID:], uint32(chunkSize))
+        }
+    }
+    return batchBytes, nil
+}
diff --git a/encoding/codecv2/libscroll_zstd_darwin_arm64.a b/encoding/zstd/libscroll_zstd_darwin_arm64.a
similarity index 100%
rename from encoding/codecv2/libscroll_zstd_darwin_arm64.a
rename to encoding/zstd/libscroll_zstd_darwin_arm64.a
diff --git a/encoding/codecv2/libscroll_zstd_darwin_arm64.go b/encoding/zstd/libscroll_zstd_darwin_arm64.go
similarity index 81%
rename from encoding/codecv2/libscroll_zstd_darwin_arm64.go
rename to encoding/zstd/libscroll_zstd_darwin_arm64.go
index 8ace74c..d83ec17 100644
--- a/encoding/codecv2/libscroll_zstd_darwin_arm64.go
+++ b/encoding/zstd/libscroll_zstd_darwin_arm64.go
@@ -1,4 +1,4 @@
-package codecv2
+package zstd

 /*
 #cgo LDFLAGS: ${SRCDIR}/libscroll_zstd_darwin_arm64.a
diff --git a/encoding/codecv2/libscroll_zstd_linux_amd64.a b/encoding/zstd/libscroll_zstd_linux_amd64.a
similarity index 100%
rename from encoding/codecv2/libscroll_zstd_linux_amd64.a
rename to encoding/zstd/libscroll_zstd_linux_amd64.a
diff --git a/encoding/codecv2/libscroll_zstd_linux_amd64.go b/encoding/zstd/libscroll_zstd_linux_amd64.go
similarity index 86%
rename from encoding/codecv2/libscroll_zstd_linux_amd64.go
rename to encoding/zstd/libscroll_zstd_linux_amd64.go
index 0b22575..f1a686e 100644
--- a/encoding/codecv2/libscroll_zstd_linux_amd64.go
+++ b/encoding/zstd/libscroll_zstd_linux_amd64.go
@@ -1,7 +1,7 @@
 //go:build !musl
 // +build !musl

-package codecv2
+package zstd

 /*
 #cgo LDFLAGS: ${SRCDIR}/libscroll_zstd_linux_amd64.a
diff --git a/encoding/codecv2/libscroll_zstd_linux_arm64.a b/encoding/zstd/libscroll_zstd_linux_arm64.a
similarity index 100%
rename from encoding/codecv2/libscroll_zstd_linux_arm64.a
rename to encoding/zstd/libscroll_zstd_linux_arm64.a
diff --git a/encoding/codecv2/libscroll_zstd_linux_arm64.go b/encoding/zstd/libscroll_zstd_linux_arm64.go
similarity index 86%
rename from encoding/codecv2/libscroll_zstd_linux_arm64.go
rename to encoding/zstd/libscroll_zstd_linux_arm64.go
index ebf3943..f3775d2 100644
--- a/encoding/codecv2/libscroll_zstd_linux_arm64.go
+++ b/encoding/zstd/libscroll_zstd_linux_arm64.go
@@ -1,7 +1,7 @@
 //go:build !musl
 // +build !musl

-package codecv2
+package zstd

 /*
 #cgo LDFLAGS: ${SRCDIR}/libscroll_zstd_linux_arm64.a
diff --git a/encoding/zstd/zstd.go b/encoding/zstd/zstd.go
new file mode 100644
index 0000000..58eab2b
--- /dev/null
+++ b/encoding/zstd/zstd.go
@@ -0,0 +1,26 @@
+package zstd
+
+/*
+#include <stdint.h>
+char* compress_scroll_batch_bytes(uint8_t* src, uint64_t src_size, uint8_t* output_buf, uint64_t *output_buf_size);
+*/
+import "C"
+import (
+    "fmt"
+    "unsafe"
+)
+
+// CompressScrollBatchBytes compresses the given batch of bytes.
+// The output buffer is allocated with an extra 128 bytes to accommodate metadata overhead or error message.
+func CompressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
+    srcSize := C.uint64_t(len(batchBytes))
+    outbufSize := C.uint64_t(len(batchBytes) + 128) // Allocate output buffer with extra 128 bytes
+    outbuf := make([]byte, outbufSize)
+
+    if err := C.compress_scroll_batch_bytes((*C.uchar)(unsafe.Pointer(&batchBytes[0])), srcSize,
+        (*C.uchar)(unsafe.Pointer(&outbuf[0])), &outbufSize); err != nil {
+        return nil, fmt.Errorf("failed to compress scroll batch bytes: %s", C.GoString(err))
+    }
+
+    return outbuf[:int(outbufSize)], nil
+}
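With the cgo wrapper consolidated here, only `encoding/zstd` carries the platform build constraints and links against the bundled static libraries renamed above. A usage sketch, assuming a cgo-enabled build on one of the supported platforms; note the wrapper dereferences `&batchBytes[0]`, so callers should not pass an empty slice:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/da-codec/encoding/zstd"
)

func main() {
	data := []byte("example batch payload, repeated: example batch payload")
	compressed, err := zstd.CompressScrollBatchBytes(data) // input must be non-empty
	if err != nil {
		panic(err)
	}
	fmt.Printf("raw=%dB compressed=%dB\n", len(data), len(compressed))
}
```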