44 changes: 43 additions & 1 deletion README.md
@@ -19,4 +19,46 @@ A: `linux/amd64`, `linux/arm64`, `darwin/arm64`. Pull requests for other platfor

**Q: I don't trust the `libscroll_zstd*.a` binary files from the repo, or these files don't work on my OS/ARCH. How can I rebuild them?**

A: Just run `cd libzstd && make libzstd` if your OS/ARCH is supported.
A: To rebuild the libraries for your platform:

1. Build the legacy encoder:

```bash
cd libzstd/encoder-legacy
make install
```

2. Build the standard encoder:

```bash
cd libzstd/encoder-standard
make install
```

3. Add symbol prefixes to avoid conflicts:

```bash
cd encoding/zstd
./add_symbol_prefix.sh
```

**Note**: The symbol prefix script currently works only on macOS. For Linux builds, perform steps 1 and 2 in Docker, then run step 3 on macOS.

For macOS builds, ensure Rust and the necessary build tools are installed:

```bash
# Install Rust if not already installed
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
```
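After installation, make sure the toolchain is on your `PATH` (for example, `cargo --version` should succeed) before running the `make` targets above.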

For Linux builds, use Docker with build dependencies:

```bash
# Linux ARM64
docker run -it --rm --platform linux/arm64 -v $(pwd):/workspace -w /workspace rust:1.75-slim bash
apt update && apt install -y build-essential

# Linux AMD64
docker run -it --rm --platform linux/amd64 -v $(pwd):/workspace -w /workspace rust:1.75-slim bash
apt update && apt install -y build-essential
```
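
Inside either container, the build mirrors steps 1 and 2 above (a sketch, assuming the repository root is mounted at `/workspace` as in the `docker run` commands):

```bash
# Inside the container: build both encoders (steps 1 and 2).
cd /workspace/libzstd/encoder-legacy && make install
cd /workspace/libzstd/encoder-standard && make install
# Step 3 (encoding/zstd/add_symbol_prefix.sh) must still be run on macOS.
```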
4 changes: 4 additions & 0 deletions encoding/codecv0.go
@@ -430,3 +430,7 @@ func (d *DACodecV0) computeBatchDataHash(chunks []*Chunk, totalL1MessagePoppedBe
dataHash := crypto.Keccak256Hash(dataBytes)
return dataHash, nil
}

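// CompressScrollBatchBytes is a no-op for codec v0: batch bytes are stored uncompressed.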
func (d *DACodecV0) CompressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
return batchBytes, nil
}
13 changes: 9 additions & 4 deletions encoding/codecv2.go
@@ -154,7 +154,7 @@ func (d *DACodecV2) constructBlobPayload(chunks []*Chunk, maxNumChunksPerBatch i
copy(challengePreimage[0:], hash[:])

// blobBytes represents the compressed blob payload (batchBytes)
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
if err != nil {
return nil, common.Hash{}, nil, nil, common.Hash{}, err
}
@@ -236,7 +236,7 @@ func (d *DACodecV2) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64,
if err != nil {
return 0, 0, fmt.Errorf("failed to construct batch payload in blob: %w", err)
}
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
if err != nil {
return 0, 0, fmt.Errorf("failed to compress scroll batch bytes: %w", err)
}
@@ -249,7 +249,7 @@ func (d *DACodecV2) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64,
if err != nil {
return 0, 0, err
}
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
if err != nil {
return 0, 0, err
}
@@ -263,7 +263,7 @@ func (d *DACodecV2) checkCompressedDataCompatibility(chunks []*Chunk) (bool, err
if err != nil {
return false, fmt.Errorf("failed to construct batch payload in blob: %w", err)
}
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
if err != nil {
return false, fmt.Errorf("failed to compress scroll batch bytes: %w", err)
}
@@ -289,3 +289,8 @@ func (d *DACodecV2) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error
func (d *DACodecV2) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
return d.checkCompressedDataCompatibility(b.Chunks)
}

// CompressScrollBatchBytes compresses the batch bytes using the legacy zstd encoder.
func (d *DACodecV2) CompressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
return zstd.CompressScrollBatchBytesLegacy(batchBytes)
}
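
This change moves compression from a package-level `zstd` call to a method on each codec, so callers can stay generic over codec versions. A minimal self-contained sketch of the resulting pattern (the interface and the `codecV0Like` type are illustrative, not the repository's actual definitions):

```go
package main

import "fmt"

// batchCompressor models the method each codec now exposes;
// the interface itself is illustrative, not from the repository.
type batchCompressor interface {
	CompressScrollBatchBytes(batchBytes []byte) ([]byte, error)
}

// codecV0Like mirrors DACodecV0's behavior: no compression, bytes pass through.
type codecV0Like struct{}

func (codecV0Like) CompressScrollBatchBytes(b []byte) ([]byte, error) {
	return b, nil
}

func main() {
	var c batchCompressor = codecV0Like{}
	out, err := c.CompressScrollBatchBytes([]byte("example batch payload"))
	fmt.Printf("got %d bytes back, err = %v\n", len(out), err)
}
```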
8 changes: 3 additions & 5 deletions encoding/codecv4.go
@@ -14,8 +14,6 @@ import (
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/log"

"github.com/scroll-tech/da-codec/encoding/zstd"
)

type DACodecV4 struct {
@@ -205,7 +203,7 @@ func (d *DACodecV4) constructBlobPayload(chunks []*Chunk, maxNumChunksPerBatch i
if enableCompression {
// blobBytes represents the compressed blob payload (batchBytes)
var err error
blobBytes, err = zstd.CompressScrollBatchBytes(batchBytes)
blobBytes, err = d.CompressScrollBatchBytes(batchBytes)
if err != nil {
return nil, common.Hash{}, nil, nil, common.Hash{}, err
}
@@ -267,7 +265,7 @@ func (d *DACodecV4) estimateL1CommitBatchSizeAndBlobSize(chunks []*Chunk) (uint6
return 0, 0, fmt.Errorf("failed to compress scroll batch bytes: %w", err)
}
if enableCompression {
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
if err != nil {
return 0, 0, err
}
@@ -295,7 +293,7 @@ func (d *DACodecV4) checkCompressedDataCompatibility(chunks []*Chunk) (bool, err
if err != nil {
return false, fmt.Errorf("failed to construct batch payload in blob: %w", err)
}
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
if err != nil {
return false, fmt.Errorf("failed to compress scroll batch bytes: %w", err)
}
28 changes: 19 additions & 9 deletions encoding/codecv7.go
@@ -17,10 +17,15 @@ import (
"github.com/scroll-tech/da-codec/encoding/zstd"
)

type DACodecV7 struct{}
type DACodecV7 struct {
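// forcedVersion, if set, overrides the codec version reported by Version().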
forcedVersion *CodecVersion
}

// Version returns the codec version.
func (d *DACodecV7) Version() CodecVersion {
if d.forcedVersion != nil {
return *d.forcedVersion
}
return CodecV7
}

@@ -86,7 +91,7 @@ func (d *DACodecV7) NewDABatch(batch *Batch) (DABatch, error) {
return nil, fmt.Errorf("failed to construct blob: %w", err)
}

daBatch, err := newDABatchV7(CodecV7, batch.Index, blobVersionedHash, batch.ParentBatchHash, blob, blobBytes, challengeDigest)
daBatch, err := newDABatchV7(d.Version(), batch.Index, blobVersionedHash, batch.ParentBatchHash, blob, blobBytes, challengeDigest)
if err != nil {
return nil, fmt.Errorf("failed to construct DABatch: %w", err)
}
@@ -115,7 +120,7 @@ func (d *DACodecV7) constructBlob(batch *Batch) (*kzg4844.Blob, common.Hash, []b

sizeSlice := encodeSize3Bytes(uint32(len(payloadBytes)))

blobBytes[blobEnvelopeV7OffsetVersion] = uint8(CodecV7)
blobBytes[blobEnvelopeV7OffsetVersion] = uint8(d.Version())
copy(blobBytes[blobEnvelopeV7OffsetByteSize:blobEnvelopeV7OffsetCompressedFlag], sizeSlice)
blobBytes[blobEnvelopeV7OffsetCompressedFlag] = isCompressedFlag
blobBytes = append(blobBytes, payloadBytes...)
@@ -166,15 +171,15 @@ func (d *DACodecV7) NewDABatchFromBytes(data []byte) (DABatch, error) {
return nil, fmt.Errorf("failed to decode DA batch: %w", err)
}

if daBatch.version != CodecV7 {
return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", CodecV7, daBatch.version)
if daBatch.version != d.Version() {
return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", d.Version(), daBatch.version)
}

return daBatch, nil
}

func (d *DACodecV7) NewDABatchFromParams(batchIndex uint64, blobVersionedHash, parentBatchHash common.Hash) (DABatch, error) {
return newDABatchV7(CodecV7, batchIndex, blobVersionedHash, parentBatchHash, nil, nil, common.Hash{})
return newDABatchV7(d.Version(), batchIndex, blobVersionedHash, parentBatchHash, nil, nil, common.Hash{})
}

func (d *DACodecV7) DecodeDAChunksRawTx(_ [][]byte) ([]*DAChunkRawTx, error) {
@@ -186,8 +191,8 @@ func (d *DACodecV7) DecodeBlob(blob *kzg4844.Blob) (DABlobPayload, error) {

// read the blob envelope header
version := rawBytes[blobEnvelopeV7OffsetVersion]
if CodecVersion(version) != CodecV7 {
return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", CodecV7, version)
if CodecVersion(version) != d.Version() {
return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", d.Version(), version)
}

// read the data size
@@ -229,7 +234,7 @@ func (d *DACodecV7) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx
// If checkLength is true, this function returns whether compression is needed based on the compressed data's length, which is used during batch bytes encoding.
// If checkLength is false, this function returns the result of the compatibility check, which is used when determining the chunk and batch contents.
func (d *DACodecV7) checkCompressedDataCompatibility(payloadBytes []byte, checkLength bool) ([]byte, bool, error) {
compressedPayloadBytes, err := zstd.CompressScrollBatchBytes(payloadBytes)
compressedPayloadBytes, err := d.CompressScrollBatchBytes(payloadBytes)
if err != nil {
return nil, false, fmt.Errorf("failed to compress blob payload: %w", err)
}
@@ -383,3 +388,8 @@ func (d *DACodecV7) JSONFromBytes(data []byte) ([]byte, error) {

return jsonBytes, nil
}

// CompressScrollBatchBytes compresses the batch bytes using the legacy zstd encoder.
func (d *DACodecV7) CompressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
return zstd.CompressScrollBatchBytesLegacy(batchBytes)
}
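
For context, the new `forcedVersion` field implies that a codec value can be built to report a version other than `CodecV7`; a hypothetical sketch of such a constructor (the name is an assumption, not the repository's actual API):

```go
// Hypothetical sketch only; the repository's real constructor may differ.
func newDACodecV7WithForcedVersion(v CodecVersion) *DACodecV7 {
	return &DACodecV7{forcedVersion: &v}
}
```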