update da-codec
colinlyguo committed Aug 18, 2024
1 parent 62758c8 commit 6901956
Showing 8 changed files with 1,480 additions and 125 deletions.
5 changes: 1 addition & 4 deletions encoding/codecv1/codecv1.go
@@ -20,9 +20,6 @@ import (
 	"github.com/scroll-tech/da-codec/encoding/codecv0"
 )
 
-// BLSModulus is the BLS modulus defined in EIP-4844.
-var BLSModulus = new(big.Int).SetBytes(common.FromHex("0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001"))
-
 // MaxNumChunks is the maximum number of chunks that a batch can contain.
 const MaxNumChunks = 15
 
@@ -280,7 +277,7 @@ func constructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
 
 	// compute z = challenge_digest % BLS_MODULUS
 	challengeDigest := crypto.Keccak256Hash(challengePreimage)
-	pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
+	pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), encoding.BLSModulus)
 	pointBytes := pointBigInt.Bytes()
 
 	// the challenge point z
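
The constant removed here is the BLS12-381 scalar-field modulus from EIP-4844; the commit deduplicates it into the shared `encoding` package, so the call site now reads `encoding.BLSModulus`. Below is a minimal, self-contained sketch of the challenge-point computation these hunks touch, with the modulus inlined and a placeholder preimage (the real codec derives the preimage from the blob payload); the import paths assume upstream go-ethereum, whereas the repo itself uses the scroll-tech fork.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// blsModulus is the EIP-4844 BLS12-381 scalar-field modulus, the same value
// this commit moves from per-codec declarations into the shared encoding package.
var blsModulus = new(big.Int).SetBytes(common.FromHex("0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001"))

func main() {
	// Placeholder preimage; the real codec derives it from the blob payload.
	challengePreimage := []byte("example challenge preimage")

	// z = challenge_digest % BLS_MODULUS, as in constructBlobPayload above.
	challengeDigest := crypto.Keccak256Hash(challengePreimage)
	z := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), blsModulus)
	fmt.Printf("challenge point z = %064x\n", z)
}
```
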
34 changes: 7 additions & 27 deletions encoding/codecv2/codecv2.go
@@ -26,9 +26,6 @@ import (
 	"github.com/scroll-tech/da-codec/encoding/codecv1"
 )
 
-// BLSModulus is the BLS modulus defined in EIP-4844.
-var BLSModulus = codecv1.BLSModulus
-
 // MaxNumChunks is the maximum number of chunks that a batch can contain.
 const MaxNumChunks = 45
 
@@ -89,7 +86,7 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
 	}
 
 	// blob payload
-	blob, blobVersionedHash, z, err := ConstructBlobPayload(batch.Chunks, false /* no conditional encode */, false /* no mock */)
+	blob, blobVersionedHash, z, err := ConstructBlobPayload(batch.Chunks, false /* no mock */)
 	if err != nil {
 		return nil, err
 	}
@@ -119,7 +116,7 @@ func ComputeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore u
 }
 
 // ConstructBlobPayload constructs the 4844 blob payload.
-func ConstructBlobPayload(chunks []*encoding.Chunk, conditionalEncode bool, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
+func ConstructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
 	// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
 	metadataLength := 2 + MaxNumChunks*4
 
@@ -193,15 +190,6 @@ func ConstructBlobPayload(chunks []*encoding.Chunk, conditionalEncode bool, useM
 		}
 	}
 
-	if conditionalEncode {
-		encoded := len(blobBytes) < len(batchBytes)
-		if encoded {
-			blobBytes = append([]byte{1}, blobBytes...)
-		} else {
-			blobBytes = append([]byte{0}, batchBytes...)
-		}
-	}
-
 	if len(blobBytes) > 126976 {
 		log.Error("ConstructBlobPayload: Blob payload exceeds maximum size", "size", len(blobBytes), "blobBytes", hex.EncodeToString(blobBytes))
 		return nil, common.Hash{}, nil, errors.New("Blob payload exceeds maximum size")
@@ -225,7 +213,7 @@ func ConstructBlobPayload(chunks []*encoding.Chunk, conditionalEncode bool, useM
 
 	// compute z = challenge_digest % BLS_MODULUS
 	challengeDigest := crypto.Keccak256Hash(challengePreimage)
-	pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
+	pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), encoding.BLSModulus)
 	pointBytes := pointBigInt.Bytes()
 
 	// the challenge point z
@@ -320,7 +308,7 @@ func (b *DABatch) Blob() *kzg4844.Blob {
 }
 
 // EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
-func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk, conditionalEncode bool) (uint64, uint64, error) {
+func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk) (uint64, uint64, error) {
 	batchBytes, err := constructBatchPayload([]*encoding.Chunk{c})
 	if err != nil {
 		return 0, 0, err
@@ -329,15 +317,11 @@ func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk, conditionalEnc
 	if err != nil {
 		return 0, 0, err
 	}
-	blobBytesLen := uint64(len(blobBytes))
-	if conditionalEncode {
-		blobBytesLen += 1
-	}
-	return uint64(len(batchBytes)), CalculatePaddedBlobSize(blobBytesLen), nil
+	return uint64(len(batchBytes)), CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
 }
 
 // EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
-func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch, conditionalEncode bool) (uint64, uint64, error) {
+func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch) (uint64, uint64, error) {
 	batchBytes, err := constructBatchPayload(b.Chunks)
 	if err != nil {
 		return 0, 0, err
@@ -346,11 +330,7 @@ func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch, conditionalEnc
 	if err != nil {
 		return 0, 0, err
 	}
-	blobBytesLen := uint64(len(blobBytes))
-	if conditionalEncode {
-		blobBytesLen += 1
-	}
-	return uint64(len(batchBytes)), CalculatePaddedBlobSize(blobBytesLen), nil
+	return uint64(len(batchBytes)), CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
 }
 
 // CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
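
Taken together, the codecv2 changes remove the `conditionalEncode` flag from the public API: the blob payload is always the compressed form, and callers no longer pass the extra boolean or budget the one-byte envelope prefix in size estimates. A hypothetical call site migrated to the new signatures might look like the sketch below; `reportCommitSizes` is illustrative and not part of the repo.

```go
package example

import (
	"fmt"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/da-codec/encoding/codecv2"
)

// reportCommitSizes shows the migrated call sites after this commit.
func reportCommitSizes(batch *encoding.Batch) error {
	// Previously: EstimateBatchL1CommitBatchSizeAndBlobSize(batch, false /* no conditional encode */).
	batchBytesSize, blobSize, err := codecv2.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
	if err != nil {
		return err
	}
	fmt.Printf("uncompressed batch: %d bytes, padded blob: %d bytes\n", batchBytesSize, blobSize)

	// Previously: ConstructBlobPayload(batch.Chunks, false /* no conditional encode */, false /* no mock */).
	blob, blobVersionedHash, z, err := codecv2.ConstructBlobPayload(batch.Chunks, false /* no mock */)
	if err != nil {
		return err
	}
	_ = blob // ready to be committed to as an EIP-4844 sidecar
	fmt.Printf("versioned hash: %s, challenge point: %x\n", blobVersionedHash.Hex(), z[:])
	return nil
}
```
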
28 changes: 14 additions & 14 deletions encoding/codecv2/codecv2_test.go
@@ -59,17 +59,17 @@ func TestCodecV2BlockEncode(t *testing.T) {
 	encoded = hex.EncodeToString(block.Encode())
 	assert.Equal(t, "000000000000001100000000646b6ed0000000000000000000000000000000000000000000000000000000000000000000000000007a120001010101", encoded)
 
-	// sanity check: v0 and v1 block encodings are identical
+	// sanity check: v0 and v2 block encodings are identical
 	for _, trace := range []*encoding.Block{trace2, trace3, trace4, trace5, trace6, trace7} {
 		blockv0, err := codecv0.NewDABlock(trace, 0)
 		assert.NoError(t, err)
 		encodedv0 := hex.EncodeToString(blockv0.Encode())
 
-		blockv1, err := NewDABlock(trace, 0)
+		blockv2, err := NewDABlock(trace, 0)
 		assert.NoError(t, err)
-		encodedv1 := hex.EncodeToString(blockv1.Encode())
+		encodedv2 := hex.EncodeToString(blockv2.Encode())
 
-		assert.Equal(t, encodedv0, encodedv1)
+		assert.Equal(t, encodedv0, encodedv2)
 	}
 }

@@ -674,7 +674,7 @@ func TestCodecV2BatchStandardTestCases(t *testing.T) {
 		chunks = append(chunks, chunk)
 	}
 
-	blob, blobVersionedHash, z, err := ConstructBlobPayload(chunks, false /* no conditional encode */, true /* use mock */)
+	blob, blobVersionedHash, z, err := ConstructBlobPayload(chunks, true /* use mock */)
 	require.NoError(t, err)
 	actualZ := hex.EncodeToString(z[:])
 	assert.Equal(t, tc.expectedz, actualZ)
@@ -870,52 +870,52 @@ func TestCodecV2BatchSkipBitmap(t *testing.T) {
 func TestCodecV2ChunkAndBatchBlobSizeEstimation(t *testing.T) {
 	trace2 := readBlockFromJSON(t, "../testdata/blockTrace_02.json")
 	chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
-	chunk2BatchBytesSize, chunk2BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk2, false /* no conditional encode */)
+	chunk2BatchBytesSize, chunk2BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk2)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(412), chunk2BatchBytesSize)
 	assert.Equal(t, uint64(237), chunk2BlobSize)
 	batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
-	batch2BatchBytesSize, batch2BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch2, false /* no conditional encode */)
+	batch2BatchBytesSize, batch2BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch2)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(412), batch2BatchBytesSize)
 	assert.Equal(t, uint64(237), batch2BlobSize)
 
 	trace3 := readBlockFromJSON(t, "../testdata/blockTrace_03.json")
 	chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
-	chunk3BatchBytesSize, chunk3BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk3, false /* no conditional encode */)
+	chunk3BatchBytesSize, chunk3BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk3)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(5863), chunk3BatchBytesSize)
 	assert.Equal(t, uint64(2933), chunk3BlobSize)
 	batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
-	batch3BatchBytesSize, batch3BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch3, false /* no conditional encode */)
+	batch3BatchBytesSize, batch3BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch3)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(5863), batch3BatchBytesSize)
 	assert.Equal(t, uint64(2933), batch3BlobSize)
 
 	trace4 := readBlockFromJSON(t, "../testdata/blockTrace_04.json")
 	chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
-	chunk4BatchBytesSize, chunk4BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk4, false /* no conditional encode */)
+	chunk4BatchBytesSize, chunk4BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk4)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(214), chunk4BatchBytesSize)
 	assert.Equal(t, uint64(54), chunk4BlobSize)
 	batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
-	blob4BatchBytesSize, batch4BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch4, false /* no conditional encode */)
+	blob4BatchBytesSize, batch4BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch4)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(214), blob4BatchBytesSize)
 	assert.Equal(t, uint64(54), batch4BlobSize)
 
 	chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
-	chunk5BatchBytesSize, chunk5BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk5, false /* no conditional encode */)
+	chunk5BatchBytesSize, chunk5BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk5)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(6093), chunk5BatchBytesSize)
 	assert.Equal(t, uint64(3149), chunk5BlobSize)
 	chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
-	chunk6BatchBytesSize, chunk6BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk6, false /* no conditional encode */)
+	chunk6BatchBytesSize, chunk6BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk6)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(214), chunk6BatchBytesSize)
 	assert.Equal(t, uint64(54), chunk6BlobSize)
 	batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
-	batch5BatchBytesSize, batch5BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch5, false /* no conditional encode */)
+	batch5BatchBytesSize, batch5BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch5)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(6125), batch5BatchBytesSize)
 	assert.Equal(t, uint64(3186), batch5BlobSize)

16 changes: 8 additions & 8 deletions encoding/codecv3/codecv3.go
@@ -53,7 +53,7 @@ func NewDAChunk(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64) (*DACh
 }
 
 // NewDABatch creates a DABatch from the provided encoding.Batch.
-func NewDABatch(batch *encoding.Batch, conditionalEncode bool) (*DABatch, error) {
+func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
 	// this encoding can only support a fixed number of chunks per batch
 	if len(batch.Chunks) > MaxNumChunks {
 		return nil, errors.New("too many chunks in batch")
@@ -80,7 +80,7 @@ func NewDABatch(batch *encoding.Batch, conditionalEncode bool) (*DABatch, error)
 	}
 
 	// blob payload
-	blob, blobVersionedHash, z, err := ConstructBlobPayload(batch.Chunks, conditionalEncode, false /* no mock */)
+	blob, blobVersionedHash, z, err := ConstructBlobPayload(batch.Chunks, false /* no mock */)
 	if err != nil {
 		return nil, err
 	}
@@ -118,8 +118,8 @@ func ComputeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore u
 }
 
 // ConstructBlobPayload constructs the 4844 blob payload.
-func ConstructBlobPayload(chunks []*encoding.Chunk, conditionalEncode bool, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
-	return codecv2.ConstructBlobPayload(chunks, conditionalEncode, useMockTxData)
+func ConstructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
+	return codecv2.ConstructBlobPayload(chunks, useMockTxData)
 }
 
 // NewDABatchFromBytes decodes the given byte slice into a DABatch.
@@ -232,13 +232,13 @@ func (b *DABatch) Blob() *kzg4844.Blob {
 }
 
 // EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
-func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk, conditionalEncode bool) (uint64, uint64, error) {
-	return codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize(c, conditionalEncode)
+func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk) (uint64, uint64, error) {
+	return codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize(c)
 }
 
 // EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
-func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch, conditionalEncode bool) (uint64, uint64, error) {
-	return codecv2.EstimateBatchL1CommitBatchSizeAndBlobSize(b, conditionalEncode)
+func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch) (uint64, uint64, error) {
+	return codecv2.EstimateBatchL1CommitBatchSizeAndBlobSize(b)
 }
 
 // CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
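
codecv3 stays a thin wrapper that forwards to codecv2, so its public surface shrinks in lockstep. A hypothetical helper around the updated entry point, assuming the scroll-tech go-ethereum fork's kzg4844 package that da-codec depends on:

```go
package example

import (
	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/da-codec/encoding/codecv3"
	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

// buildV3Blob wraps the updated codecv3 entry point.
func buildV3Blob(batch *encoding.Batch) (*kzg4844.Blob, error) {
	// Previously: codecv3.NewDABatch(batch, conditionalEncode).
	daBatch, err := codecv3.NewDABatch(batch)
	if err != nil {
		return nil, err
	}
	// Blob() returns the 4844 blob assembled by the shared codecv2 logic.
	return daBatch.Blob(), nil
}
```
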
[Diffs for the remaining 4 of the 8 changed files are not shown in this view.]
