feat: support conditional compress #23

Merged · 12 commits · Aug 22, 2024
34 changes: 28 additions & 6 deletions encoding/codecv2/codecv2.go
@@ -89,7 +89,7 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
}

// blob payload
blob, blobVersionedHash, z, err := ConstructBlobPayload(batch.Chunks, false /* no mock */)
blob, blobVersionedHash, z, err := ConstructBlobPayload(batch.Chunks, false /* no conditional encode */, false /* no mock */)
if err != nil {
return nil, err
}
@@ -119,7 +119,7 @@ func ComputeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore u
}

// ConstructBlobPayload constructs the 4844 blob payload.
func ConstructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
func ConstructBlobPayload(chunks []*encoding.Chunk, conditionalEncode bool, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
metadataLength := 2 + MaxNumChunks*4
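As a quick size check on the metadata layout just above (treating codecv2's `MaxNumChunks` value of 45 as an assumption here, since the constant is defined outside this diff):

```go
// Sketch, not part of the diff. Assumes codecv2 defines MaxNumChunks = 45.
const maxNumChunks = 45

// 2 bytes for num_chunks plus a 4-byte size slot per chunk:
// 2 + 45*4 = 182 bytes of metadata at the front of the blob payload.
const metadataLength = 2 + maxNumChunks*4
```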

@@ -193,6 +193,20 @@ func ConstructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
}
}

if conditionalEncode {
encoded := len(blobBytes) < len(batchBytes)
if encoded {
blobBytes = append([]byte{1}, blobBytes...)
} else {
blobBytes = append([]byte{0}, batchBytes...)
}
}

if len(blobBytes) > 126976 {
log.Error("ConstructBlobPayload: Blob payload exceeds maximum size", "size", len(blobBytes), "blobBytes", hex.EncodeToString(blobBytes))
return nil, common.Hash{}, nil, errors.New("Blob payload exceeds maximum size")
}

// convert raw data to BLSFieldElements
blob, err := MakeBlobCanonical(blobBytes)
if err != nil {
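The added block prepends a one-byte flag when `conditionalEncode` is set: `1` means the compressed payload won the size comparison and is kept, `0` means the raw `batchBytes` are stored instead. The `126976` bound is a blob's usable capacity when each of the 4096 field elements carries 31 payload bytes (4096 × 31 = 126976). Here is a hedged sketch of how a consumer could undo this framing; `decodeConditional` and the pluggable `decompress` are hypothetical, not part of the PR:

```go
// Hypothetical decoder sketch (not part of this PR). It assumes the framing
// introduced above: blobBytes[0] == 1 → remainder is compressed batch data,
// blobBytes[0] == 0 → remainder is the raw batch payload.
package main

import (
	"errors"
	"fmt"
)

// decodeConditional strips the flag byte and recovers the batch payload.
// decompress stands in for whatever decompressor matches the codec's
// compression scheme.
func decodeConditional(blobBytes []byte, decompress func([]byte) ([]byte, error)) ([]byte, error) {
	if len(blobBytes) == 0 {
		return nil, errors.New("empty blob payload")
	}
	switch blobBytes[0] {
	case 1: // compressed form was smaller, so it was kept
		return decompress(blobBytes[1:])
	case 0: // raw batch bytes were smaller, stored verbatim
		return blobBytes[1:], nil
	default:
		return nil, fmt.Errorf("unknown conditional-encode flag: %d", blobBytes[0])
	}
}

func main() {
	identity := func(b []byte) ([]byte, error) { return b, nil }
	payload, err := decodeConditional([]byte{0, 0xca, 0xfe}, identity)
	fmt.Printf("%x %v\n", payload, err) // cafe <nil>
}
```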
@@ -306,7 +320,7 @@ func (b *DABatch) Blob() *kzg4844.Blob {
}

// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk) (uint64, uint64, error) {
func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk, conditionalEncode bool) (uint64, uint64, error) {
batchBytes, err := constructBatchPayload([]*encoding.Chunk{c})
if err != nil {
return 0, 0, err
@@ -315,11 +329,15 @@ func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk) (uint64, uint6
if err != nil {
return 0, 0, err
}
return uint64(len(batchBytes)), CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
blobBytesLen := uint64(len(blobBytes))
if conditionalEncode {
blobBytesLen += 1
}
return uint64(len(batchBytes)), CalculatePaddedBlobSize(blobBytesLen), nil
}
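Both estimators now add one byte to the blob length before padding, mirroring the flag byte that `ConstructBlobPayload` prepends when `conditionalEncode` is set. Here is a sketch of why that single byte can change the padded size; the formula below is a reconstruction based on 31 payload bytes per 32-byte field element, not code copied from the repo:

```go
// Assumed shape of CalculatePaddedBlobSize: every full field element costs
// 32 bytes for 31 payload bytes, and a partial trailing element costs one
// leading zero byte plus the leftover payload. Treat as a reconstruction.
package main

import "fmt"

func calculatePaddedBlobSize(dataSize uint64) uint64 {
	padded := (dataSize / 31) * 32
	if dataSize%31 != 0 {
		padded += 1 + dataSize%31 // zero prefix byte + remaining payload bytes
	}
	return padded
}

func main() {
	// A 62-byte payload fills exactly two field elements; the extra
	// conditional-encode flag byte opens a third one.
	fmt.Println(calculatePaddedBlobSize(62)) // 64
	fmt.Println(calculatePaddedBlobSize(63)) // 66
}
```

Under this reconstruction a 229-byte payload pads to 237 bytes, which lines up with the chunk2 expectations in the tests below if that chunk's compressed payload is 229 bytes.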

// EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch) (uint64, uint64, error) {
func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch, conditionalEncode bool) (uint64, uint64, error) {
batchBytes, err := constructBatchPayload(b.Chunks)
if err != nil {
return 0, 0, err
@@ -328,7 +346,11 @@ func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch) (uint64, uint6
if err != nil {
return 0, 0, err
}
return uint64(len(batchBytes)), CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
blobBytesLen := uint64(len(blobBytes))
if conditionalEncode {
blobBytesLen += 1
}
return uint64(len(batchBytes)), CalculatePaddedBlobSize(blobBytesLen), nil
}

// CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
20 changes: 10 additions & 10 deletions encoding/codecv2/codecv2_test.go
@@ -674,7 +674,7 @@ func TestCodecV2BatchStandardTestCases(t *testing.T) {
chunks = append(chunks, chunk)
}

blob, blobVersionedHash, z, err := ConstructBlobPayload(chunks, true /* use mock */)
blob, blobVersionedHash, z, err := ConstructBlobPayload(chunks, false /* no conditional encode */, true /* use mock */)
require.NoError(t, err)
actualZ := hex.EncodeToString(z[:])
assert.Equal(t, tc.expectedz, actualZ)
@@ -870,52 +870,52 @@ func TestCodecV2BatchSkipBitmap(t *testing.T) {
func TestCodecV2ChunkAndBatchBlobSizeEstimation(t *testing.T) {
trace2 := readBlockFromJSON(t, "../testdata/blockTrace_02.json")
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
chunk2BatchBytesSize, chunk2BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk2)
chunk2BatchBytesSize, chunk2BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk2, false /* no conditional encode */)
assert.NoError(t, err)
assert.Equal(t, uint64(412), chunk2BatchBytesSize)
assert.Equal(t, uint64(237), chunk2BlobSize)
batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
batch2BatchBytesSize, batch2BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch2)
batch2BatchBytesSize, batch2BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch2, false /* no conditional encode */)
assert.NoError(t, err)
assert.Equal(t, uint64(412), batch2BatchBytesSize)
assert.Equal(t, uint64(237), batch2BlobSize)

trace3 := readBlockFromJSON(t, "../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
chunk3BatchBytesSize, chunk3BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk3)
chunk3BatchBytesSize, chunk3BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk3, false /* no conditional encode */)
assert.NoError(t, err)
assert.Equal(t, uint64(5863), chunk3BatchBytesSize)
assert.Equal(t, uint64(2933), chunk3BlobSize)
batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
batch3BatchBytesSize, batch3BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch3)
batch3BatchBytesSize, batch3BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch3, false /* no conditional encode */)
assert.NoError(t, err)
assert.Equal(t, uint64(5863), batch3BatchBytesSize)
assert.Equal(t, uint64(2933), batch3BlobSize)

trace4 := readBlockFromJSON(t, "../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk4BatchBytesSize, chunk4BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk4)
chunk4BatchBytesSize, chunk4BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk4, false /* no conditional encode */)
assert.NoError(t, err)
assert.Equal(t, uint64(214), chunk4BatchBytesSize)
assert.Equal(t, uint64(54), chunk4BlobSize)
batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
blob4BatchBytesSize, batch4BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch4)
blob4BatchBytesSize, batch4BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch4, false /* no conditional encode */)
assert.NoError(t, err)
assert.Equal(t, uint64(214), blob4BatchBytesSize)
assert.Equal(t, uint64(54), batch4BlobSize)

chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
chunk5BatchBytesSize, chunk5BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk5)
chunk5BatchBytesSize, chunk5BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk5, false /* no conditional encode */)
assert.NoError(t, err)
assert.Equal(t, uint64(6093), chunk5BatchBytesSize)
assert.Equal(t, uint64(3149), chunk5BlobSize)
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk6BatchBytesSize, chunk6BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk6)
chunk6BatchBytesSize, chunk6BlobSize, err := EstimateChunkL1CommitBatchSizeAndBlobSize(chunk6, false /* no conditional encode */)
assert.NoError(t, err)
assert.Equal(t, uint64(214), chunk6BatchBytesSize)
assert.Equal(t, uint64(54), chunk6BlobSize)
batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
batch5BatchBytesSize, batch5BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch5)
batch5BatchBytesSize, batch5BlobSize, err := EstimateBatchL1CommitBatchSizeAndBlobSize(batch5, false /* no conditional encode */)
assert.NoError(t, err)
assert.Equal(t, uint64(6125), batch5BatchBytesSize)
assert.Equal(t, uint64(3186), batch5BlobSize)
16 changes: 8 additions & 8 deletions encoding/codecv3/codecv3.go
@@ -53,7 +53,7 @@ func NewDAChunk(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64) (*DACh
}

// NewDABatch creates a DABatch from the provided encoding.Batch.
func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
func NewDABatch(batch *encoding.Batch, conditionalEncode bool) (*DABatch, error) {
// this encoding can only support a fixed number of chunks per batch
if len(batch.Chunks) > MaxNumChunks {
return nil, errors.New("too many chunks in batch")
@@ -80,7 +80,7 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
}

// blob payload
blob, blobVersionedHash, z, err := ConstructBlobPayload(batch.Chunks, false /* no mock */)
blob, blobVersionedHash, z, err := ConstructBlobPayload(batch.Chunks, conditionalEncode, false /* no mock */)
if err != nil {
return nil, err
}
@@ -118,8 +118,8 @@ func ComputeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore u
}

// ConstructBlobPayload constructs the 4844 blob payload.
func ConstructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
return codecv2.ConstructBlobPayload(chunks, useMockTxData)
func ConstructBlobPayload(chunks []*encoding.Chunk, conditionalEncode bool, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
return codecv2.ConstructBlobPayload(chunks, conditionalEncode, useMockTxData)
}

// NewDABatchFromBytes decodes the given byte slice into a DABatch.
@@ -232,13 +232,13 @@ func (b *DABatch) Blob() *kzg4844.Blob {
}

// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk) (uint64, uint64, error) {
return codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize(c)
func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk, conditionalEncode bool) (uint64, uint64, error) {
return codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize(c, conditionalEncode)
}

// EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch) (uint64, uint64, error) {
return codecv2.EstimateBatchL1CommitBatchSizeAndBlobSize(b)
func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch, conditionalEncode bool) (uint64, uint64, error) {
return codecv2.EstimateBatchL1CommitBatchSizeAndBlobSize(b, conditionalEncode)
}
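Since the codecv3 wrappers simply forward the new flag to codecv2, a caller threads `conditionalEncode` through once. Below is a hypothetical end-to-end sketch; the module import path and the batch wiring are assumptions, and only the two changed signatures come from this diff:

```go
// Hypothetical usage sketch of the post-PR codecv3 API.
package main

import (
	"fmt"

	"github.com/scroll-tech/da-codec/encoding"         // import path assumed
	"github.com/scroll-tech/da-codec/encoding/codecv3" // import path assumed
)

func commitBatch(batch *encoding.Batch) error {
	// Passing true makes the estimate account for the 1-byte flag.
	rawSize, blobSize, err := codecv3.EstimateBatchL1CommitBatchSizeAndBlobSize(batch, true /* conditional encode */)
	if err != nil {
		return err
	}
	fmt.Println("uncompressed batch bytes:", rawSize, "padded blob bytes:", blobSize)

	// Build the DA batch with conditional encoding enabled.
	daBatch, err := codecv3.NewDABatch(batch, true /* conditional encode */)
	if err != nil {
		return err
	}
	_ = daBatch.Blob() // the kzg4844.Blob to submit with the commit transaction
	return nil
}

func main() {
	// Constructing a realistic encoding.Batch is out of scope for this sketch.
	_ = commitBatch
}
```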

// CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.