refactor: move some common functions to encoding #24

Merged
87 changes: 4 additions & 83 deletions encoding/codecv1/codecv1.go
@@ -8,9 +8,7 @@ import (
"fmt"
"math/big"
"strings"
"sync"

"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
@@ -260,7 +258,7 @@ func constructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
copy(challengePreimage[0:], hash[:])

// convert raw data to BLSFieldElements
blob, err := MakeBlobCanonical(blobBytes)
blob, err := encoding.MakeBlobCanonical(blobBytes)
if err != nil {
return nil, common.Hash{}, nil, err
}
@@ -288,31 +286,6 @@ func constructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
return blob, blobVersionedHash, &z, nil
}

// MakeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
func MakeBlobCanonical(blobBytes []byte) (*kzg4844.Blob, error) {
// blob contains 131072 bytes but we can only utilize 31/32 of these
if len(blobBytes) > 126976 {
return nil, fmt.Errorf("oversized batch payload, blob bytes length: %v, max length: %v", len(blobBytes), 126976)
}

// the canonical (padded) blob payload
var blob kzg4844.Blob

// encode blob payload by prepending every 31 bytes with 1 zero byte
index := 0

for from := 0; from < len(blobBytes); from += 31 {
to := from + 31
if to > len(blobBytes) {
to = len(blobBytes)
}
copy(blob[index+1:], blobBytes[from:to])
index += 32
}

return &blob, nil
}
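
This helper now lives in the shared encoding package; the call site above already reads encoding.MakeBlobCanonical. A minimal standalone sketch of the encoding it performs, assuming the moved function keeps this signature: every 31-byte group of input occupies one 32-byte field element whose first byte is zero.

package main

import (
	"fmt"

	"github.com/scroll-tech/da-codec/encoding"
)

func main() {
	data := make([]byte, 62) // exactly two 31-byte groups
	// Hypothetical caller of the relocated helper.
	blob, err := encoding.MakeBlobCanonical(data)
	if err != nil {
		panic(err)
	}
	// Bytes 0 and 32 are the prepended zero bytes of the two field elements.
	fmt.Println(blob[0], blob[32]) // prints: 0 0
}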

// NewDABatchFromBytes decodes the given byte slice into a DABatch.
// Note: This function only populates the batch header, it leaves the blob-related fields empty.
func NewDABatchFromBytes(data []byte) (*DABatch, error) {
@@ -379,7 +352,7 @@ func (b *DABatch) BlobDataProof() ([]byte, error) {
// | bytes32 | bytes32 | bytes48 | bytes48 |

values := []interface{}{*b.z, y, commitment, proof}
blobDataProofArgs, err := GetBlobDataProofArgs()
blobDataProofArgs, err := encoding.GetBlobDataProofArgs()
if err != nil {
return nil, fmt.Errorf("failed to get blob data proof args, err: %w", err)
}
@@ -398,7 +371,7 @@ func EstimateChunkL1CommitBlobSize(c *encoding.Chunk) (uint64, error) {
if err != nil {
return 0, err
}
return CalculatePaddedBlobSize(metadataSize + chunkDataSize), nil
return encoding.CalculatePaddedBlobSize(metadataSize + chunkDataSize), nil
}

// EstimateBatchL1CommitBlobSize estimates the total size of the L1 commit blob for a batch.
@@ -412,7 +385,7 @@ func EstimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, error) {
}
batchDataSize += chunkDataSize
}
return CalculatePaddedBlobSize(metadataSize + batchDataSize), nil
return encoding.CalculatePaddedBlobSize(metadataSize + batchDataSize), nil
}

func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
@@ -550,55 +523,3 @@ func EstimateBatchL1CommitCalldataSize(b *encoding.Batch) uint64 {
}
return totalL1CommitCalldataSize
}

// CalculatePaddedBlobSize calculates the required size on blob storage
// where every 32 bytes can store only 31 bytes of actual data, with the first byte being zero.
func CalculatePaddedBlobSize(dataSize uint64) uint64 {
paddedSize := (dataSize / 31) * 32

if dataSize%31 != 0 {
paddedSize += 1 + dataSize%31 // Add 1 byte for the first empty byte plus the remainder bytes
}

return paddedSize
}
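
The arithmetic is easiest to check with a concrete input: 100 bytes of data fill three full 31-byte groups (3 * 32 = 96 padded bytes) plus a 7-byte remainder, which costs one zero byte plus the 7 remainder bytes, for 104 bytes of blob space in total. A hypothetical unit test against the relocated helper (assumes the usual testing and encoding imports):

func TestCalculatePaddedBlobSize(t *testing.T) {
	// 100 = 3*31 + 7  =>  3*32 + (1 + 7) = 104
	if got := encoding.CalculatePaddedBlobSize(100); got != 104 {
		t.Fatalf("padded size: want 104, got %d", got)
	}
}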

var (
blobDataProofArgs *abi.Arguments
initBlobDataProofArgsOnce sync.Once
)

// GetBlobDataProofArgs gets the blob data proof arguments for batch commitment and returns error if initialization fails.
func GetBlobDataProofArgs() (*abi.Arguments, error) {
var initError error

initBlobDataProofArgsOnce.Do(func() {
// Initialize bytes32 type
bytes32Type, err := abi.NewType("bytes32", "bytes32", nil)
if err != nil {
initError = fmt.Errorf("failed to initialize abi type bytes32: %w", err)
return
}

// Initialize bytes48 type
bytes48Type, err := abi.NewType("bytes48", "bytes48", nil)
if err != nil {
initError = fmt.Errorf("failed to initialize abi type bytes48: %w", err)
return
}

// Successfully create the argument list
blobDataProofArgs = &abi.Arguments{
{Type: bytes32Type, Name: "z"},
{Type: bytes32Type, Name: "y"},
{Type: bytes48Type, Name: "kzg_commitment"},
{Type: bytes48Type, Name: "kzg_proof"},
}
})

if initError != nil {
return nil, initError
}

return blobDataProofArgs, nil
}
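
Callers pack the four proof fields against this argument list. A hedged sketch of the call sequence implied by the BlobDataProof sites above; packBlobDataProof is a hypothetical helper, and the go-ethereum ABI packer accepts [32]byte and [48]byte arrays for the bytes32/bytes48 types:

// Hypothetical helper mirroring the BlobDataProof call sites in this diff.
func packBlobDataProof(z, y [32]byte, commitment, proof [48]byte) ([]byte, error) {
	args, err := encoding.GetBlobDataProofArgs()
	if err != nil {
		return nil, fmt.Errorf("failed to get blob data proof args, err: %w", err)
	}
	// Layout: z (bytes32) | y (bytes32) | kzg_commitment (bytes48) | kzg_proof (bytes48)
	return args.Pack(z, y, commitment, proof)
}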
105 changes: 14 additions & 91 deletions encoding/codecv2/codecv2.go
@@ -1,21 +1,13 @@
package codecv2

/*
#include <stdint.h>
char* compress_scroll_batch_bytes(uint8_t* src, uint64_t src_size, uint8_t* output_buf, uint64_t *output_buf_size);
*/
import "C"

import (
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math/big"
"unsafe"

"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
@@ -24,6 +16,7 @@ import (

"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/da-codec/encoding/codecv1"
"github.com/scroll-tech/da-codec/encoding/zstd"
)

// MaxNumChunks is the maximum number of chunks that a batch can contain.
@@ -176,7 +169,7 @@ func ConstructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
copy(challengePreimage[0:], hash[:])

// blobBytes represents the compressed blob payload (batchBytes)
blobBytes, err := compressScrollBatchBytes(batchBytes)
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
if err != nil {
return nil, common.Hash{}, nil, nil, err
}
@@ -196,7 +189,7 @@ func ConstructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
}

// convert raw data to BLSFieldElements
blob, err := MakeBlobCanonical(blobBytes)
blob, err := encoding.MakeBlobCanonical(blobBytes)
if err != nil {
return nil, common.Hash{}, nil, nil, err
}
@@ -224,11 +217,6 @@ func ConstructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
return blob, blobVersionedHash, &z, blobBytes, nil
}

// MakeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
func MakeBlobCanonical(blobBytes []byte) (*kzg4844.Blob, error) {
return codecv1.MakeBlobCanonical(blobBytes)
}

// NewDABatchFromBytes decodes the given byte slice into a DABatch.
// Note: This function only populates the batch header, it leaves the blob-related fields empty.
func NewDABatchFromBytes(data []byte) (*DABatch, error) {
@@ -295,7 +283,7 @@ func (b *DABatch) BlobDataProof() ([]byte, error) {
// | bytes32 | bytes32 | bytes48 | bytes48 |

values := []interface{}{*b.z, y, commitment, proof}
blobDataProofArgs, err := GetBlobDataProofArgs()
blobDataProofArgs, err := encoding.GetBlobDataProofArgs()
if err != nil {
return nil, fmt.Errorf("failed to get blob data proof args, err: %w", err)
}
@@ -309,38 +297,38 @@ func (b *DABatch) Blob() *kzg4844.Blob {

// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk) (uint64, uint64, error) {
batchBytes, err := constructBatchPayload([]*encoding.Chunk{c})
batchBytes, err := encoding.ConstructBatchPayloadInBlob([]*encoding.Chunk{c}, MaxNumChunks)
if err != nil {
return 0, 0, err
}
blobBytes, err := compressScrollBatchBytes(batchBytes)
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
if err != nil {
return 0, 0, err
}
return uint64(len(batchBytes)), CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
return uint64(len(batchBytes)), encoding.CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
}

// EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
func EstimateBatchL1CommitBatchSizeAndBlobSize(b *encoding.Batch) (uint64, uint64, error) {
batchBytes, err := constructBatchPayload(b.Chunks)
batchBytes, err := encoding.ConstructBatchPayloadInBlob(b.Chunks, MaxNumChunks)
if err != nil {
return 0, 0, err
}
blobBytes, err := compressScrollBatchBytes(batchBytes)
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
if err != nil {
return 0, 0, err
}
return uint64(len(batchBytes)), CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
return uint64(len(batchBytes)), encoding.CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
}

// CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
// It constructs a batch payload, compresses the data, and checks the compressed data compatibility if the uncompressed data exceeds 128 KiB.
func CheckChunkCompressedDataCompatibility(c *encoding.Chunk) (bool, error) {
batchBytes, err := constructBatchPayload([]*encoding.Chunk{c})
batchBytes, err := encoding.ConstructBatchPayloadInBlob([]*encoding.Chunk{c}, MaxNumChunks)
if err != nil {
return false, err
}
blobBytes, err := compressScrollBatchBytes(batchBytes)
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
if err != nil {
return false, err
}
@@ -358,11 +346,11 @@ func CheckChunkCompressedDataCompatibility(c *encoding.Chunk) (bool, error) {
// CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
// It constructs a batch payload, compresses the data, and checks the compressed data compatibility if the uncompressed data exceeds 128 KiB.
func CheckBatchCompressedDataCompatibility(b *encoding.Batch) (bool, error) {
batchBytes, err := constructBatchPayload(b.Chunks)
batchBytes, err := encoding.ConstructBatchPayloadInBlob(b.Chunks, MaxNumChunks)
if err != nil {
return false, err
}
blobBytes, err := compressScrollBatchBytes(batchBytes)
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
if err != nil {
return false, err
}
@@ -401,68 +389,3 @@ func EstimateChunkL1CommitGas(c *encoding.Chunk) uint64 {
func EstimateBatchL1CommitGas(b *encoding.Batch) uint64 {
return codecv1.EstimateBatchL1CommitGas(b)
}

// constructBatchPayload constructs the batch payload.
// This function is only used in compressed batch payload length estimation.
func constructBatchPayload(chunks []*encoding.Chunk) ([]byte, error) {
// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
metadataLength := 2 + MaxNumChunks*4

// batchBytes represents the raw (un-compressed and un-padded) blob payload
batchBytes := make([]byte, metadataLength)

// batch metadata: num_chunks
binary.BigEndian.PutUint16(batchBytes[0:], uint16(len(chunks)))

// encode batch metadata and L2 transactions,
for chunkID, chunk := range chunks {
currentChunkStartIndex := len(batchBytes)

for _, block := range chunk.Blocks {
for _, tx := range block.Transactions {
if tx.Type == types.L1MessageTxType {
continue
}

// encode L2 txs into batch payload
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx, false /* no mock */)
if err != nil {
return nil, err
}
batchBytes = append(batchBytes, rlpTxData...)
}
}

// batch metadata: chunki_size
if chunkSize := len(batchBytes) - currentChunkStartIndex; chunkSize != 0 {
binary.BigEndian.PutUint32(batchBytes[2+4*chunkID:], uint32(chunkSize))
}
}
return batchBytes, nil
}
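
The metadata section built here is fixed-size: a 2-byte num_chunks field followed by one 4-byte size slot per possible chunk, so chunk i's size always lands at offset 2 + 4*i and unused slots stay zero. An illustrative sketch of the layout, assuming MaxNumChunks is 45 (the constant's value is elided from this diff, so treat 45 as a placeholder):

// Illustrative only; maxNumChunks = 45 is an assumption, and encoding/binary is imported.
const maxNumChunks = 45

func exampleMetadataHeader() []byte {
	header := make([]byte, 2+maxNumChunks*4) // 2 + 45*4 = 182 bytes, zero-initialized
	binary.BigEndian.PutUint16(header[0:], 3)        // num_chunks: e.g. a batch of 3 chunks
	binary.BigEndian.PutUint32(header[2+4*0:], 1024) // chunk 0's size slot starts at offset 2
	return header
}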

// compressScrollBatchBytes compresses the given batch of bytes.
// The output buffer is allocated with an extra 128 bytes to accommodate metadata overhead or error message.
func compressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
srcSize := C.uint64_t(len(batchBytes))
outbufSize := C.uint64_t(len(batchBytes) + 128) // Allocate output buffer with extra 128 bytes
outbuf := make([]byte, outbufSize)

if err := C.compress_scroll_batch_bytes((*C.uchar)(unsafe.Pointer(&batchBytes[0])), srcSize,
(*C.uchar)(unsafe.Pointer(&outbuf[0])), &outbufSize); err != nil {
return nil, fmt.Errorf("failed to compress scroll batch bytes: %s", C.GoString(err))
}

return outbuf[:int(outbufSize)], nil
}
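
The cgo wrapper moves behind the new encoding/zstd package (imported near the top of this file), so individual codec packages no longer carry their own C bindings. A sketch of the post-refactor call site, assuming zstd.CompressScrollBatchBytes keeps this function's contract, including the 128 bytes of output headroom:

// Post-refactor call site (sketch); the compressed bytes feed MakeBlobCanonical next.
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
if err != nil {
	return nil, err
}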

// CalculatePaddedBlobSize calculates the required size on blob storage
// where every 32 bytes can store only 31 bytes of actual data, with the first byte being zero.
func CalculatePaddedBlobSize(dataSize uint64) uint64 {
return codecv1.CalculatePaddedBlobSize(dataSize)
}

// GetBlobDataProofArgs gets the blob data proof arguments for batch commitment and returns error if initialization fails.
func GetBlobDataProofArgs() (*abi.Arguments, error) {
return codecv1.GetBlobDataProofArgs()
}
8 changes: 1 addition & 7 deletions encoding/codecv3/codecv3.go
@@ -6,7 +6,6 @@ import (
"errors"
"fmt"

"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
@@ -223,7 +222,7 @@ func (b *DABatch) BlobDataProofForPointEvaluation() ([]byte, error) {
// | bytes32 | bytes32 | bytes48 | bytes48 |

values := []interface{}{*b.z, y, commitment, proof}
blobDataProofArgs, err := GetBlobDataProofArgs()
blobDataProofArgs, err := encoding.GetBlobDataProofArgs()
if err != nil {
return nil, fmt.Errorf("failed to get blob data proof args, err: %w", err)
}
@@ -279,8 +278,3 @@ func EstimateChunkL1CommitGas(c *encoding.Chunk) uint64 {
func EstimateBatchL1CommitGas(b *encoding.Batch) uint64 {
return codecv2.EstimateBatchL1CommitGas(b) + 50000 // plus 50000 for the point-evaluation precompile call.
}

// GetBlobDataProofArgs gets the blob data proof arguments for batch commitment and returns error if initialization fails.
func GetBlobDataProofArgs() (*abi.Arguments, error) {
return codecv2.GetBlobDataProofArgs()
}
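
Taken together, the moves leave every codec sharing one blob pipeline. A condensed sketch of the codecv2/v3 flow, with signatures inferred from the call sites in this diff and error handling shortened:

// Sketch of the post-refactor pipeline inside a codec package.
func buildBlob(chunks []*encoding.Chunk) (*kzg4844.Blob, uint64, error) {
	batchBytes, err := encoding.ConstructBatchPayloadInBlob(chunks, MaxNumChunks)
	if err != nil {
		return nil, 0, err
	}
	blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes) // zstd-compress the payload
	if err != nil {
		return nil, 0, err
	}
	blob, err := encoding.MakeBlobCanonical(blobBytes) // pad into 4096 field elements
	if err != nil {
		return nil, 0, err
	}
	// The padded size is computed over the compressed byte length.
	return blob, encoding.CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
}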