Add Celestia OSP
Update preimage oracle logic

Add Celestia Proof

- Extend `ReadInbox` OSP
- Correct behavior for reading invalid batches
- Make tree traversal iterative
- Add tests for appended DA proof

Fix config and Dockerfile
Ferret-san committed May 28, 2024
1 parent 15e90ef commit dfae469
Showing 19 changed files with 1,203 additions and 262 deletions.
1 change: 1 addition & 0 deletions Dockerfile
@@ -79,6 +79,7 @@ COPY ./fastcache ./fastcache
COPY ./go-ethereum ./go-ethereum
COPY --from=brotli-wasm-export / target/
COPY --from=contracts-builder workspace/contracts/build/contracts/src/precompiles/ contracts/build/contracts/src/precompiles/
COPY --from=contracts-builder workspace/contracts/build/contracts/src/celestia/ contracts/build/contracts/src/celestia/
COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/UpgradeExecutor.json contracts/
COPY --from=contracts-builder workspace/.make/ .make/
RUN PATH="$PATH:/usr/local/go/bin" NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-wasm-bin
8 changes: 8 additions & 0 deletions arbitrator/prover/src/lib.rs
@@ -351,3 +351,11 @@ pub unsafe extern "C" fn arbitrator_gen_proof(mach: *mut Machine) -> RustByteArr
pub unsafe extern "C" fn arbitrator_free_proof(proof: RustByteArray) {
drop(Vec::from_raw_parts(proof.ptr, proof.len, proof.capacity))
}

#[no_mangle]
pub unsafe extern "C" fn arbitrator_get_opcode(mach: *mut Machine) -> u16 {
match (*mach).get_next_instruction() {
Some(instruction) => return instruction.opcode.repr(),
None => panic!("Failed to get next opcode for Machine"),
}
}
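On the Go side, a caller could bind this new entry point roughly as follows (editorial sketch: the opaque `Machine` typedef, the package name, and linking against the prover's static library are assumptions, not the repository's actual bindings):

package arb

/*
#include <stdint.h>
typedef struct Machine Machine; // opaque handle owned by the Rust prover
extern uint16_t arbitrator_get_opcode(Machine *mach);
*/
import "C"

// NextOpcode returns the opcode of the machine's next instruction, which a
// validator can use e.g. to detect an inbox read before appending a DA proof.
func NextOpcode(mach *C.Machine) uint16 {
	return uint16(C.arbitrator_get_opcode(mach))
}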
2 changes: 1 addition & 1 deletion arbitrator/prover/src/machine.rs
@@ -2154,7 +2154,6 @@ impl Machine {
);

// End next instruction proof, begin instruction specific serialization

if let Some(next_inst) = func.code.get(self.pc.inst()) {
if matches!(
next_inst.opcode,
@@ -2336,6 +2335,7 @@ impl Machine {
{
data.push(0); // inbox proof type
data.extend(msg_data);
data.extend(msg_idx.to_be_bytes());
}
} else {
panic!("Should never ever get here");
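The bytes appended here form a suffix of the overall one-step proof: a proof-type tag, the message data, and now the big-endian message index. A consumer could peel that suffix apart like this (editorial Go sketch; it handles only the appended segment, and the offsets are inferred from the diff, not taken from the verifier's real parser):

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// parseInboxSegment splits the segment the Rust code above appends:
// [ proofType(1) | msgData(N) | msgIdx(8, big-endian) ].
func parseInboxSegment(seg []byte) (msgData []byte, msgIdx uint64, err error) {
	if len(seg) < 9 {
		return nil, 0, errors.New("segment too short")
	}
	if seg[0] != 0 { // 0 = inbox proof type, per the diff
		return nil, 0, fmt.Errorf("unexpected proof type %d", seg[0])
	}
	msgIdx = binary.BigEndian.Uint64(seg[len(seg)-8:])
	return seg[1 : len(seg)-8], msgIdx, nil
}

func main() {
	seg := append(append([]byte{0}, []byte("msg")...), 0, 0, 0, 0, 0, 0, 0, 42)
	fmt.Println(parseInboxSegment(seg)) // [109 115 103] 42 <nil>
}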
3 changes: 2 additions & 1 deletion arbnode/node.go
@@ -538,7 +538,7 @@ func createNodeImpl(
} else if l2Config.ArbitrumChainParams.DataAvailabilityCommittee {
return nil, errors.New("a data availability service is required for this chain, but it was not configured")
} else if config.Celestia.Enable {
celestiaService, err := celestia.NewCelestiaDA(config.Celestia, l1client)
celestiaService, err := celestia.NewCelestiaDA(&config.Celestia, nil)
if err != nil {
return nil, err
}
@@ -559,6 +559,7 @@

var statelessBlockValidator *staker.StatelessBlockValidator
if config.BlockValidator.ValidationServerConfigs[0].URL != "" {
// pass blobstream address and L1 connection
statelessBlockValidator, err = staker.NewStatelessBlockValidator(
inboxReader,
inboxTracker,
2 changes: 1 addition & 1 deletion arbstate/das_reader.go
@@ -37,7 +37,7 @@ const DASMessageHeaderFlag byte = 0x80

// CelestiaMessageHeaderFlag indicates that this data is a Blob Pointer
// which will be used to retrieve data from Celestia
const CelestiaMessageHeaderFlag byte = 0x0c
const CelestiaMessageHeaderFlag byte = 0x63

// TreeDASMessageHeaderFlag indicates that this DAS certificate data employs the new merkleization strategy.
// Ignored when DASMessageHeaderFlag is not set.
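For reference, the flag is consumed by the `IsCelestiaMessageHeaderByte` guard seen in the inbox changes below; a minimal sketch, assuming the same bitmask pattern as the DAS header flags (the actual helper may differ):

package arbstate

// IsCelestiaMessageHeaderByte reports whether a batch's header byte marks a
// Celestia blob pointer. Editorial sketch; assumes a hasBits-style check.
func IsCelestiaMessageHeaderByte(header byte) bool {
	return (header & CelestiaMessageHeaderFlag) == CelestiaMessageHeaderFlag
}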
14 changes: 9 additions & 5 deletions arbstate/inbox.go
@@ -415,7 +415,7 @@ func RecoverPayloadFromCelestiaBatch(
log.Error("Couldn't deserialize Celestia header byte", "err", err)
return nil, nil
}
if !celestia.IsCelestiaMessageHeaderByte(header) {
if !IsCelestiaMessageHeaderByte(header) {
log.Error("Couldn't deserialize Celestia header byte", "err", errors.New("tried to deserialize a message that doesn't have the Celestia header"))
return nil, nil
}
@@ -426,7 +426,7 @@

blobPointer := celestia.BlobPointer{}
blobBytes := buf.Bytes()
blobPointer.UnmarshalBinary(blobBytes)
err = blobPointer.UnmarshalBinary(blobBytes)
if err != nil {
log.Error("Couldn't unmarshal Celestia blob pointer", "err", err)
return nil, nil
@@ -438,17 +438,21 @@
return nil, err
}

// we read a batch that is to be discarded, so we return the empty batch
if len(payload) == 0 {
return payload, nil
}

if sha256Preimages != nil {
if squareData == nil {
log.Error("squareData is nil, read from replay binary, but preimages are empty")
return nil, err
}

odsSize := squareData.SquareSize / 2
rowIndex := squareData.StartRow
squareSize := squareData.SquareSize
for _, row := range squareData.Rows {
// half of the squareSize for the EDS gives us the original length of the data
treeConstructor := tree.NewConstructor(recordPreimage, squareSize/2)
treeConstructor := tree.NewConstructor(recordPreimage, odsSize)
root, err := tree.ComputeNmtRoot(treeConstructor, uint(rowIndex), row)
if err != nil {
log.Error("Failed to compute row root", "err", err)
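A quick worked check of the corrected constructor argument (editorial example, not committed code): 2D erasure coding doubles the square in each dimension, so only the left half of each extended row carries original shares.

package main

import "fmt"

func main() {
	// An EDS row is [ original (ODS) shares | parity shares ], so a row root
	// over original data is built from squareSize/2 leaves, not squareSize.
	squareSize := uint64(8) // width of the extended square
	odsSize := squareSize / 2
	fmt.Println(odsSize) // 4 leaves per NMT row root
}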
88 changes: 51 additions & 37 deletions cmd/replay/main.go
@@ -158,6 +158,9 @@ func (dasReader *PreimageCelestiaReader) Read(ctx context.Context, blobPointer *
return wavmio.ResolveTypedPreimage(arbutil.Sha2_256PreimageType, hash)
}

if blobPointer.SharesLength == 0 {
return nil, nil, fmt.Errorf("Error, shares length is %v", blobPointer.SharesLength)
}
// first, walk down the merkle tree
leaves, err := tree.MerkleTreeContent(oracle, common.BytesToHash(blobPointer.DataRoot[:]))
if err != nil {
@@ -171,43 +174,46 @@
// We get the original data square size, which is (size_of_the_extended_square / 2)
odsSize := squareSize / 2

startRow := blobPointer.Start / squareSize

startIndex := blobPointer.Start % squareSize
startRow := blobPointer.Start / odsSize

firtsRowShares := odsSize - startIndex
var endIndex uint64
var endRow uint64
var remainingShares uint64
var rowsNeeded uint64
if blobPointer.SharesLength <= firtsRowShares {
endIndex = blobPointer.Start + blobPointer.SharesLength - 1
endRow = startRow
} else {
remainingShares = blobPointer.SharesLength - firtsRowShares
rowsNeeded = remainingShares / odsSize
endRow = startRow + rowsNeeded + func() uint64 {
if remainingShares%odsSize > 0 {
return 1
} else {
return 0
}
}()
partialRow := func() bool {
if blobPointer.SharesLength%squareSize > 0 {
return true
} else {
return false
}
}()
if blobPointer.Start >= odsSize*odsSize {
// check that the square isn't just our share (very niche case, should only happen in local testing)
if blobPointer.Start != odsSize*odsSize && odsSize > 1 {
return nil, nil, fmt.Errorf("Error Start Index out of ODS bounds: index=%v odsSize=%v", blobPointer.Start, odsSize)
}
}

if partialRow {
endIndex = endRow*odsSize + (remainingShares%odsSize - 1)
} else {
endIndex = (endRow * odsSize) - 1
// adjusted_end_index = adjusted_start_index + length - 1
if blobPointer.Start+blobPointer.SharesLength < 1 {
return nil, nil, fmt.Errorf("Error getting number of shares in first row: index+length %v > 1", blobPointer.Start+blobPointer.SharesLength)
}
endIndexOds := blobPointer.Start + blobPointer.SharesLength - 1
if endIndexOds >= odsSize*odsSize {
// check that the square isn't just our share (very niche case, should only happen in local testing)
if endIndexOds != odsSize*odsSize && odsSize > 1 {
return nil, nil, fmt.Errorf("Error End Index out of ODS bounds: index=%v odsSize=%v", endIndexOds, odsSize)
}
}
endIndex = endIndex % squareSize
endRow := endIndexOds / odsSize

if endRow > odsSize || startRow > odsSize {
return nil, nil, fmt.Errorf("Error rows out of bounds: startRow=%v endRow=%v odsSize=%v", startRow, endRow, odsSize)
}

startColumn := blobPointer.Start % odsSize
endColumn := endIndexOds % odsSize

if startRow == endRow && startColumn > endColumn+1 {
log.Error("startColumn > endColumn+1 on the same row", "startColumn", startColumn, "endColumn+1 ", endColumn+1)
return []byte{}, nil, nil
}

// TODO: adjust the math in the CelestiaPayload function in the inbox to match.
//
// The checks above work as follows: odsSize*odsSize bounds the end index in
// the ODS; if the start index is out of bounds we return an empty batch, and
// if the end index is out of bounds the batch is likewise ignored.

// get rows behind row root and shares for our blob
rows := [][][]byte{}
shares := [][]byte{}
Expand All @@ -220,23 +226,27 @@ func (dasReader *PreimageCelestiaReader) Read(ctx context.Context, blobPointer *

odsRow := row[:odsSize]

// A blob either fits within a single row, or spans the tail of its start
// row, any number of full middle rows, and the head of its end row.
if startRow == endRow {
shares = append(shares, odsRow[startIndex:endIndex+1]...)
shares = append(shares, odsRow[startColumn:endColumn+1]...)
break
} else if i == startRow {
shares = append(shares, odsRow[startIndex:]...)
shares = append(shares, odsRow[startColumn:]...)
} else if i == endRow {
shares = append(shares, odsRow[:endIndex+1]...)
shares = append(shares, odsRow[:endColumn+1]...)
} else {
shares = append(shares, odsRow...)
}
}

data := []byte{}
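// Editorial note on the offsets below: an NMT leaf is namespace||share, and a
// share itself begins with the namespace, so the raw leaf starts with two
// copies of the 29-byte namespace. The first share of a sequence is roughly
//   [ namespace (29) | info byte (1) | sequence length (4, big-endian) | data ],
// hence the sequence length sits at bytes [2*NamespaceSize+1 : 2*NamespaceSize+5].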
if tree.NamespaceSize*2+1 > uint64(len(shares[0])) || tree.NamespaceSize*2+5 > uint64(len(shares[0])) {
return nil, nil, fmt.Errorf("Error getting sequence length on share of size %v", len(shares[0]))
}
sequenceLength := binary.BigEndian.Uint32(shares[0][tree.NamespaceSize*2+1 : tree.NamespaceSize*2+5])
for i, share := range shares {
// trim extra namespace
share := share[29:]
share := share[tree.NamespaceSize:]
if i == 0 {
data = append(data, share[tree.NamespaceSize+5:]...)
continue
@@ -256,6 +266,10 @@ func (dasReader *PreimageCelestiaReader) Read(ctx context.Context, blobPointer *
return data, &squareData, nil
}
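// (Editorial note: the preimage-backed reader never produces a DA proof
// itself; GetProof below is a stub satisfying the reader interface.)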

func (dasReader *PreimageCelestiaReader) GetProof(ctx context.Context, msg []byte) ([]byte, error) {
return nil, nil
}

// To generate:
// key, _ := crypto.HexToECDSA("0000000000000000000000000000000000000000000000000000000000000001")
// sig, _ := crypto.Sign(make([]byte, 32), key)
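To make the share-range arithmetic in the hunk above concrete, here is a worked example (editorial sketch; `shareRange` is a hypothetical helper mirroring the diff's math, not committed code):

package main

import "fmt"

// shareRange mirrors the ODS index math above: a blob occupying `length`
// shares starting at ODS index `start` covers rows startRow..endRow, from
// column startCol of the first row through column endCol of the last.
func shareRange(start, length, odsSize uint64) (startRow, startCol, endRow, endCol uint64) {
	endIndexOds := start + length - 1
	return start / odsSize, start % odsSize, endIndexOds / odsSize, endIndexOds % odsSize
}

func main() {
	// A 5-share blob at ODS index 6 in a 4x4 ODS covers the tail of row 1
	// (columns 2..3) and the head of row 2 (columns 0..2).
	fmt.Println(shareRange(6, 5, 4)) // 1 2 2 2
}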
45 changes: 0 additions & 45 deletions das/celestia/blob.go
@@ -10,12 +10,8 @@ type BlobPointer struct {
BlockHeight uint64
Start uint64
SharesLength uint64
Key uint64
NumLeaves uint64
ProofNonce uint64
TxCommitment [32]byte
DataRoot [32]byte
SideNodes [][32]byte
}

// MarshalBinary encodes the BlobPointer to binary
@@ -33,15 +29,6 @@ func (b *BlobPointer) MarshalBinary() ([]byte, error) {
if err := binary.Write(buf, binary.BigEndian, b.SharesLength); err != nil {
return nil, err
}
if err := binary.Write(buf, binary.BigEndian, b.Key); err != nil {
return nil, err
}
if err := binary.Write(buf, binary.BigEndian, b.NumLeaves); err != nil {
return nil, err
}
if err := binary.Write(buf, binary.BigEndian, b.ProofNonce); err != nil {
return nil, err
}

// Writing fixed-size byte arrays directly
if _, err := buf.Write(b.TxCommitment[:]); err != nil {
@@ -51,24 +38,13 @@ func (b *BlobPointer) MarshalBinary() ([]byte, error) {
return nil, err
}

// Writing slice of fixed-size byte arrays
if err := binary.Write(buf, binary.BigEndian, uint64(len(b.SideNodes))); err != nil {
return nil, err
}
for _, sideNode := range b.SideNodes {
if _, err := buf.Write(sideNode[:]); err != nil {
return nil, err
}
}

return buf.Bytes(), nil
}

// UnmarshalBinary decodes the binary to BlobPointer
// serialization format: height + start + shares length + tx commitment + data root
func (b *BlobPointer) UnmarshalBinary(data []byte) error {
buf := bytes.NewReader(data)

// Reading fixed-size values
if err := binary.Read(buf, binary.BigEndian, &b.BlockHeight); err != nil {
return err
@@ -79,15 +55,6 @@ func (b *BlobPointer) UnmarshalBinary(data []byte) error {
if err := binary.Read(buf, binary.BigEndian, &b.SharesLength); err != nil {
return err
}
if err := binary.Read(buf, binary.BigEndian, &b.Key); err != nil {
return err
}
if err := binary.Read(buf, binary.BigEndian, &b.NumLeaves); err != nil {
return err
}
if err := binary.Read(buf, binary.BigEndian, &b.ProofNonce); err != nil {
return err
}

// Reading fixed-size byte arrays directly
if err := readFixedBytes(buf, b.TxCommitment[:]); err != nil {
@@ -97,18 +64,6 @@ func (b *BlobPointer) UnmarshalBinary(data []byte) error {
return err
}

// Reading slice of fixed-size byte arrays
var sideNodesLen uint64
if err := binary.Read(buf, binary.BigEndian, &sideNodesLen); err != nil {
return err
}
b.SideNodes = make([][32]byte, sideNodesLen)
for i := uint64(0); i < sideNodesLen; i++ {
if err := readFixedBytes(buf, b.SideNodes[i][:]); err != nil {
return err
}
}

return nil
}

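With the proof fields gone, the pointer round-trips through the five remaining fields; a usage sketch (editorial; the import path is assumed, not taken from this commit):

package main

import (
	"fmt"
	"log"

	celestia "github.com/offchainlabs/nitro/das/celestia" // path assumed
)

func main() {
	// BlobPointer now carries only: block height, start index, shares length,
	// tx commitment, and data root (3*8 + 2*32 = 88 bytes on the wire).
	ptr := celestia.BlobPointer{
		BlockHeight:  1234,
		Start:        6,
		SharesLength: 5,
		// TxCommitment and DataRoot would come from the submission receipt.
	}
	raw, err := ptr.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(raw)) // 88

	var back celestia.BlobPointer
	if err := back.UnmarshalBinary(raw); err != nil {
		log.Fatal(err)
	}
}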