Skip to content

Commit

Permalink
audit fixes
Browse files Browse the repository at this point in the history
hardening 🔧

Update contracts

fixes

Update main.go

Update contracts

isolate dependencies

Update celestia.go

Update .gitmodules

Update main.go
  • Loading branch information
Ferret-san committed May 28, 2024
1 parent dfae469 commit 2b2750e
Show file tree
Hide file tree
Showing 20 changed files with 144 additions and 125 deletions.
2 changes: 1 addition & 1 deletion .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
[submodule "contracts"]
path = contracts
url = https://github.com/celestiaorg/nitro-contracts.git
branch = celestia-v1.2.1
branch = contracts-v1.2.1
[submodule "arbitrator/wasm-testsuite/testsuite"]
path = arbitrator/wasm-testsuite/testsuite
url = https://github.com/WebAssembly/testsuite.git
Expand Down
8 changes: 7 additions & 1 deletion arbitrator/prover/src/machine.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2335,7 +2335,13 @@ impl Machine {
{
data.push(0); // inbox proof type
data.extend(msg_data);
data.extend(msg_idx.to_be_bytes());
match inbox_identifier {
InboxIdentifier::Sequencer => {
data.extend(msg_idx.to_be_bytes());
data.push(0x0);
}
InboxIdentifier::Delayed => data.push(0x1),
}
}
} else {
panic!("Should never ever get here");
Expand Down
6 changes: 3 additions & 3 deletions arbnode/batch_poster.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ import (
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/cmd/genericconf"
"github.com/offchainlabs/nitro/das"
"github.com/offchainlabs/nitro/das/celestia"
celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types"
"github.com/offchainlabs/nitro/execution"
"github.com/offchainlabs/nitro/solgen/go/bridgegen"
"github.com/offchainlabs/nitro/util"
Expand Down Expand Up @@ -90,7 +90,7 @@ type BatchPoster struct {
gasRefunderAddr common.Address
building *buildingBatch
daWriter das.DataAvailabilityServiceWriter
celestiaWriter celestia.DataAvailabilityWriter
celestiaWriter celestiaTypes.DataAvailabilityWriter
dataPoster *dataposter.DataPoster
redisLock *redislock.Simple
messagesPerBatch *arbmath.MovingAverage[uint64]
Expand Down Expand Up @@ -272,7 +272,7 @@ type BatchPosterOpts struct {
DeployInfo *chaininfo.RollupAddresses
TransactOpts *bind.TransactOpts
DAWriter das.DataAvailabilityServiceWriter
CelestiaWriter celestia.DataAvailabilityWriter
CelestiaWriter celestiaTypes.DataAvailabilityWriter
ParentChainID *big.Int
}

Expand Down
5 changes: 3 additions & 2 deletions arbnode/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ import (
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/das"
"github.com/offchainlabs/nitro/das/celestia"
celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types"
"github.com/offchainlabs/nitro/execution"
"github.com/offchainlabs/nitro/execution/gethexec"
"github.com/offchainlabs/nitro/solgen/go/bridgegen"
Expand Down Expand Up @@ -512,8 +513,8 @@ func createNodeImpl(
var daWriter das.DataAvailabilityServiceWriter
var daReader das.DataAvailabilityServiceReader
var dasLifecycleManager *das.LifecycleManager
var celestiaReader celestia.DataAvailabilityReader
var celestiaWriter celestia.DataAvailabilityWriter
var celestiaReader celestiaTypes.DataAvailabilityReader
var celestiaWriter celestiaTypes.DataAvailabilityWriter
if config.DataAvailability.Enable {
if config.BatchPoster.Enable {
daWriter, daReader, dasLifecycleManager, err = das.CreateBatchPosterDAS(ctx, &config.DataAvailability, dataSigner, l1client, deployInfo.SequencerInbox)
Expand Down
4 changes: 2 additions & 2 deletions arbstate/das_reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ import (

"github.com/offchainlabs/nitro/arbos/util"
"github.com/offchainlabs/nitro/blsSignatures"
"github.com/offchainlabs/nitro/das/celestia"
celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types"
"github.com/offchainlabs/nitro/das/dastree"
)

Expand All @@ -26,7 +26,7 @@ type DataAvailabilityReader interface {
}

type CelestiaDataAvailabilityReader interface {
celestia.DataAvailabilityReader
celestiaTypes.DataAvailabilityReader
}

var ErrHashMismatch = errors.New("result does not match expected hash")
Expand Down
10 changes: 5 additions & 5 deletions arbstate/inbox.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,8 @@ import (
"github.com/offchainlabs/nitro/arbos/arbostypes"
"github.com/offchainlabs/nitro/arbos/l1pricing"
"github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/das/celestia"
"github.com/offchainlabs/nitro/das/celestia/tree"
celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types"
"github.com/offchainlabs/nitro/das/dastree"
"github.com/offchainlabs/nitro/util/blobs"
"github.com/offchainlabs/nitro/zeroheavy"
Expand Down Expand Up @@ -368,14 +368,14 @@ func (b *dAProviderForBlobReader) RecoverPayloadFromBatch(
return payload, nil
}

func NewDAProviderCelestia(celestia celestia.DataAvailabilityReader) *dAProviderForCelestia {
func NewDAProviderCelestia(celestia celestiaTypes.DataAvailabilityReader) *dAProviderForCelestia {
return &dAProviderForCelestia{
celestia: celestia,
}
}

type dAProviderForCelestia struct {
celestia celestia.DataAvailabilityReader
celestia celestiaTypes.DataAvailabilityReader
}

func (c *dAProviderForCelestia) IsValidHeaderByte(headerByte byte) bool {
Expand All @@ -397,7 +397,7 @@ func RecoverPayloadFromCelestiaBatch(
ctx context.Context,
batchNum uint64,
sequencerMsg []byte,
celestiaReader celestia.DataAvailabilityReader,
celestiaReader celestiaTypes.DataAvailabilityReader,
preimages map[arbutil.PreimageType]map[common.Hash][]byte,
) ([]byte, error) {
var sha256Preimages map[common.Hash][]byte
Expand All @@ -424,7 +424,7 @@ func RecoverPayloadFromCelestiaBatch(
sha256Preimages[key] = value
}

blobPointer := celestia.BlobPointer{}
blobPointer := celestiaTypes.BlobPointer{}
blobBytes := buf.Bytes()
err = blobPointer.UnmarshalBinary(blobBytes)
if err != nil {
Expand Down
29 changes: 9 additions & 20 deletions cmd/replay/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,8 @@ import (
"github.com/offchainlabs/nitro/arbstate"
"github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/das/celestia"
"github.com/offchainlabs/nitro/das/celestia/tree"
celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types"
"github.com/offchainlabs/nitro/das/dastree"
"github.com/offchainlabs/nitro/gethhook"
"github.com/offchainlabs/nitro/wavmio"
Expand Down Expand Up @@ -153,7 +153,7 @@ func (r *BlobPreimageReader) Initialize(ctx context.Context) error {
type PreimageCelestiaReader struct {
}

func (dasReader *PreimageCelestiaReader) Read(ctx context.Context, blobPointer *celestia.BlobPointer) ([]byte, *celestia.SquareData, error) {
func (dasReader *PreimageCelestiaReader) Read(ctx context.Context, blobPointer *celestiaTypes.BlobPointer) ([]byte, *celestiaTypes.SquareData, error) {
oracle := func(hash common.Hash) ([]byte, error) {
return wavmio.ResolveTypedPreimage(arbutil.Sha2_256PreimageType, hash)
}
Expand Down Expand Up @@ -196,15 +196,15 @@ func (dasReader *PreimageCelestiaReader) Read(ctx context.Context, blobPointer *
}
endRow := endIndexOds / odsSize

if endRow > odsSize || startRow > odsSize {
if endRow >= odsSize || startRow >= odsSize {
return nil, nil, fmt.Errorf("Error rows out of bounds: startRow=%v endRow=%v odsSize=%v", startRow, endRow, odsSize)
}

startColumn := blobPointer.Start % odsSize
endColumn := endIndexOds % odsSize

if startRow == endRow && startColumn > endColumn+1 {
log.Error("startColumn > endColumn+1 on the same row", "startColumn", startColumn, "endColumn+1 ", endColumn+1)
if startRow == endRow && startColumn >= endColumn {
log.Error("start and end row are the same, and startColumn >= endColumn", "startColumn", startColumn, "endColumn+1 ", endColumn+1)
return []byte{}, nil, nil
}

Expand Down Expand Up @@ -255,7 +255,7 @@ func (dasReader *PreimageCelestiaReader) Read(ctx context.Context, blobPointer *
}

data = data[:sequenceLength]
squareData := celestia.SquareData{
squareData := celestiaTypes.SquareData{
RowRoots: rowRoots,
ColumnRoots: leaves[squareSize:],
Rows: rows,
Expand Down Expand Up @@ -325,30 +325,19 @@ func main() {
delayedMessagesRead = lastBlockHeader.Nonce.Uint64()
}

// TODO: consider removing this panic
if arbChainParams.DataAvailabilityCommittee && arbChainParams.CelestiaDA {
panic(fmt.Sprintf("Error Multiple DA providers enabled: DAC is %v and CelestiaDA is %v", arbChainParams.DataAvailabilityCommittee, arbChainParams.CelestiaDA))
}

var dasReader arbstate.DataAvailabilityReader
if arbChainParams.DataAvailabilityCommittee {
dasReader = &PreimageDASReader{}
}
var celestiaReader celestia.DataAvailabilityReader
if arbChainParams.CelestiaDA {
celestiaReader = &PreimageCelestiaReader{}
}
backend := WavmInbox{}
var keysetValidationMode = arbstate.KeysetPanicIfInvalid
if backend.GetPositionWithinMessage() > 0 {
keysetValidationMode = arbstate.KeysetDontValidate
}
var daProviders []arbstate.DataAvailabilityProvider
if dasReader != nil {
daProviders = append(daProviders, arbstate.NewDAProviderDAS(dasReader))
}
if celestiaReader != nil {
daProviders = append(daProviders, arbstate.NewDAProviderCelestia(celestiaReader))
}
daProviders = append(daProviders, arbstate.NewDAProviderDAS(&PreimageDASReader{}))
daProviders = append(daProviders, arbstate.NewDAProviderCelestia(&PreimageCelestiaReader{}))
daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(&BlobPreimageReader{}))
inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, daProviders, keysetValidationMode)
ctx := context.Background()
Expand Down
74 changes: 37 additions & 37 deletions das/celestia/celestia.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/offchainlabs/nitro/das/celestia/types"
"github.com/offchainlabs/nitro/solgen/go/celestiagen"

blobstreamx "github.com/succinctlabs/blobstreamx/bindings"
Expand All @@ -35,7 +36,7 @@ type DAConfig struct {

type ValidatorConfig struct {
TendermintRPC string `koanf:"tendermint-rpc"`
EthClient string `koanf:"eth-ws"`
EthClient string `koanf:"eth-rpc"`
BlobstreamAddr string `koanf:"blobstream"`
}

Expand Down Expand Up @@ -71,7 +72,7 @@ func CelestiaDAConfigAddOptions(prefix string, f *pflag.FlagSet) {
f.String(prefix+".namespace-id", "", "Celestia Namespace to post data to")
f.String(prefix+".auth-token", "", "Auth token for Celestia Node")
f.String(prefix+".validator-config"+".tendermint-rpc", "", "Tendermint RPC endpoint, only used for validation")
f.String(prefix+".validator-config"+".eth-ws", "", "L1 Websocket connection, only used for validation")
f.String(prefix+".validator-config"+".eth-rpc", "", "L1 Websocket connection, only used for validation")
f.String(prefix+".validator-config"+".blobstream", "", "Blobstream address, only used for validation")
}

Expand Down Expand Up @@ -143,9 +144,12 @@ func NewCelestiaDA(cfg *DAConfig, ethClient *ethclient.Client) (*CelestiaDA, err
}, nil
}

// TODO (Diego): add retry logic and gas fee bumps
func (c *CelestiaDA) Store(ctx context.Context, message []byte) ([]byte, error) {

// set a 5 minute timeout context on submissions
// if it takes longer than that to successfully submit and verify a blob,
// then there's an issue with the connection to the celestia node
ctx, cancel := context.WithTimeout(ctx, time.Duration(time.Minute*5))
defer cancel()
dataBlob, err := blob.NewBlobV0(*c.Namespace, message)
if err != nil {
log.Warn("Error creating blob", "err", err)
Expand Down Expand Up @@ -220,7 +224,7 @@ func (c *CelestiaDA) Store(ctx context.Context, message []byte) ([]byte, error)
return nil, fmt.Errorf("storing Celestia information, odsSize*startRow=%v was larger than blobIndex=%v", odsSize*startRow, blob.Index)
}
startIndexOds := blobIndex - odsSize*startRow
blobPointer := BlobPointer{
blobPointer := types.BlobPointer{
BlockHeight: height,
Start: startIndexOds,
SharesLength: sharesLength,
Expand Down Expand Up @@ -253,16 +257,7 @@ func (c *CelestiaDA) Store(ctx context.Context, message []byte) ([]byte, error)
return serializedBlobPointerData, nil
}

type SquareData struct {
RowRoots [][]byte
ColumnRoots [][]byte
Rows [][][]byte
SquareSize uint64 // Refers to original data square size
StartRow uint64
EndRow uint64
}

func (c *CelestiaDA) Read(ctx context.Context, blobPointer *BlobPointer) ([]byte, *SquareData, error) {
func (c *CelestiaDA) Read(ctx context.Context, blobPointer *types.BlobPointer) ([]byte, *types.SquareData, error) {
// Wait until our client is synced
err := c.Client.Header.SyncWait(ctx)
if err != nil {
Expand Down Expand Up @@ -333,16 +328,16 @@ func (c *CelestiaDA) Read(ctx context.Context, blobPointer *BlobPointer) ([]byte

endRow := endIndexOds / odsSize

if endRow > odsSize || startRow > odsSize {
log.Error("endRow > odsSize || startRow > odsSize", "endRow", endRow, "startRow", startRow, "odsSize", odsSize)
if endRow >= odsSize || startRow >= odsSize {
log.Error("endRow >= odsSize || startRow >= odsSize", "endRow", endRow, "startRow", startRow, "odsSize", odsSize)
return []byte{}, nil, nil
}

startColumn := blobPointer.Start % odsSize
endColumn := endIndexOds % odsSize

if startRow == endRow && startColumn > endColumn+1 {
log.Error("startColumn > endColumn+1 on the same row", "startColumn", startColumn, "endColumn+1 ", endColumn+1)
if startRow == endRow && startColumn >= endColumn {
log.Error("start and end row are the same and startColumn >= endColumn", "startColumn", startColumn, "endColumn+1 ", endColumn+1)
return []byte{}, nil, nil
}

Expand All @@ -351,12 +346,7 @@ func (c *CelestiaDA) Read(ctx context.Context, blobPointer *BlobPointer) ([]byte
rows = append(rows, eds.Row(uint(i)))
}

printRows := [][][]byte{}
for i := 0; i < int(squareSize); i++ {
printRows = append(printRows, eds.Row(uint(i)))
}

squareData := SquareData{
squareData := types.SquareData{
RowRoots: header.DAH.RowRoots,
ColumnRoots: header.DAH.ColumnRoots,
Rows: rows,
Expand All @@ -373,9 +363,10 @@ func (c *CelestiaDA) GetProof(ctx context.Context, msg []byte) ([]byte, error) {
return nil, fmt.Errorf("no celestia prover config found")
}

fmt.Printf("Inbox Message: %v\n", msg)
buf := bytes.NewBuffer(msg)
msgLength := uint32(len(msg) + 1)
blobPointer := BlobPointer{}
// msgLength := uint32(len(msg) + 1)
blobPointer := types.BlobPointer{}
blobBytes := buf.Bytes()
err := blobPointer.UnmarshalBinary(blobBytes)
if err != nil {
Expand Down Expand Up @@ -405,6 +396,9 @@ func (c *CelestiaDA) GetProof(ctx context.Context, msg []byte) ([]byte, error) {
return nil, err
}

fmt.Printf("Blob Pointer Height: %v\n", blobPointer.BlockHeight)
fmt.Printf("Latest Blobstream Height: %v\n", latestCelestiaBlock)

var backwards bool
if blobPointer.BlockHeight < latestCelestiaBlock {
backwards = true
Expand Down Expand Up @@ -455,7 +449,7 @@ func (c *CelestiaDA) GetProof(ctx context.Context, msg []byte) ([]byte, error) {
log.Info("Verified Celestia Attestation", "height", blobPointer.BlockHeight, "valid", valid)

if valid {
sharesProof, err := c.Prover.Trpc.ProveShares(ctx, blobPointer.BlockHeight, blobPointer.Start, blobPointer.Start+blobPointer.SharesLength-1)
sharesProof, err := c.Prover.Trpc.ProveShares(ctx, blobPointer.BlockHeight, blobPointer.Start, blobPointer.Start+blobPointer.SharesLength)
if err != nil {
log.Error("Unable to get ShareProof", "err", err)
return nil, err
Expand All @@ -482,10 +476,12 @@ func (c *CelestiaDA) GetProof(ctx context.Context, msg []byte) ([]byte, error) {
return nil, err
}

// append size of batch + proofData
sizeBytes := make([]byte, 4)
binary.BigEndian.PutUint32(sizeBytes, uint32((len(proofData)))+msgLength)
proofData = append(proofData, sizeBytes...)
fmt.Printf("Proof Data: %v\n", proofData)

// // append size of batch + proofData
// sizeBytes := make([]byte, 4)
// binary.BigEndian.PutUint32(sizeBytes, uint32((len(proofData)))+msgLength)
// proofData = append(proofData, sizeBytes...)

return proofData, nil
}
Expand All @@ -496,12 +492,12 @@ func (c *CelestiaDA) GetProof(ctx context.Context, msg []byte) ([]byte, error) {
func (c *CelestiaDA) filter(ctx context.Context, latestBlock uint64, celestiaHeight uint64, backwards bool) (*blobstreamx.BlobstreamXDataCommitmentStored, error) {
// Geth has a default of 5000 block limit for filters
start := uint64(0)
if latestBlock < 5000 {
start = 0
if latestBlock > 5000 {
start = latestBlock - 5000
}
end := latestBlock

for attempt := 0; attempt < 10; attempt++ {
for attempt := 0; attempt < 11; attempt++ {
eventsIterator, err := c.Prover.BlobstreamX.FilterDataCommitmentStored(
&bind.FilterOpts{
Context: ctx,
Expand Down Expand Up @@ -543,9 +539,13 @@ func (c *CelestiaDA) filter(ctx context.Context, latestBlock uint64, celestiaHei
}

if backwards {
start -= 5000
if start >= 5000 {
start -= 5000
} else {
start = 0
}
if end < 5000 {
end = start + 10
end = start + 1000
} else {
end -= 5000
}
Expand Down
Loading

0 comments on commit 2b2750e

Please sign in to comment.