diff --git a/dataavailability/dataavailability.go b/dataavailability/dataavailability.go
index 1ca09a39ef..489c4c4a3d 100644
--- a/dataavailability/dataavailability.go
+++ b/dataavailability/dataavailability.go
@@ -7,12 +7,11 @@ import (
 
 	"github.com/0xPolygonHermez/zkevm-node/etherman/types"
 	"github.com/0xPolygonHermez/zkevm-node/log"
-	"github.com/0xPolygonHermez/zkevm-node/state"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 )
 
-const unexpectedHashTemplate = "missmatch on transaction data for batch num %d. Expected hash %s, actual hash: %s"
+const unexpectedHashTemplate = "mismatch on transaction data for batch num %d. Expected hash %s, actual hash: %s"
 
 // DataAvailability implements an abstract data availability integration
 type DataAvailability struct {
@@ -60,57 +59,81 @@ func (d *DataAvailability) PostSequence(ctx context.Context, sequences []types.S
 // 1. From local DB
 // 2. From Sequencer
 // 3. From DA backend
-func (d *DataAvailability) GetBatchL2Data(batchNum uint64, expectedTransactionsHash common.Hash) ([]byte, error) {
-	found := true
-	transactionsData, err := d.state.GetBatchL2DataByNumber(d.ctx, batchNum, nil)
-	if err != nil {
-		if err == state.ErrNotFound {
-			found = false
+func (d *DataAvailability) GetBatchL2Data(batchNums []uint64, batchHashes []common.Hash, dataAvailabilityMessage []byte) ([][]byte, error) {
+	if len(batchNums) != len(batchHashes) {
+		return nil, fmt.Errorf("invalid L2 batch data retrieval arguments, %d != %d", len(batchNums), len(batchHashes))
+	}
+
+	data, err := d.localData(batchNums, batchHashes)
+	if err == nil {
+		return data, nil
+	}
+
+	if !d.isTrustedSequencer {
+		data, err = d.trustedSequencerData(batchNums, batchHashes)
+		if err != nil {
+			log.Warnf("trusted sequencer failed to return data for batches %v: %s", batchNums, err.Error())
 		} else {
-			return nil, fmt.Errorf("failed to get batch data from state for batch num %d: %w", batchNum, err)
+			return data, nil
 		}
 	}
-	actualTransactionsHash := crypto.Keccak256Hash(transactionsData)
-	if !found || expectedTransactionsHash != actualTransactionsHash {
-		if found {
-			log.Warnf(unexpectedHashTemplate, batchNum, expectedTransactionsHash, actualTransactionsHash)
-		}
-		if !d.isTrustedSequencer {
-			log.Info("trying to get data from trusted sequencer")
-			data, err := d.getDataFromTrustedSequencer(batchNum, expectedTransactionsHash)
-			if err != nil {
-				log.Warn("failed to get data from trusted sequencer: %w", err)
-			} else {
-				return data, nil
-			}
-		}
+	return d.backend.GetSequence(d.ctx, batchHashes, dataAvailabilityMessage)
+}
 
-		log.Info("trying to get data from the data availability backend")
-		data, err := d.backend.GetBatchL2Data(batchNum, expectedTransactionsHash)
-		if err != nil {
-			log.Error("failed to get data from the data availability backend: %w", err)
-			if d.isTrustedSequencer {
-				return nil, fmt.Errorf("data not found on the local DB nor on any data committee member")
-			} else {
-				return nil, fmt.Errorf("data not found on the local DB, nor from the trusted sequencer nor on any data committee member")
-			}
+// localData retrieves batches from the local database and returns an error unless all are found
+func (d *DataAvailability) localData(numbers []uint64, hashes []common.Hash) ([][]byte, error) {
+	data, err := d.state.GetBatchL2DataByNumbers(d.ctx, numbers, nil)
+	if err != nil {
+		return nil, err
+	}
+	var batches [][]byte
+	for i := 0; i < len(numbers); i++ {
+		batchNumber := numbers[i]
+		expectedHash := hashes[i]
+		batchData, ok := data[batchNumber]
+		if !ok {
+			return nil, fmt.Errorf("missing batch %v", batchNumber)
+		}
+		actualHash := crypto.Keccak256Hash(batchData)
+		if actualHash != expectedHash {
+			err = fmt.Errorf(unexpectedHashTemplate, batchNumber, expectedHash, actualHash)
+			log.Warnf("wrong local data for hash: %s", err.Error())
+			return nil, err
+		} else {
+			batches = append(batches, batchData)
 		}
-		return data, nil
 	}
-	return transactionsData, nil
+	return batches, nil
 }
 
-func (d *DataAvailability) getDataFromTrustedSequencer(batchNum uint64, expectedTransactionsHash common.Hash) ([]byte, error) {
-	b, err := d.zkEVMClient.BatchByNumber(d.ctx, new(big.Int).SetUint64(batchNum))
+// trustedSequencerData retrieves batch data from the trusted sequencer and returns an error unless all are found
+func (d *DataAvailability) trustedSequencerData(batchNums []uint64, expectedHashes []common.Hash) ([][]byte, error) {
+	if len(batchNums) != len(expectedHashes) {
+		return nil, fmt.Errorf("invalid arguments, len of batch numbers does not equal length of expected hashes: %d != %d",
+			len(batchNums), len(expectedHashes))
+	}
+	var nums []*big.Int
+	for _, n := range batchNums {
+		nums = append(nums, new(big.Int).SetUint64(n))
+	}
+	batchData, err := d.zkEVMClient.BatchesByNumbers(d.ctx, nums)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get batch num %d from trusted sequencer: %w", batchNum, err)
+		return nil, err
 	}
-	actualTransactionsHash := crypto.Keccak256Hash(b.BatchL2Data)
-	if expectedTransactionsHash != actualTransactionsHash {
-		return nil, fmt.Errorf(
-			unexpectedHashTemplate, batchNum, expectedTransactionsHash, actualTransactionsHash,
-		)
+	if len(batchData) != len(batchNums) {
+		return nil, fmt.Errorf("missing batch data, expected %d, got %d", len(batchNums), len(batchData))
+	}
+	var result [][]byte
+	for i := 0; i < len(batchNums); i++ {
+		number := batchNums[i]
+		batch := batchData[i]
+		expectedTransactionsHash := expectedHashes[i]
+		actualTransactionsHash := crypto.Keccak256Hash(batch.BatchL2Data)
+		if expectedTransactionsHash != actualTransactionsHash {
+			return nil, fmt.Errorf(unexpectedHashTemplate, number, expectedTransactionsHash, actualTransactionsHash)
+		}
+		result = append(result, batch.BatchL2Data)
 	}
-	return b.BatchL2Data, nil
+	return result, nil
 }
 
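For reference, a minimal sketch of how a caller might use the batched `GetBatchL2Data` introduced above; the helper name and the wiring of `da` are assumptions for illustration, not part of this change.

```go
package example

import (
	"fmt"

	"github.com/0xPolygonHermez/zkevm-node/dataavailability"
	"github.com/ethereum/go-ethereum/common"
)

// fetchBatches is a hypothetical caller of the new batched API. `da` is assumed
// to be a *dataavailability.DataAvailability already wired with its state DB,
// zkEVM RPC client and DA backend; daMessage is the data availability message
// decoded from the L1 sequencing transaction.
func fetchBatches(da *dataavailability.DataAvailability, nums []uint64, expectedHashes []common.Hash, daMessage []byte) ([][]byte, error) {
	// Resolution order inside GetBatchL2Data: local DB, then trusted sequencer
	// (unless this node is the trusted sequencer), then the DA backend.
	batches, err := da.GetBatchL2Data(nums, expectedHashes, daMessage)
	if err != nil {
		return nil, fmt.Errorf("could not recover batch data for %v: %w", nums, err)
	}
	for i, data := range batches {
		fmt.Printf("batch %d: %d bytes of L2 data\n", nums[i], len(data))
	}
	return batches, nil
}
```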
diff --git a/dataavailability/datacommittee/datacommittee.go b/dataavailability/datacommittee/datacommittee.go
index 1d638e03e5..c79dfb645c 100644
--- a/dataavailability/datacommittee/datacommittee.go
+++ b/dataavailability/datacommittee/datacommittee.go
@@ -20,7 +20,7 @@ import (
 	"golang.org/x/net/context"
 )
 
-const unexpectedHashTemplate = "missmatch on transaction data for batch num %d. Expected hash %s, actual hash: %s"
+const unexpectedHashTemplate = "mismatch on transaction data. Expected hash %s, actual hash: %s"
 
 // DataCommitteeMember represents a member of the Data Committee
 type DataCommitteeMember struct {
@@ -87,8 +87,22 @@ func (d *DataCommitteeBackend) Init() error {
 	return nil
 }
 
+// GetSequence gets backend data one hash at a time. This should be optimized on the DAC side to get them all at once.
+func (d *DataCommitteeBackend) GetSequence(ctx context.Context, hashes []common.Hash, dataAvailabilityMessage []byte) ([][]byte, error) {
+	// TODO: optimize this on the DAC side by implementing a multi batch retrieve api
+	var batchData [][]byte
+	for _, h := range hashes {
+		data, err := d.GetBatchL2Data(h)
+		if err != nil {
+			return nil, err
+		}
+		batchData = append(batchData, data)
+	}
+	return batchData, nil
+}
+
 // GetBatchL2Data returns the data from the DAC. It checks that it matches with the expected hash
-func (d *DataCommitteeBackend) GetBatchL2Data(batchNum uint64, hash common.Hash) ([]byte, error) {
+func (d *DataCommitteeBackend) GetBatchL2Data(hash common.Hash) ([]byte, error) {
 	intialMember := d.selectedCommitteeMember
 	found := false
 	for !found && intialMember != -1 {
@@ -110,7 +124,7 @@ func (d *DataCommitteeBackend) GetBatchL2Data(batchNum uint64, hash common.Hash)
 		actualTransactionsHash := crypto.Keccak256Hash(data)
 		if actualTransactionsHash != hash {
 			unexpectedHash := fmt.Errorf(
-				unexpectedHashTemplate, batchNum, hash, actualTransactionsHash,
+				unexpectedHashTemplate, hash, actualTransactionsHash,
 			)
 			log.Warnf(
 				"error getting data from DAC node %s at %s: %s",
diff --git a/dataavailability/interfaces.go b/dataavailability/interfaces.go
index 441829fafb..d45b41b358 100644
--- a/dataavailability/interfaces.go
+++ b/dataavailability/interfaces.go
@@ -10,34 +10,49 @@ import (
 	"github.com/jackc/pgx/v4"
 )
 
+// DABackender is an interface for components that store and retrieve batch data
+type DABackender interface {
+	SequenceRetriever
+	SequenceSender
+	// Init initializes the DABackend
+	Init() error
+}
+
+// SequenceSender is used to send provided sequence of batches
+type SequenceSender interface {
+	// PostSequence sends the sequence data to the data availability backend, and returns the dataAvailabilityMessage
+	// as expected by the contract
+	PostSequence(ctx context.Context, batchesData [][]byte) ([]byte, error)
+}
+
+// SequenceRetriever is used to retrieve batch data
+type SequenceRetriever interface {
+	// GetSequence retrieves the sequence data from the data availability backend
+	GetSequence(ctx context.Context, batchHashes []common.Hash, dataAvailabilityMessage []byte) ([][]byte, error)
+}
+
+// === Internal interfaces ===
+
 type stateInterface interface {
 	GetBatchL2DataByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]byte, error)
+	GetBatchL2DataByNumbers(ctx context.Context, batchNumbers []uint64, dbTx pgx.Tx) (map[uint64][]byte, error)
 	GetBatchByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.Batch, error)
 }
 
 // BatchDataProvider is used to retrieve batch data
 type BatchDataProvider interface {
 	// GetBatchL2Data retrieve the data of a batch from the DA backend. The returned data must be the pre-image of the hash
-	GetBatchL2Data(batchNum uint64, hash common.Hash) ([]byte, error)
+	GetBatchL2Data(batchNum []uint64, batchHashes []common.Hash, dataAvailabilityMessage []byte) ([][]byte, error)
 }
 
-// SequenceSender is used to send provided sequence of batches
-type SequenceSender interface {
-	// PostSequence sends the sequence data to the data availability backend, and returns the dataAvailabilityMessage
-	// as expected by the contract
-	PostSequence(ctx context.Context, batchesData [][]byte) ([]byte, error)
-}
-
-// DABackender is the interface needed to implement in order to
-// integrate a DA service
-type DABackender interface {
+// DataManager is an interface for components that send and retrieve batch data
+type DataManager interface {
 	BatchDataProvider
 	SequenceSender
-	// Init initializes the DABackend
-	Init() error
 }
 
 // ZKEVMClientTrustedBatchesGetter contains the methods required to interact with zkEVM-RPC
 type ZKEVMClientTrustedBatchesGetter interface {
 	BatchByNumber(ctx context.Context, number *big.Int) (*types.Batch, error)
+	BatchesByNumbers(ctx context.Context, numbers []*big.Int) ([]*types.BatchData, error)
 }
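To make the reshaped interfaces concrete, here is a minimal sketch of what a DA integration now has to provide, given that `DABackender` is the composition of `SequenceSender` and `SequenceRetriever` plus `Init`; the `nullBackend` type is a hypothetical stand-in, not something added by this patch.

```go
package example

import (
	"context"
	"fmt"

	"github.com/0xPolygonHermez/zkevm-node/dataavailability"
	"github.com/ethereum/go-ethereum/common"
)

// nullBackend only illustrates the shape of the DABackender interface.
type nullBackend struct{}

// compile-time check that the sketch satisfies the interface
var _ dataavailability.DABackender = (*nullBackend)(nil)

// Init initializes the backend (nothing to do in this sketch).
func (b *nullBackend) Init() error { return nil }

// PostSequence would store batchesData off-chain and return the
// dataAvailabilityMessage expected by the consensus contract.
func (b *nullBackend) PostSequence(ctx context.Context, batchesData [][]byte) ([]byte, error) {
	return []byte{}, nil
}

// GetSequence would resolve each batch hash back to its pre-image.
func (b *nullBackend) GetSequence(ctx context.Context, batchHashes []common.Hash, dataAvailabilityMessage []byte) ([][]byte, error) {
	return nil, fmt.Errorf("null backend cannot retrieve %d batches", len(batchHashes))
}
```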
diff --git a/etherman/etherman.go b/etherman/etherman.go
index af211877aa..967e515022 100644
--- a/etherman/etherman.go
+++ b/etherman/etherman.go
@@ -1362,19 +1362,25 @@ func decodeSequencedBatches(smcAbi abi.ABI, txData []byte, forkID uint64, lastBa
 	maxSequenceTimestamp := data[1].(uint64)
 	initSequencedBatchNumber := data[2].(uint64)
 	coinbase := data[3].(common.Address)
+	dataAvailabilityMessage := (data[4]).([]byte)
 	sequencedBatches := make([]SequencedBatch, len(sequencesValidium))
-	for i, seq := range sequencesValidium {
+	var batchNums []uint64
+	var hashes []common.Hash
+	for i, validiumData := range sequencesValidium {
 		bn := lastBatchNumber - uint64(len(sequencesValidium)-(i+1))
-		batchL2Data, err := da.GetBatchL2Data(bn, sequencesValidium[i].TransactionsHash)
-		if err != nil {
-			return nil, err
-		}
-
+		batchNums = append(batchNums, bn)
+		hashes = append(hashes, validiumData.TransactionsHash)
+	}
+	batchL2Data, err := da.GetBatchL2Data(batchNums, hashes, dataAvailabilityMessage)
+	if err != nil {
+		return nil, err
+	}
+	for i, bn := range batchNums {
 		s := polygonzkevm.PolygonRollupBaseEtrogBatchData{
-			Transactions:         batchL2Data, // TODO: get data from DA
-			ForcedGlobalExitRoot: seq.ForcedGlobalExitRoot,
-			ForcedTimestamp:      seq.ForcedTimestamp,
-			ForcedBlockHashL1:    seq.ForcedBlockHashL1,
+			Transactions:         batchL2Data[i],
+			ForcedGlobalExitRoot: sequencesValidium[i].ForcedGlobalExitRoot,
+			ForcedTimestamp:      sequencesValidium[i].ForcedTimestamp,
+			ForcedBlockHashL1:    sequencesValidium[i].ForcedBlockHashL1,
 		}
 		batch := SequencedBatch{
 			BatchNumber: bn,
@@ -1394,7 +1400,6 @@ func decodeSequencedBatches(smcAbi abi.ABI, txData []byte, forkID uint64, lastBa
 		}
 		sequencedBatches[i] = batch
 	}
-
 	return sequencedBatches, nil
 }
 
diff --git a/etherman/etherman_test.go b/etherman/etherman_test.go
index c3e5a16646..728ee7b4b0 100644
--- a/etherman/etherman_test.go
+++ b/etherman/etherman_test.go
@@ -164,9 +164,12 @@ func TestSequencedBatchesEvent(t *testing.T) {
 	}, polygonzkevm.PolygonValidiumEtrogValidiumBatchData{
 		TransactionsHash: txsHash,
 	})
-	da.Mock.On("GetBatchL2Data", uint64(2), txsHash).Return(data, nil)
-	da.Mock.On("GetBatchL2Data", uint64(3), txsHash).Return(data, nil)
-	_, err = etherman.ZkEVM.SequenceBatchesValidium(auth, sequences, uint64(time.Now().Unix()), uint64(1), auth.From, []byte{})
+	batchNums := []uint64{2, 3}
+	batchHashes := []common.Hash{txsHash, txsHash}
+	batchData := [][]byte{data, data}
+	daMessage, _ := hex.DecodeString("123456789123456789")
+	da.Mock.On("GetBatchL2Data", batchNums, batchHashes, daMessage).Return(batchData, nil)
+	_, err = etherman.ZkEVM.SequenceBatchesValidium(auth, sequences, uint64(time.Now().Unix()), uint64(1), auth.From, daMessage)
 	require.NoError(t, err)
 
 	// Mine the tx in a block
@@ -204,9 +207,10 @@ func TestVerifyBatchEvent(t *testing.T) {
 	tx := polygonzkevm.PolygonValidiumEtrogValidiumBatchData{
 		TransactionsHash: crypto.Keccak256Hash(common.Hex2Bytes(rawTxs)),
 	}
-	_, err = etherman.ZkEVM.SequenceBatchesValidium(auth, []polygonzkevm.PolygonValidiumEtrogValidiumBatchData{tx}, uint64(time.Now().Unix()), uint64(1), auth.From, nil)
+	daMessage, _ := hex.DecodeString("1234")
+	_, err = etherman.ZkEVM.SequenceBatchesValidium(auth, []polygonzkevm.PolygonValidiumEtrogValidiumBatchData{tx}, uint64(time.Now().Unix()), uint64(1), auth.From, daMessage)
 	require.NoError(t, err)
-	da.Mock.On("GetBatchL2Data", uint64(2), crypto.Keccak256Hash(common.Hex2Bytes(rawTxs))).Return(common.Hex2Bytes(rawTxs), nil)
+	da.Mock.On("GetBatchL2Data", []uint64{2}, []common.Hash{crypto.Keccak256Hash(common.Hex2Bytes(rawTxs))}, daMessage).Return([][]byte{common.Hex2Bytes(rawTxs)}, nil)
 
 	// Mine the tx in a block
 	ethBackend.Commit()
@@ -319,11 +323,11 @@ func TestSendSequences(t *testing.T) {
 		BatchL2Data:          batchL2Data,
 		LastL2BLockTimestamp: time.Now().Unix(),
 	}
+	daMessage, _ := hex.DecodeString("1234")
 	lastL2BlockTStamp := tx1.Time().Unix()
-	// TODO: fix params
-	tx, err := etherman.sequenceBatches(*auth, []ethmanTypes.Sequence{sequence}, uint64(lastL2BlockTStamp), uint64(1), auth.From, []byte{})
+	tx, err := etherman.sequenceBatches(*auth, []ethmanTypes.Sequence{sequence}, uint64(lastL2BlockTStamp), uint64(1), auth.From, daMessage)
 	require.NoError(t, err)
-	da.Mock.On("GetBatchL2Data", uint64(2), crypto.Keccak256Hash(batchL2Data)).Return(batchL2Data, nil)
+	da.Mock.On("GetBatchL2Data", []uint64{2}, []common.Hash{crypto.Keccak256Hash(batchL2Data)}, daMessage).Return([][]byte{batchL2Data}, nil)
 
 	log.Debug("TX: ", tx.Hash())
 	ethBackend.Commit()
diff --git a/etherman/interfaces.go b/etherman/interfaces.go
index 07d86de3d7..24b97ee574 100644
--- a/etherman/interfaces.go
+++ b/etherman/interfaces.go
@@ -3,5 +3,5 @@ package etherman
 import "github.com/ethereum/go-ethereum/common"
 
 type dataAvailabilityProvider interface {
-	GetBatchL2Data(batchNum uint64, hash common.Hash) ([]byte, error)
+	GetBatchL2Data(batchNum []uint64, hash []common.Hash, dataAvailabilityMessage []byte) ([][]byte, error)
 }
diff --git a/etherman/mock_da.go b/etherman/mock_da.go
index cb19f298d2..b6d44d6414 100644
--- a/etherman/mock_da.go
+++ b/etherman/mock_da.go
@@ -13,29 +13,29 @@ type daMock struct {
 	mock.Mock
 }
 
-// GetBatchL2Data provides a mock function with given fields: batchNum, hash
-func (_m *daMock) GetBatchL2Data(batchNum uint64, hash common.Hash) ([]byte, error) {
-	ret := _m.Called(batchNum, hash)
+// GetBatchL2Data provides a mock function with given fields: batchNum, hash, dataAvailabilityMessage
+func (_m *daMock) GetBatchL2Data(batchNum []uint64, hash []common.Hash, dataAvailabilityMessage []byte) ([][]byte, error) {
+	ret := _m.Called(batchNum, hash, dataAvailabilityMessage)
 
 	if len(ret) == 0 {
 		panic("no return value specified for GetBatchL2Data")
 	}
 
-	var r0 []byte
+	var r0 [][]byte
 	var r1 error
-	if rf, ok := ret.Get(0).(func(uint64, common.Hash) ([]byte, error)); ok {
-		return rf(batchNum, hash)
+	if rf, ok := ret.Get(0).(func([]uint64, []common.Hash, []byte) ([][]byte, error)); ok {
+		return rf(batchNum, hash, dataAvailabilityMessage)
 	}
-	if rf, ok := ret.Get(0).(func(uint64, common.Hash) []byte); ok {
-		r0 = rf(batchNum, hash)
+	if rf, ok := ret.Get(0).(func([]uint64, []common.Hash, []byte) [][]byte); ok {
+		r0 = rf(batchNum, hash, dataAvailabilityMessage)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).([]byte)
+			r0 = ret.Get(0).([][]byte)
 		}
 	}
 
-	if rf, ok := ret.Get(1).(func(uint64, common.Hash) error); ok {
-		r1 = rf(batchNum, hash)
+	if rf, ok := ret.Get(1).(func([]uint64, []common.Hash, []byte) error); ok {
+		r1 = rf(batchNum, hash, dataAvailabilityMessage)
 	} else {
 		r1 = ret.Error(1)
 	}
diff --git a/jsonrpc/client/zkevm.go b/jsonrpc/client/zkevm.go
index 7bd6be3332..a1937d5edb 100644
--- a/jsonrpc/client/zkevm.go
+++ b/jsonrpc/client/zkevm.go
@@ -58,6 +58,40 @@ func (c *Client) BatchByNumber(ctx context.Context, number *big.Int) (*types.Bat
 	return result, nil
 }
 
+// BatchesByNumbers returns batches from the current canonical chain by batch numbers. If the list is empty, the last
+// known batch is returned as a list.
+func (c *Client) BatchesByNumbers(_ context.Context, numbers []*big.Int) ([]*types.BatchData, error) {
+	var list []types.BatchNumber
+	for _, n := range numbers {
+		list = append(list, types.BatchNumber(n.Int64()))
+	}
+	if len(list) == 0 {
+		list = append(list, types.LatestBatchNumber)
+	}
+
+	var batchNumbers []string
+	for _, n := range list {
+		batchNumbers = append(batchNumbers, n.StringOrHex())
+	}
+
+	response, err := JSONRPCCall(c.url, "zkevm_getBatchDataByNumbers", batchNumbers, true)
+	if err != nil {
+		return nil, err
+	}
+
+	if response.Error != nil {
+		return nil, response.Error.RPCError()
+	}
+
+	var result *types.BatchDataResult
+	err = json.Unmarshal(response.Result, &result)
+	if err != nil {
+		return nil, err
+	}
+
+	return result.Data, nil
+}
+
 // ExitRootsByGER returns the exit roots accordingly to the provided Global Exit Root
 func (c *Client) ExitRootsByGER(ctx context.Context, globalExitRoot common.Hash) (*types.ExitRoots, error) {
 	response, err := JSONRPCCall(c.url, "zkevm_getExitRootsByGER", globalExitRoot.String())
diff --git a/jsonrpc/endpoints_zkevm.go b/jsonrpc/endpoints_zkevm.go
index f4c6020ba8..7c3e17a555 100644
--- a/jsonrpc/endpoints_zkevm.go
+++ b/jsonrpc/endpoints_zkevm.go
@@ -204,6 +204,42 @@ func (z *ZKEVMEndpoints) GetBatchByNumber(batchNumber types.BatchNumber, fullTx
 	})
 }
 
+// GetBatchDataByNumbers returns the batch data for batches by numbers
+func (z *ZKEVMEndpoints) GetBatchDataByNumbers(filter types.BatchFilter) (interface{}, types.Error) {
+	return z.txMan.NewDbTxScope(z.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
+		var batchNumbers []uint64
+		for _, bn := range filter.Numbers {
+			n, rpcErr := bn.GetNumericBatchNumber(ctx, z.state, z.etherman, dbTx)
+			if rpcErr != nil {
+				return nil, rpcErr
+			}
+			batchNumbers = append(batchNumbers, n)
+		}
+
+		batchesData, err := z.state.GetBatchL2DataByNumbers(ctx, batchNumbers, dbTx)
+		if errors.Is(err, state.ErrNotFound) {
+			return nil, nil
+		} else if err != nil {
+			return RPCErrorResponse(types.DefaultErrorCode,
+				fmt.Sprintf("couldn't load batch data from state by numbers %v", filter.Numbers), err, true)
+		}
+
+		var ret []*types.BatchData
+		for _, n := range batchNumbers {
+			data := &types.BatchData{Number: types.ArgUint64(n)}
+			if b, ok := batchesData[n]; ok {
+				data.BatchL2Data = b
+				data.Empty = false
+			} else {
+				data.Empty = true
+			}
+			ret = append(ret, data)
+		}
+
+		return types.BatchDataResult{Data: ret}, nil
+	})
+}
+
 // GetFullBlockByNumber returns information about a block by block number
 func (z *ZKEVMEndpoints) GetFullBlockByNumber(number types.BlockNumber, fullTx bool) (interface{}, types.Error) {
 	return z.txMan.NewDbTxScope(z.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
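A short sketch of exercising the new `zkevm_getBatchDataByNumbers` surface through the client method added above; the node URL and batch numbers are placeholders, and the `NewClient` constructor is assumed to be the package's existing one.

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/0xPolygonHermez/zkevm-node/jsonrpc/client"
)

func main() {
	// Placeholder endpoint of a zkEVM node exposing the zkevm_ namespace.
	c := client.NewClient("http://localhost:8545")

	// Request the L2 data of batches 2 and 3 in a single call.
	batches, err := c.BatchesByNumbers(context.Background(), []*big.Int{big.NewInt(2), big.NewInt(3)})
	if err != nil {
		panic(err)
	}
	for _, b := range batches {
		// Empty flags numbers for which the state has no batch data.
		fmt.Printf("batch %d: empty=%v, %d bytes\n", uint64(b.Number), b.Empty, len(b.BatchL2Data))
	}
}
```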
diff --git a/jsonrpc/endpoints_zkevm.openrpc.json b/jsonrpc/endpoints_zkevm.openrpc.json
index d795e0f1cb..5875d2fc76 100644
--- a/jsonrpc/endpoints_zkevm.openrpc.json
+++ b/jsonrpc/endpoints_zkevm.openrpc.json
@@ -166,6 +166,18 @@
         }
       ]
     },
+    {
+      "name": "zkevm_getBatchDataByNumbers",
+      "summary": "Gets batch data for a given list of batch numbers",
+      "params": [
+        {
+          "$ref": "#/components/contentDescriptors/BatchFilter"
+        }
+      ],
+      "result": {
+        "$ref": "#/components/contentDescriptors/BatchDataResult"
+      }
+    },
     {
       "name": "zkevm_getBatchByNumber",
       "summary": "Gets a batch for a given number",
@@ -512,6 +524,27 @@
         "$ref": "#/components/schemas/Batch"
       }
     },
+    "BatchFilter": {
+      "name": "filter",
+      "description": "batch filter",
+      "schema": {
+        "$ref": "#/components/schemas/BatchFilter"
+      }
+    },
+    "BatchData": {
+      "name": "batchData",
+      "description": "batch data",
+      "schema": {
+        "$ref": "#/components/schemas/BatchData"
+      }
+    },
+    "BatchDataResult": {
+      "name": "batchDataResult",
+      "description": "batch data result",
+      "schema": {
+        "$ref": "#/components/schemas/BatchDataResult"
+      }
+    },
     "Block": {
       "name": "block",
       "summary": "A block",
@@ -1440,4 +1473,4 @@
       }
     }
   }
-}
\ No newline at end of file
+}
diff --git a/jsonrpc/mocks/mock_state.go b/jsonrpc/mocks/mock_state.go
index 36f552fe65..05e596d513 100644
--- a/jsonrpc/mocks/mock_state.go
+++ b/jsonrpc/mocks/mock_state.go
@@ -211,6 +211,36 @@ func (_m *StateMock) GetBatchByNumber(ctx context.Context, batchNumber uint64, d
 	return r0, r1
 }
 
+// GetBatchL2DataByNumbers provides a mock function with given fields: ctx, batchNumbers, dbTx
+func (_m *StateMock) GetBatchL2DataByNumbers(ctx context.Context, batchNumbers []uint64, dbTx pgx.Tx) (map[uint64][]byte, error) {
+	ret := _m.Called(ctx, batchNumbers, dbTx)
+
+	if len(ret) == 0 {
+		panic("no return value specified for GetBatchL2DataByNumbers")
+	}
+
+	var r0 map[uint64][]byte
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, []uint64, pgx.Tx) (map[uint64][]byte, error)); ok {
+		return rf(ctx, batchNumbers, dbTx)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, []uint64, pgx.Tx) map[uint64][]byte); ok {
+		r0 = rf(ctx, batchNumbers, dbTx)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(map[uint64][]byte)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, []uint64, pgx.Tx) error); ok {
+		r1 = rf(ctx, batchNumbers, dbTx)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
 // GetBatchTimestamp provides a mock function with given fields: ctx, batchNumber, forcedForkId, dbTx
 func (_m *StateMock) GetBatchTimestamp(ctx context.Context, batchNumber uint64, forcedForkId *uint64, dbTx pgx.Tx) (*time.Time, error) {
 	ret := _m.Called(ctx, batchNumber, forcedForkId, dbTx)
diff --git a/jsonrpc/types/interfaces.go b/jsonrpc/types/interfaces.go
index 5c3322a637..bb573744fd 100644
--- a/jsonrpc/types/interfaces.go
+++ b/jsonrpc/types/interfaces.go
@@ -65,6 +65,7 @@ type StateInterface interface {
 	GetLastVerifiedBatch(ctx context.Context, dbTx pgx.Tx) (*state.VerifiedBatch, error)
 	GetLastBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error)
 	GetBatchByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.Batch, error)
+	GetBatchL2DataByNumbers(ctx context.Context, batchNumbers []uint64, dbTx pgx.Tx) (map[uint64][]byte, error)
 	GetTransactionsByBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (txs []types.Transaction, effectivePercentages []uint8, err error)
 	GetVirtualBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.VirtualBatch, error)
 	GetVerifiedBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.VerifiedBatch, error)
diff --git a/jsonrpc/types/types.go b/jsonrpc/types/types.go
index b9c902cc1a..1c7bfe5b0b 100644
--- a/jsonrpc/types/types.go
+++ b/jsonrpc/types/types.go
@@ -446,6 +446,23 @@ func NewBatch(ctx context.Context, st StateInterface, batch *state.Batch, virtua
 	return res, nil
 }
 
+// BatchFilter is a list of batch numbers to retrieve
+type BatchFilter struct {
+	Numbers []BatchNumber `json:"numbers"`
+}
+
+// BatchData is an abbreviated structure that only contains the number and L2 batch data
+type BatchData struct {
+	Number      ArgUint64 `json:"number"`
+	BatchL2Data ArgBytes  `json:"batchL2Data,omitempty"`
+	Empty       bool      `json:"empty"`
+}
+
+// BatchDataResult is a list of BatchData for a BatchFilter
+type BatchDataResult struct {
+	Data []*BatchData `json:"data"`
+}
+
 // TransactionOrHash for union type of transaction and types.Hash
 type TransactionOrHash struct {
 	Hash *common.Hash
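Since these types define the wire format of the new endpoint, a small sketch of how a `BatchDataResult` serializes may be useful; the values are invented, and the hex text encoding relies on the existing `ArgUint64`/`ArgBytes` marshalling.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/0xPolygonHermez/zkevm-node/jsonrpc/types"
)

func main() {
	res := types.BatchDataResult{
		Data: []*types.BatchData{
			{Number: 2, BatchL2Data: types.ArgBytes{0x0b}},
			{Number: 3, Empty: true}, // requested but not present in state
		},
	}
	out, _ := json.Marshal(res)
	fmt.Println(string(out))
	// Expected shape (numbers and bytes hex-encoded, batchL2Data omitted when empty):
	// {"data":[{"number":"0x2","batchL2Data":"0x0b","empty":false},{"number":"0x3","empty":true}]}
}
```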
diff --git a/state/interfaces.go b/state/interfaces.go
index 87a8fe205e..e4e4511997 100644
--- a/state/interfaces.go
+++ b/state/interfaces.go
@@ -150,6 +150,7 @@ type storage interface {
 	GetVirtualBatchParentHash(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (common.Hash, error)
 	GetForcedBatchParentHash(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (common.Hash, error)
 	GetBatchL2DataByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]byte, error)
+	GetBatchL2DataByNumbers(ctx context.Context, batchNumbers []uint64, dbTx pgx.Tx) (map[uint64][]byte, error)
 	GetLatestBatchGlobalExitRoot(ctx context.Context, dbTx pgx.Tx) (common.Hash, error)
 	GetL2TxHashByTxHash(ctx context.Context, hash common.Hash, dbTx pgx.Tx) (*common.Hash, error)
 	GetSyncInfoData(ctx context.Context, dbTx pgx.Tx) (SyncInfoDataOnStorage, error)
diff --git a/state/mocks/mock_storage.go b/state/mocks/mock_storage.go
index 2ea3a11f1b..7822194e4c 100644
--- a/state/mocks/mock_storage.go
+++ b/state/mocks/mock_storage.go
@@ -1767,6 +1767,66 @@ func (_c *StorageMock_GetBatchL2DataByNumber_Call) RunAndReturn(run func(context
 	return _c
 }
 
+// GetBatchL2DataByNumbers provides a mock function with given fields: ctx, batchNumbers, dbTx
+func (_m *StorageMock) GetBatchL2DataByNumbers(ctx context.Context, batchNumbers []uint64, dbTx pgx.Tx) (map[uint64][]byte, error) {
+	ret := _m.Called(ctx, batchNumbers, dbTx)
+
+	if len(ret) == 0 {
+		panic("no return value specified for GetBatchL2DataByNumbers")
+	}
+
+	var r0 map[uint64][]byte
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, []uint64, pgx.Tx) (map[uint64][]byte, error)); ok {
+		return rf(ctx, batchNumbers, dbTx)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, []uint64, pgx.Tx) map[uint64][]byte); ok {
+		r0 = rf(ctx, batchNumbers, dbTx)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(map[uint64][]byte)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, []uint64, pgx.Tx) error); ok {
+		r1 = rf(ctx, batchNumbers, dbTx)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// StorageMock_GetBatchL2DataByNumbers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBatchL2DataByNumbers'
+type StorageMock_GetBatchL2DataByNumbers_Call struct {
+	*mock.Call
+}
+
+// GetBatchL2DataByNumbers is a helper method to define mock.On call
+//   - ctx context.Context
+//   - batchNumbers []uint64
+//   - dbTx pgx.Tx
+func (_e *StorageMock_Expecter) GetBatchL2DataByNumbers(ctx interface{}, batchNumbers interface{}, dbTx interface{}) *StorageMock_GetBatchL2DataByNumbers_Call {
+	return &StorageMock_GetBatchL2DataByNumbers_Call{Call: _e.mock.On("GetBatchL2DataByNumbers", ctx, batchNumbers, dbTx)}
+}
+
+func (_c *StorageMock_GetBatchL2DataByNumbers_Call) Run(run func(ctx context.Context, batchNumbers []uint64, dbTx pgx.Tx)) *StorageMock_GetBatchL2DataByNumbers_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].([]uint64), args[2].(pgx.Tx))
+	})
+	return _c
+}
+
+func (_c *StorageMock_GetBatchL2DataByNumbers_Call) Return(_a0 map[uint64][]byte, _a1 error) *StorageMock_GetBatchL2DataByNumbers_Call {
+	_c.Call.Return(_a0, _a1)
+	return _c
+}
+
+func (_c *StorageMock_GetBatchL2DataByNumbers_Call) RunAndReturn(run func(context.Context, []uint64, pgx.Tx) (map[uint64][]byte, error)) *StorageMock_GetBatchL2DataByNumbers_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // GetBatchNumberOfL2Block provides a mock function with given fields: ctx, blockNumber, dbTx
 func (_m *StorageMock) GetBatchNumberOfL2Block(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (uint64, error) {
 	ret := _m.Called(ctx, blockNumber, dbTx)
diff --git a/state/pgstatestorage/pgstatestorage.go b/state/pgstatestorage/pgstatestorage.go
index 090aee86c7..613bfb2d53 100644
--- a/state/pgstatestorage/pgstatestorage.go
+++ b/state/pgstatestorage/pgstatestorage.go
@@ -358,15 +358,46 @@ func (p *PostgresStorage) GetNativeBlockHashesInRange(ctx context.Context, fromB
 
 // GetBatchL2DataByNumber returns the batch L2 data of the given batch number.
 func (p *PostgresStorage) GetBatchL2DataByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]byte, error) {
-	const getBatchL2DataByBatchNumber = "SELECT raw_txs_data FROM state.batch WHERE batch_num = $1"
-	q := p.getExecQuerier(dbTx)
-	var batchL2Data []byte
-	err := q.QueryRow(ctx, getBatchL2DataByBatchNumber, batchNumber).Scan(&batchL2Data)
+	batchData, err := p.GetBatchL2DataByNumbers(ctx, []uint64{batchNumber}, dbTx)
+	if err != nil {
+		return nil, err
+	}
+	data, ok := batchData[batchNumber]
+	if !ok {
+		return nil, state.ErrNotFound
+	}
+	return data, nil
+}
+// GetBatchL2DataByNumbers returns the batch L2 data of the given batch numbers. The data is a union of state.batch and state.forced_batch tables.
+func (p *PostgresStorage) GetBatchL2DataByNumbers(ctx context.Context, batchNumbers []uint64, dbTx pgx.Tx) (map[uint64][]byte, error) {
+	const getBatchL2DataByBatchNumber = `
+	SELECT batch_num, raw_txs_data FROM state.batch WHERE batch_num = ANY($1)
+	UNION
+	SELECT forced_batch_num, convert_from(decode(raw_txs_data, 'hex'), 'UTF8')::bytea FROM state.forced_batch WHERE forced_batch_num = ANY($2)
+`
+	q := p.getExecQuerier(dbTx)
+	rows, err := q.Query(ctx, getBatchL2DataByBatchNumber, batchNumbers, batchNumbers)
 	if errors.Is(err, pgx.ErrNoRows) {
 		return nil, state.ErrNotFound
 	} else if err != nil {
 		return nil, err
 	}
-	return batchL2Data, nil
+	defer rows.Close()
+	batchL2DataMap := make(map[uint64][]byte)
+	for rows.Next() {
+		var (
+			batchNum    uint64
+			batchL2Data []byte
+		)
+		err := rows.Scan(&batchNum, &batchL2Data)
+		if err != nil {
+			return nil, err
+		}
+		batchL2DataMap[batchNum] = batchL2Data
+	}
+	if len(batchL2DataMap) == 0 {
+		return nil, state.ErrNotFound
+	}
+	return batchL2DataMap, nil
 }
 
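For completeness, a sketch of how a storage consumer might use the new map-based getter and its not-found semantics; the helper and its call site are assumptions, only `GetBatchL2DataByNumbers` and `state.ErrNotFound` come from the code above.

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/0xPolygonHermez/zkevm-node/state"
	"github.com/0xPolygonHermez/zkevm-node/state/pgstatestorage"
	"github.com/jackc/pgx/v4"
)

// orderedBatchData is a hypothetical helper that keeps the caller's ordering
// on top of the map returned by GetBatchL2DataByNumbers.
func orderedBatchData(ctx context.Context, p *pgstatestorage.PostgresStorage, nums []uint64, dbTx pgx.Tx) ([][]byte, error) {
	byNum, err := p.GetBatchL2DataByNumbers(ctx, nums, dbTx)
	if errors.Is(err, state.ErrNotFound) {
		// none of the requested numbers exist in state.batch or state.forced_batch
		return nil, fmt.Errorf("no batch data for %v: %w", nums, err)
	} else if err != nil {
		return nil, err
	}
	ordered := make([][]byte, 0, len(nums))
	for _, n := range nums {
		// nil when the batch row exists without data, or the number was not returned at all
		ordered = append(ordered, byNum[n])
	}
	return ordered, nil
}
```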
diff --git a/state/pgstatestorage/pgstatestorage_test.go b/state/pgstatestorage/pgstatestorage_test.go
index 3aa693b4bf..eeefa3ffc8 100644
--- a/state/pgstatestorage/pgstatestorage_test.go
+++ b/state/pgstatestorage/pgstatestorage_test.go
@@ -1157,6 +1157,55 @@ func TestGetBatchL2DataByNumber(t *testing.T) {
 	actualData, err := testState.GetBatchL2DataByNumber(ctx, batchNum, tx)
 	require.NoError(t, err)
 	assert.Equal(t, expectedData, actualData)
+
+	multiGet := []uint64{uint64(4), uint64(5), uint64(6)}
+	allData, err := testState.GetBatchL2DataByNumbers(ctx, multiGet, tx)
+	require.NoError(t, err)
+	require.Equal(t, expectedData, allData[uint64(5)])
+}
+
+func TestGetBatchL2DataByNumbers(t *testing.T) {
+	initOrResetDB()
+	ctx := context.Background()
+	tx, err := testState.BeginStateTransaction(ctx)
+	require.NoError(t, err)
+	defer func() { require.NoError(t, tx.Commit(ctx)) }()
+
+	var i1, i2, i3, i4, i5 = uint64(1), uint64(2), uint64(3), uint64(4), uint64(5)
+	var d1, d2, d4 = []byte("foobar"), []byte("dingbat"), []byte{0xb}
+
+	const insertBatch = "INSERT INTO state.batch (batch_num, raw_txs_data) VALUES ($1, $2)"
+	_, err = tx.Exec(ctx, insertBatch, i1, d1)
+	require.NoError(t, err)
+	_, err = tx.Exec(ctx, insertBatch, i2, d2)
+	require.NoError(t, err)
+	_, err = tx.Exec(ctx, insertBatch, i3, nil)
+	require.NoError(t, err)
+
+	// Add a forced batch too, needs a block
+	block1 := *block
+	block1.BlockNumber = 1000
+	err = testState.AddBlock(ctx, &block1, tx)
+	require.NoError(t, err)
+	err = tx.Commit(ctx)
+	require.NoError(t, err)
+
+	tx, err = testState.BeginStateTransaction(ctx)
+	require.NoError(t, err)
+
+	const insertForcedBatch = "INSERT INTO state.forced_batch (forced_batch_num, timestamp, raw_txs_data, block_num) VALUES (4, now(),'0b', 1000)"
+	_, err = testState.Exec(ctx, insertForcedBatch)
+	require.NoError(t, err)
+
+	allData, err := testState.GetBatchL2DataByNumbers(ctx, []uint64{i1, i2, i3, i4, i5}, tx)
+	require.NoError(t, err)
+	assert.Equal(t, d1, allData[i1])
+	assert.Equal(t, d2, allData[i2])
+	assert.Nil(t, allData[i3])
+	assert.Equal(t, d4, allData[i4])
+
+	_, ok := allData[i5]
+	assert.False(t, ok)
 }
 
 func createL1InfoTreeExitRootStorageEntryForTest(blockNumber uint64, index uint32) *state.L1InfoTreeExitRootStorageEntry {
@@ -1360,6 +1409,10 @@ func TestGetForcedBatch(t *testing.T) {
 	require.Equal(t, uint64(2002), fb.BlockNumber)
 	require.Equal(t, "0x717e05de47a87a7d1679e183f1c224150675f6302b7da4eaab526b2b91ae0761", fb.GlobalExitRoot.String())
 	require.Equal(t, []byte{0xb}, fb.RawTxsData)
+
+	fbData, err := testState.GetBatchL2DataByNumber(ctx, 1, dbTx)
+	require.NoError(t, err)
+	require.Equal(t, []byte{0xb}, fbData)
 }
 
 func TestGetLastGER(t *testing.T) {
diff --git a/test/docker-compose.yml b/test/docker-compose.yml
index 25c95cfbd5..32bbb6feb6 100644
--- a/test/docker-compose.yml
+++ b/test/docker-compose.yml
@@ -2,7 +2,7 @@ version: "3.5"
 networks:
   default:
     name: zkevm
-    
+
 services:
   grafana:
     container_name: grafana
@@ -519,6 +519,8 @@ services:
       - 50071:50071 # Executor
     volumes:
       - ./config/test.prover.config.json:/usr/src/app/config.json
+    environment:
+      - EXPERIMENTAL_DOCKER_DESKTOP_FORCE_QEMU=1
     command: >
       zkProver
       -c /usr/src/app/config.json
@@ -628,7 +630,7 @@ services:
   zkevm-sh:
     container_name: zkevm-sh
     image: zkevm-node
-    stdin_open: true 
+    stdin_open: true
     tty: true
     environment:
       - ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db
@@ -685,4 +687,4 @@ services:
     command:
       - "postgres"
       - "-N"
-      - "500"
\ No newline at end of file
+      - "500"