diff --git a/block/block.go b/block/block.go
index 02deb391d..c030d1ab5 100644
--- a/block/block.go
+++ b/block/block.go
@@ -1,45 +1,29 @@
 package block
 
 import (
-	"errors"
 	"fmt"
 
-	"github.com/dymensionxyz/gerr-cosmos/gerrc"
-
-	errorsmod "cosmossdk.io/errors"
 	"github.com/dymensionxyz/dymint/types"
 	"github.com/dymensionxyz/dymint/types/metrics"
 )
 
-// applyBlockWithFraudHandling calls applyBlock and validateBlockBeforeApply with fraud handling.
-func (m *Manager) applyBlockWithFraudHandling(block *types.Block, commit *types.Commit, blockMetaData types.BlockMetaData) error {
-	validateWithFraud := func() error {
-		if m.Conf.SkipValidationHeight != block.Header.Height {
-			if err := m.validateBlockBeforeApply(block, commit); err != nil {
-				m.blockCache.Delete(block.Header.Height)
-				// TODO: can we take an action here such as dropping the peer / reducing their reputation?
-				return fmt.Errorf("block not valid at height %d, dropping it: err:%w", block.Header.Height, err)
-			}
-		}
-
-		if err := m.applyBlock(block, commit, blockMetaData); err != nil {
-			return fmt.Errorf("apply block: %w", err)
+// validateAndApplyBlock calls validateBlockBeforeApply and applyBlock.
+func (m *Manager) validateAndApplyBlock(block *types.Block, commit *types.Commit, blockMetaData types.BlockMetaData) error {
+	if m.Conf.SkipValidationHeight != block.Header.Height {
+		if err := m.validateBlockBeforeApply(block, commit); err != nil {
+			m.blockCache.Delete(block.Header.Height)
+			// TODO: can we take an action here such as dropping the peer / reducing their reputation?
+			return fmt.Errorf("block not valid at height %d, dropping it: %w", block.Header.Height, err)
 		}
-
-		return nil
 	}
 
-	err := validateWithFraud()
-	if errors.Is(err, gerrc.ErrFault) {
-		// Here we handle the fault by calling the fraud handler.
-		// FraudHandler is an interface that defines a method to handle faults. Implement this interface to handle faults
-		// in specific ways. For example, once a fault is detected, it publishes a DataHealthStatus event to the
-		// pubsub which sets the node in a frozen state.
-		m.FraudHandler.HandleFault(m.Ctx, err)
+	if err := m.applyBlock(block, commit, blockMetaData); err != nil {
+		return fmt.Errorf("apply block: %w", err)
 	}
 
-	return err
+	return nil
 }
 
 // applyBlock applies the block to the store and the abci app.
@@ -233,7 +217,7 @@ func (m *Manager) attemptApplyCachedBlocks() error {
 		if cachedBlock.Block.GetRevision() != m.State.GetRevision() {
 			break
 		}
-		err := m.applyBlockWithFraudHandling(cachedBlock.Block, cachedBlock.Commit, types.BlockMetaData{Source: cachedBlock.Source})
+		err := m.validateAndApplyBlock(cachedBlock.Block, cachedBlock.Commit, types.BlockMetaData{Source: cachedBlock.Source})
 		if err != nil {
 			return fmt.Errorf("apply cached block: expected height: %d: %w", expectedHeight, err)
 		}
diff --git a/block/fork.go b/block/fork.go
index 8fe76259a..fc5f3fc26 100644
--- a/block/fork.go
+++ b/block/fork.go
@@ -17,7 +17,7 @@ import (
 
 const (
 	ForkMonitorInterval = 15 * time.Second
-	ForkMessage         = "rollapp fork detected. please rollback to height previous to rollapp_revision_start_height."
+	ForkMonitorMessage  = "rollapp fork detected. please rollback to height previous to rollapp_revision_start_height."
 )
 
 // MonitorForkUpdateLoop monitors the hub for fork updates in a loop
@@ -26,8 +26,11 @@ func (m *Manager) MonitorForkUpdateLoop(ctx context.Context) error {
 	defer ticker.Stop()
 
 	for {
-		if err := m.checkForkUpdate(ForkMessage); err != nil {
+		if err := m.checkForkUpdate(ForkMonitorMessage); err != nil {
 			m.logger.Error("Check for update.", err)
+			if errors.Is(err, ErrNonRecoverable) {
+				return err
+			}
 		}
 		select {
 		case <-ctx.Done():
@@ -63,7 +66,9 @@ func (m *Manager) checkForkUpdate(msg string) error {
 		if err != nil {
 			return err
 		}
-		m.freezeNode(fmt.Errorf("%s local_block_height: %d rollapp_revision_start_height: %d local_revision: %d rollapp_revision: %d", msg, m.State.Height(), expectedRevision.StartHeight, actualRevision, expectedRevision.Number))
+
+		err = fmt.Errorf("%s local_block_height: %d rollapp_revision_start_height: %d local_revision: %d rollapp_revision: %d", msg, m.State.Height(), expectedRevision.StartHeight, actualRevision, expectedRevision.Number)
+		return errors.Join(ErrNonRecoverable, err)
 	}
 
 	return nil
@@ -148,7 +153,7 @@ func (m *Manager) prepareDRSUpgradeMessages(obsoleteDRS []uint32) ([]proto.Messa
 		return nil, err
 	}
 
-	// if binary DRS is obsolete return error (to panic)
+	// if binary DRS is obsolete return error
 	for _, drs := range obsoleteDRS {
 		if drs == drsVersion {
 			return nil, gerrc.ErrCancelled.Wrapf("obsolete DRS version: %d", drs)
@@ -283,7 +288,7 @@ func (m *Manager) doForkWhenNewRevision() error {
 
 	// this cannot happen. it means the revision number obtained is not the same or the next revision. unable to fork.
 	if expectedRevision.Number != m.State.GetRevision() {
-		panic("Inconsistent expected revision number from Hub. Unable to fork")
+		return fmt.Errorf("inconsistent expected revision number from Hub (%d != %d). Unable to fork", expectedRevision.Number, m.State.GetRevision())
 	}
 
 	// remove instruction file after fork
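
Aside on the pattern above, not part of the patch: checkForkUpdate now reports a detected fork by joining the descriptive error with the ErrNonRecoverable sentinel, and MonitorForkUpdateLoop only exits once errors.Is matches that sentinel. A minimal, self-contained sketch of the same errors.Join / errors.Is classification (errNonRecoverable and the message are stand-ins, not the package's real values):

package main

import (
	"errors"
	"fmt"
)

// Stand-in for the block package's ErrNonRecoverable sentinel.
var errNonRecoverable = errors.New("non recoverable")

// checkForkUpdate mimics the hub check: on a detected fork it joins the
// sentinel with the descriptive error, so both survive unwrapping.
func checkForkUpdate() error {
	cause := fmt.Errorf("rollapp fork detected: local_revision: %d rollapp_revision: %d", 1, 2)
	return errors.Join(errNonRecoverable, cause)
}

func main() {
	for {
		if err := checkForkUpdate(); err != nil {
			fmt.Println("check for update:", err)
			if errors.Is(err, errNonRecoverable) {
				// The real loop returns err so the errgroup can shut the manager down.
				fmt.Println("non-recoverable, exiting monitor loop")
				return
			}
		}
	}
}
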
diff --git a/block/fraud.go b/block/fraud.go
index 11a95c493..15ee479e3 100644
--- a/block/fraud.go
+++ b/block/fraud.go
@@ -1,15 +1,11 @@
 package block
 
-import (
-	"context"
-)
-
 // FraudHandler is an interface that defines a method to handle faults.
 // Contract: should not be blocking.
 type FraudHandler interface {
 	// HandleFault handles a fault that occurred in the system.
 	// The fault is passed as an error type.
-	HandleFault(ctx context.Context, fault error)
+	HandleFault(fault error)
 }
 
 // FreezeHandler is used to handle faults coming from executing and validating blocks.
@@ -18,8 +14,8 @@ type FreezeHandler struct {
 	m *Manager
 }
 
-func (f FreezeHandler) HandleFault(ctx context.Context, fault error) {
-	f.m.freezeNode(fault)
+func (f FreezeHandler) HandleFault(fault error) {
+	f.m.StopManager(fault)
 }
 
 func NewFreezeHandler(manager *Manager) *FreezeHandler {
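
Dropping the context parameter makes FraudHandler a plain error sink; FreezeHandler now simply delegates to StopManager. Any alternative handler only needs the one-argument method. A hypothetical sketch (LogOnlyHandler is illustrative and not part of the patch):

package main

import (
	"errors"
	"fmt"
)

// FraudHandler mirrors the interface after this change.
// Contract: should not be blocking.
type FraudHandler interface {
	HandleFault(fault error)
}

// LogOnlyHandler is a hypothetical implementation that records faults
// instead of stopping the node, e.g. for tests or dry runs.
type LogOnlyHandler struct {
	faults []error
}

func (h *LogOnlyHandler) HandleFault(fault error) {
	h.faults = append(h.faults, fault)
	fmt.Println("fault recorded:", fault)
}

func main() {
	var handler FraudHandler = &LogOnlyHandler{}
	handler.HandleFault(errors.New("invalid state root"))
}
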
diff --git a/block/fraud_test.go b/block/fraud_test.go
index 179b95324..502283ccf 100644
--- a/block/fraud_test.go
+++ b/block/fraud_test.go
@@ -1,10 +1,30 @@
 package block_test
 
 import (
+	"context"
 	"errors"
 	"testing"
+	"time"
 
+	"github.com/dymensionxyz/dymint/block"
+	"github.com/dymensionxyz/dymint/da"
+	blockmocks "github.com/dymensionxyz/dymint/mocks/github.com/dymensionxyz/dymint/block"
+	"github.com/dymensionxyz/dymint/node/events"
+	"github.com/dymensionxyz/dymint/p2p"
+	"github.com/dymensionxyz/dymint/settlement"
+	"github.com/dymensionxyz/dymint/testutil"
+	"github.com/dymensionxyz/dymint/types"
+	"github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp"
+	"github.com/dymensionxyz/dymint/utils/event"
 	"github.com/dymensionxyz/gerr-cosmos/gerrc"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	abci "github.com/tendermint/tendermint/abci/types"
+	tmjson "github.com/tendermint/tendermint/libs/json"
+	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/libs/pubsub"
+	"github.com/tendermint/tendermint/proxy"
 )
 
 type mockError struct {
@@ -20,6 +40,7 @@ func (mockError) Unwrap() error {
 	return gerrc.ErrFault
 }
 
+// TODO: should move to gerrc tests
 func TestErrorIsErrFault(t *testing.T) {
 	err := mockError{name: "test", data: "test"}
 
@@ -33,3 +54,280 @@ func TestErrorIsErrFault(t *testing.T) {
 		t.Error("Expected Is to return false")
 	}
 }
+
+func waitForUnhealthy(fraudEventReceived chan *events.DataHealthStatus) func(msg pubsub.Message) {
+	return func(msg pubsub.Message) {
+		event, ok := msg.Data().(*events.DataHealthStatus)
+		if !ok {
+			return
+		}
+		fraudEventReceived <- event
+	}
+}
+
+func TestP2PBlockWithFraud(t *testing.T) {
+	// Init app
+	app := testutil.GetAppMock(testutil.EndBlock)
+	app.On("EndBlock", mock.Anything).Return(abci.ResponseEndBlock{
+		RollappParamUpdates: &abci.RollappParams{
+			Da:         "mock",
+			DrsVersion: 0,
+		},
+		ConsensusParamUpdates: &abci.ConsensusParams{
+			Block: &abci.BlockParams{
+				MaxGas:   40000000,
+				MaxBytes: 500000,
+			},
+		},
+	})
+	// Create proxy app
+	clientCreator := proxy.NewLocalClientCreator(app)
+	proxyApp := proxy.NewAppConns(clientCreator)
+	err := proxyApp.Start()
+	require.NoError(t, err)
+	manager, err := testutil.GetManager(testutil.GetManagerConfig(), nil, 1, 1, 0, proxyApp, nil)
+	require.NoError(t, err)
+	require.NotNil(t, manager)
+	manager.DAClient = testutil.GetMockDALC(log.TestingLogger())
+	manager.Retriever = manager.DAClient.(da.BatchRetriever)
+
+	// mock executor that returns ErrFault on ExecuteBlock
+	mockExecutor := &blockmocks.MockExecutorI{}
+	manager.Executor = mockExecutor
+	mockExecutor.On("GetAppInfo").Return(&abci.ResponseInfo{
+		LastBlockHeight: int64(0),
+	}, nil)
+	mockExecutor.On("ExecuteBlock", mock.Anything, mock.Anything).Return(nil, gerrc.ErrFault)
+
+	// Channel to receive the fraud event
+	fraudEventReceived := make(chan *events.DataHealthStatus, 1)
+	_, manager.Cancel = context.WithCancel(context.Background())
+	manager.FraudHandler = block.NewFreezeHandler(manager)
+
+	go event.MustSubscribe(
+		context.Background(),
+		manager.Pubsub,
+		"testFraudClient",
+		events.QueryHealthStatus,
+		waitForUnhealthy(fraudEventReceived),
+		log.NewNopLogger(),
+	)
+
+	blocks, err := testutil.GenerateBlocks(manager.NextHeightToSubmit(), 1, manager.LocalKey, [32]byte{})
+	assert.NoError(t, err)
+	commits, err := testutil.GenerateCommits(blocks, manager.LocalKey)
+	assert.NoError(t, err)
+
+	t.Log("Submitting block")
+	blockData := p2p.BlockData{Block: *blocks[0], Commit: *commits[0]}
+	msg := pubsub.NewMessage(blockData, map[string][]string{p2p.EventTypeKey: {p2p.EventNewGossipedBlock}})
+	manager.OnReceivedBlock(msg)
+
+	select {
+	case receivedEvent := <-fraudEventReceived:
+		t.Log("Received fraud event")
+		if receivedEvent.Error == nil {
+			t.Error("there should be an error in the event")
+		} else if !errors.Is(receivedEvent.Error, gerrc.ErrFault) {
+			t.Errorf("Unexpected error received, expected: %v, got: %v", gerrc.ErrFault, receivedEvent.Error)
+		}
+	case <-time.After(2 * time.Second): // time for the fraud event
+		t.Error("expected to receive a fraud event")
+	}
+
+	mockExecutor.AssertExpectations(t)
+}
+
+func TestLocalBlockWithFraud(t *testing.T) {
+	t.Skip("TODO: this is not actually testing a local block")
+	// Init app
+	app := testutil.GetAppMock(testutil.EndBlock)
+	app.On("EndBlock", mock.Anything).Return(abci.ResponseEndBlock{
+		RollappParamUpdates: &abci.RollappParams{
+			Da:         "mock",
+			DrsVersion: 0,
+		},
+		ConsensusParamUpdates: &abci.ConsensusParams{
+			Block: &abci.BlockParams{
+				MaxGas:   40000000,
+				MaxBytes: 500000,
+			},
+		},
+	})
+
+	// Create proxy app
+	clientCreator := proxy.NewLocalClientCreator(app)
+	proxyApp := proxy.NewAppConns(clientCreator)
+	err := proxyApp.Start()
+	require.NoError(t, err)
+	manager, err := testutil.GetManager(testutil.GetManagerConfig(), nil, 1, 1, 0, proxyApp, nil)
+	require.NoError(t, err)
+	require.NotNil(t, manager)
+	manager.DAClient = testutil.GetMockDALC(log.TestingLogger())
+	manager.Retriever = manager.DAClient.(da.BatchRetriever)
+
+	numBatchesToAdd := 2
+	nextBatchStartHeight := manager.NextHeightToSubmit()
+	var batch *types.Batch
+	for i := 0; i < numBatchesToAdd; i++ {
+		batch, err = testutil.GenerateBatch(
+			nextBatchStartHeight,
+			nextBatchStartHeight+uint64(testutil.DefaultTestBatchSize-1),
+			manager.LocalKey,
+			[32]byte{},
+		)
+		assert.NoError(t, err)
+
+		// Save one block on state to enforce local block application
+		_, err = manager.Store.SaveBlock(batch.Blocks[0], batch.Commits[0], nil)
+		require.NoError(t, err)
+
+		daResultSubmitBatch := manager.DAClient.SubmitBatch(batch)
+		assert.Equal(t, daResultSubmitBatch.Code, da.StatusSuccess)
+
+		err = manager.SLClient.SubmitBatch(batch, manager.DAClient.GetClientType(), &daResultSubmitBatch)
+		require.NoError(t, err)
+
+		nextBatchStartHeight = batch.EndHeight() + 1
+
+		time.Sleep(time.Millisecond * 500)
+	}
+
+	// mock executor that returns ErrFault on ExecuteBlock
+	mockExecutor := &blockmocks.MockExecutorI{}
+	manager.Executor = mockExecutor
+	gbdBz, _ := tmjson.Marshal(rollapp.GenesisBridgeData{})
+	mockExecutor.On("InitChain", mock.Anything, mock.Anything, mock.Anything).Return(&abci.ResponseInitChain{GenesisBridgeDataBytes: gbdBz}, nil)
+	mockExecutor.On("GetAppInfo").Return(&abci.ResponseInfo{
+		LastBlockHeight: int64(batch.EndHeight()),
+	}, nil)
+	mockExecutor.On("UpdateStateAfterInitChain", mock.Anything, mock.Anything).Return(nil)
+	mockExecutor.On("UpdateMempoolAfterInitChain", mock.Anything).Return(nil)
+	mockExecutor.On("ExecuteBlock", mock.Anything, mock.Anything).Return(nil, gerrc.ErrFault)
+
+	// Channel to receive the fraud event
+	fraudEventReceived := make(chan *events.DataHealthStatus, 1)
+	_, manager.Cancel = context.WithCancel(context.Background())
+	manager.FraudHandler = block.NewFreezeHandler(manager)
+
+	go event.MustSubscribe(
+		context.Background(),
+		manager.Pubsub,
+		"testFraudClient",
+		events.QueryHealthStatus,
+		waitForUnhealthy(fraudEventReceived),
+		log.NewNopLogger(),
+	)
+
+	// Initially sync target is 0
+	assert.Zero(t, manager.LastSettlementHeight.Load())
+	assert.True(t, manager.State.Height() == 0)
+
+	// enough time to sync and produce blocks
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+	defer cancel()
+	// Capture the error returned by manager.Start.
+
+	errChan := make(chan error, 1)
+	go func() {
+		errChan <- manager.Start(ctx)
+		err := <-errChan
+		require.Truef(t, errors.Is(err, gerrc.ErrFault), "expected error to be %v, got: %v", gerrc.ErrFault, err)
+	}()
+	<-ctx.Done()
+
+	select {
+	case receivedEvent := <-fraudEventReceived:
+		t.Log("Received fraud event")
+		if receivedEvent.Error == nil {
+			t.Error("there should be an error in the event")
+		} else if !errors.Is(receivedEvent.Error, gerrc.ErrFault) {
+			t.Errorf("Unexpected error received, expected: %v, got: %v", gerrc.ErrFault, receivedEvent.Error)
+		}
+	case <-time.After(2 * time.Second): // time for the fraud event
+		t.Error("expected to receive a fraud event")
+	}
+
+	assert.Equal(t, batch.EndHeight(), manager.LastSettlementHeight.Load())
+	mockExecutor.AssertExpectations(t)
+}
+
+// TestApplyBatchFromSLWithFraud tests that the ApplyBatchFromSL function returns an error if the batch is fraudulent
+func TestApplyBatchFromSLWithFraud(t *testing.T) {
+	require := require.New(t)
+	// Setup app
+	app := testutil.GetAppMock(testutil.Info, testutil.Commit, testutil.EndBlock)
+	app.On("EndBlock", mock.Anything).Return(abci.ResponseEndBlock{
+		RollappParamUpdates: &abci.RollappParams{
+			Da:         "mock",
+			DrsVersion: 0,
+		},
+		ConsensusParamUpdates: &abci.ConsensusParams{
+			Block: &abci.BlockParams{
+				MaxGas:   40000000,
+				MaxBytes: 500000,
+			},
+		},
+	})
+	// Create proxy app
+	clientCreator := proxy.NewLocalClientCreator(app)
+	proxyApp := proxy.NewAppConns(clientCreator)
+	err := proxyApp.Start()
+	require.NoError(err)
+	// Create a new mock store which should succeed to save the first block
+	mockStore := testutil.NewMockStore()
+	// Init manager
+	manager, err := testutil.GetManager(testutil.GetManagerConfig(), nil, 1, 1, 0, proxyApp, mockStore)
+	require.NoError(err)
+	commitHash := [32]byte{1}
+	manager.DAClient = testutil.GetMockDALC(log.TestingLogger())
+	manager.Retriever = manager.DAClient.(da.BatchRetriever)
+	app.On("Commit", mock.Anything).Return(abci.ResponseCommit{Data: commitHash[:]})
+
+	// submit batch
+	nextBatchStartHeight := manager.NextHeightToSubmit()
+	batch, err := testutil.GenerateBatch(
+		nextBatchStartHeight,
+		nextBatchStartHeight+uint64(testutil.DefaultTestBatchSize-1),
+		manager.LocalKey,
+		[32]byte{},
+	)
+	require.NoError(err)
+	daResultSubmitBatch := manager.DAClient.SubmitBatch(batch)
+	require.Equal(daResultSubmitBatch.Code, da.StatusSuccess)
+	err = manager.SLClient.SubmitBatch(batch, manager.DAClient.GetClientType(), &daResultSubmitBatch)
+	require.NoError(err)
+
+	// Mock Executor to return ErrFraud
+	mockExecutor := &blockmocks.MockExecutorI{}
+	manager.Executor = mockExecutor
+	mockExecutor.On("GetAppInfo").Return(&abci.ResponseInfo{
+		LastBlockHeight: int64(batch.EndHeight()),
+	}, nil)
+	mockExecutor.On("ExecuteBlock", mock.Anything, mock.Anything).Return(nil, gerrc.ErrFault)
+	app.On("Commit", mock.Anything).Return(abci.ResponseCommit{Data: commitHash[:]}).Once()
+	app.On("Info", mock.Anything).Return(abci.ResponseInfo{
+		LastBlockHeight:  int64(batch.EndHeight()),
+		LastBlockAppHash: commitHash[:],
+	})
+
+	var bds []rollapp.BlockDescriptor
+	for _, block := range batch.Blocks {
+		bds = append(bds, rollapp.BlockDescriptor{
+			Height: block.Header.Height,
+		})
+	}
+	slBatch := &settlement.Batch{
+		MetaData: &settlement.BatchMetaData{
+			DA: daResultSubmitBatch.SubmitMetaData,
+		},
+		BlockDescriptors: bds,
+	}
+
+	// Call ApplyBatchFromSL
+	err = manager.ApplyBatchFromSL(slBatch)
+
+	// Verify
+	require.True(errors.Is(err, gerrc.ErrFault))
+	mockExecutor.AssertExpectations(t)
+}
diff --git a/block/manager.go b/block/manager.go
index 578accd64..560de1825 100644
--- a/block/manager.go
+++ b/block/manager.go
@@ -67,10 +67,7 @@ type Manager struct {
 
 	// RunMode represents the mode of the node. Set during initialization and shouldn't change after that.
 	RunMode uint
-
-	// context used when freezing node
-	Cancel context.CancelFunc
-	Ctx    context.Context
+	Cancel  context.CancelFunc
 
 	// LastSubmissionTime is the time of last batch submitted in SL
 	LastSubmissionTime atomic.Int64
@@ -181,7 +178,6 @@ func NewManager(
 		settlementValidationC: make(chan struct{}, 1), // use of buffered channel to avoid blocking. In case channel is full, its skipped because there is an ongoing validation process, but validation height is updated, which means the ongoing validation will validate to the new height.
 		syncedFromSettlement:  uchannel.NewNudger(),   // used by the sequencer to wait till the node completes the syncing from settlement.
 	}
-	m.setFraudHandler(NewFreezeHandler(m))
 	err = m.LoadStateOnInit(store, genesis, logger)
 	if err != nil {
 		return nil, fmt.Errorf("get initial state: %w", err)
@@ -205,7 +201,12 @@ func NewManager(
 
 // Start starts the block manager.
 func (m *Manager) Start(ctx context.Context) error {
-	m.Ctx, m.Cancel = context.WithCancel(ctx)
+	// create new, cancelable context for the block manager
+	ctx, m.Cancel = context.WithCancel(ctx)
+	// set the fraud handler to freeze the node in case of fraud
+	// TODO: should be called for full nodes only?
+	m.setFraudHandler(NewFreezeHandler(m))
+
 	// Check if InitChain flow is needed
 	if m.State.IsGenesis() {
 		m.logger.Info("Running InitChain")
@@ -261,7 +262,9 @@ func (m *Manager) Start(ctx context.Context) error {
 	// send signal to validation loop with last settlement state update
 	m.triggerSettlementValidation()
 
-	eg, ctx := errgroup.WithContext(m.Ctx)
+	// This error group is used to control the lifetime of the block manager.
+	// When one of the loops exits with an error, the block manager exits.
+	eg, ctx := errgroup.WithContext(ctx)
 
 	// Start the pruning loop in the background
 	uerrors.ErrGroupGoLog(eg, m.logger, func() error {
@@ -289,10 +292,34 @@ func (m *Manager) Start(ctx context.Context) error {
 
 	// run based on the node role
 	if !amIProposer {
-		return m.runAsFullNode(ctx, eg)
+		err = m.runAsFullNode(ctx, eg)
+		if err != nil {
+			return err
+		}
+	} else {
+		err = m.runAsProposer(ctx, eg)
+		if err != nil {
+			return err
+		}
 	}
 
-	return m.runAsProposer(ctx, eg)
+	go func() {
+		err = eg.Wait()
+		// Check if loops exited due to sequencer rotation signal
+		if errors.Is(err, errRotationRequested) {
+			m.rotate(ctx)
+		} else if errors.Is(err, gerrc.ErrFault) {
+			// Here we handle the fault by calling the fraud handler.
+			// It publishes a DataHealthStatus event to the pubsub and stops the block manager.
+			m.logger.Error("block manager exited with fault", "error", err)
+			m.FraudHandler.HandleFault(err)
+		} else if err != nil {
+			m.logger.Error("block manager exited with error", "error", err)
+			m.StopManager(err)
+		}
+	}()
+
+	return nil
 }
 
 func (m *Manager) NextHeightToSubmit() uint64 {
@@ -321,6 +348,12 @@ func (m *Manager) updateFromLastSettlementState() error {
 		return err
 	}
 
+	// update latest finalized height
+	err = m.updateLastFinalizedHeightFromSettlement()
+	if err != nil {
+		return fmt.Errorf("sync block manager from settlement: %w", err)
+	}
+
 	m.P2PClient.UpdateLatestSeenHeight(latestBatch.EndHeight)
 	if latestBatch.EndHeight >= m.State.NextHeight() {
 		m.UpdateTargetHeight(latestBatch.EndHeight)
@@ -332,8 +365,8 @@ func (m *Manager) updateFromLastSettlementState() error {
 	return nil
 }
 
+// updateLastFinalizedHeightFromSettlement updates the last finalized height from the Hub
 func (m *Manager) updateLastFinalizedHeightFromSettlement() error {
-	// update latest finalized height from SL
 	height, err := m.SLClient.GetLatestFinalizedHeight()
 	if errors.Is(err, gerrc.ErrNotFound) {
 		m.logger.Info("No finalized batches for chain found in SL.")
@@ -405,12 +438,17 @@ func (m *Manager) setFraudHandler(handler *FreezeHandler) {
 	m.FraudHandler = handler
 }
 
-// freezeNode sets the node as unhealthy and prevents the node continues producing and processing blocks
-func (m *Manager) freezeNode(err error) {
+// StopManager sets the node as unhealthy and stops the block manager context
+func (m *Manager) StopManager(err error) {
 	m.logger.Info("Freezing node", "err", err)
-	if m.Ctx.Err() != nil {
-		return
-	}
-	uevent.MustPublish(m.Ctx, m.Pubsub, &events.DataHealthStatus{Error: err}, events.HealthStatusList)
+	m.setUnhealthy(err)
 	m.Cancel()
 }
+
+func (m *Manager) setUnhealthy(err error) {
+	uevent.MustPublish(context.Background(), m.Pubsub, &events.DataHealthStatus{Error: err}, events.HealthStatusList)
+}
+
+func (m *Manager) setHealthy() {
+	uevent.MustPublish(context.Background(), m.Pubsub, &events.DataHealthStatus{Error: nil}, events.HealthStatusList)
+}
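
Start now returns immediately and hands shutdown over to a watcher goroutine on the errgroup: the first loop to fail cancels the shared context for the rest, and eg.Wait surfaces that first error so it can be classified as rotation, fault, or a generic stop. A condensed sketch of that lifecycle, with errRotationRequested as a stand-in sentinel and prints in place of the manager calls:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

var errRotationRequested = errors.New("rotation requested") // stand-in sentinel

func main() {
	eg, ctx := errgroup.WithContext(context.Background())

	// A long-running loop that stops only when the shared context is canceled.
	eg.Go(func() error {
		<-ctx.Done()
		return ctx.Err()
	})
	// A loop that exits with the rotation signal, canceling ctx for the others.
	eg.Go(func() error {
		time.Sleep(10 * time.Millisecond)
		return errRotationRequested
	})

	// Watcher: classify the first error the group returns.
	done := make(chan struct{})
	go func() {
		defer close(done)
		switch err := eg.Wait(); {
		case errors.Is(err, errRotationRequested):
			fmt.Println("rotating sequencer") // m.rotate(ctx) in the real code
		case err != nil:
			fmt.Println("stopping manager:", err) // fault handling / StopManager
		}
	}()
	<-done
}
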
diff --git a/block/manager_test.go b/block/manager_test.go
index 0d7b4b6bf..45b52d11f 100644
--- a/block/manager_test.go
+++ b/block/manager_test.go
@@ -3,7 +3,6 @@ package block_test
 import (
 	"context"
 	"crypto/rand"
-	"errors"
 	"sync/atomic"
 	"testing"
 	"time"
@@ -12,7 +11,6 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
-	tmjson "github.com/tendermint/tendermint/libs/json"
 
 	"github.com/libp2p/go-libp2p/core/crypto"
 
@@ -33,20 +31,16 @@ import (
 
 	"github.com/dymensionxyz/dymint/config"
 	"github.com/dymensionxyz/dymint/da"
-	blockmocks "github.com/dymensionxyz/dymint/mocks/github.com/dymensionxyz/dymint/block"
-	"github.com/dymensionxyz/dymint/node/events"
 	slregistry "github.com/dymensionxyz/dymint/settlement/registry"
 	"github.com/dymensionxyz/dymint/store"
-
-	"github.com/dymensionxyz/gerr-cosmos/gerrc"
-
-	"github.com/dymensionxyz/dymint/utils/event"
 )
 
 // TODO: test loading sequencer while rotation in progress
 // TODO: test sequencer after L2 handover but before last state update submitted
 // TODO: test halt scenario
 
+// TODO: TestApplyCachedBlocks_WithFraudCheck
+
 func TestInitialState(t *testing.T) {
 	version.DRS = "0"
 	var err error
@@ -197,204 +191,15 @@ func TestProduceOnlyAfterSynced(t *testing.T) {
 	defer cancel()
 	// Capture the error returned by manager.Start.
 
-	errChan := make(chan error, 1)
-	go func() {
-		errChan <- manager.Start(ctx)
-		err := <-errChan
-		require.NoError(t, err)
-	}()
+	err = manager.Start(ctx)
+	require.NoError(t, err)
 	<-ctx.Done()
+	assert.Equal(t, batch.EndHeight(), manager.LastSettlementHeight.Load())
 	// validate that we produced blocks
 	assert.Greater(t, manager.State.Height(), batch.EndHeight())
 }
 
-// TestApplyCachedBlocks checks the flow that happens when we are receiving blocks from p2p and some of the blocks
-// are already cached. This means blocks that were gossiped but are bigger than the expected next block height.
-// TODO: this test is flaky! https://github.com/dymensionxyz/dymint/issues/1173
-func TestApplyCachedBlocks_WithFraudCheck(t *testing.T) {
-	// Init app
-	app := testutil.GetAppMock(testutil.EndBlock)
-	app.On("EndBlock", mock.Anything).Return(abci.ResponseEndBlock{
-		RollappParamUpdates: &abci.RollappParams{
-			Da:         "mock",
-			DrsVersion: 0,
-		},
-		ConsensusParamUpdates: &abci.ConsensusParams{
-			Block: &abci.BlockParams{
-				MaxGas:   40000000,
-				MaxBytes: 500000,
-			},
-		},
-	})
-	// Create proxy app
-	clientCreator := proxy.NewLocalClientCreator(app)
-	proxyApp := proxy.NewAppConns(clientCreator)
-	err := proxyApp.Start()
-	require.NoError(t, err)
-	manager, err := testutil.GetManager(testutil.GetManagerConfig(), nil, 1, 1, 0, proxyApp, nil)
-	require.NoError(t, err)
-	require.NotNil(t, manager)
-
-	t.Log("Taking the manager out of sync by submitting a batch")
-	manager.DAClient = testutil.GetMockDALC(log.TestingLogger())
-	manager.Retriever = manager.DAClient.(da.BatchRetriever)
-	mockExecutor := &blockmocks.MockExecutorI{}
-	manager.Executor = mockExecutor
-	mockExecutor.On("GetAppInfo").Return(&abci.ResponseInfo{
-		LastBlockHeight: int64(0),
-	}, nil)
-	mockExecutor.On("ExecuteBlock", mock.Anything, mock.Anything).Return(nil, gerrc.ErrFault)
-
-	// Check that handle fault is called
-	manager.FraudHandler = block.NewFreezeHandler(manager)
-	fraudEventReceived := make(chan *events.DataHealthStatus, 1)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	manager.Ctx, manager.Cancel = context.WithCancel(context.Background())
-	go event.MustSubscribe(
-		ctx,
-		manager.Pubsub,
-		"testFraudClient",
-		events.QueryHealthStatus,
-		func(msg pubsub.Message) {
-			event, ok := msg.Data().(*events.DataHealthStatus)
-			if !ok {
-				t.Errorf("Unexpected event type received: %T", msg.Data())
-				return
-			}
-			fraudEventReceived <- event
-		},
-		log.NewNopLogger(),
-	)
-
-	numBatchesToAdd := 1
-	nextBatchStartHeight := manager.NextHeightToSubmit()
-	var batch *types.Batch
-	for i := 0; i < numBatchesToAdd; i++ {
-		batch, err = testutil.GenerateBatch(nextBatchStartHeight, nextBatchStartHeight, manager.LocalKey, [32]byte{})
-		assert.NoError(t, err)
-		blockData := p2p.BlockData{Block: *batch.Blocks[0], Commit: *batch.Commits[0]}
-		msg := pubsub.NewMessage(blockData, map[string][]string{p2p.EventTypeKey: {p2p.EventNewGossipedBlock}})
-		if manager.Ctx.Err() == nil {
-			manager.OnReceivedBlock(msg)
-		}
-		// Wait until daHeight is updated
-		time.Sleep(time.Millisecond * 500)
-	}
-
-	select {
-	case receivedEvent := <-fraudEventReceived:
-		if receivedEvent.Error == nil {
-			t.Error("there should be an error in the event")
-		} else if !errors.Is(receivedEvent.Error, gerrc.ErrFault) {
-			t.Errorf("Unexpected error received, expected: %v, got: %v", gerrc.ErrFault, receivedEvent.Error)
-		}
-	case <-time.After(5 * time.Second):
-		t.Error("timeout waiting for fraud event")
-	}
-
-	mockExecutor.AssertExpectations(t)
-}
-
-// TestApplyLocalBlock checks the flow that happens when there is a block saved on the Store and we apply it locally.
-func TestApplyLocalBlock_WithFraudCheck(t *testing.T) {
-	// Init app
-	app := testutil.GetAppMock(testutil.EndBlock)
-	app.On("EndBlock", mock.Anything).Return(abci.ResponseEndBlock{
-		RollappParamUpdates: &abci.RollappParams{
-			Da:         "mock",
-			DrsVersion: 0,
-		},
-		ConsensusParamUpdates: &abci.ConsensusParams{
-			Block: &abci.BlockParams{
-				MaxGas:   40000000,
-				MaxBytes: 500000,
-			},
-		},
-	})
-
-	// Create proxy app
-	clientCreator := proxy.NewLocalClientCreator(app)
-	proxyApp := proxy.NewAppConns(clientCreator)
-	err := proxyApp.Start()
-	require.NoError(t, err)
-	manager, err := testutil.GetManager(testutil.GetManagerConfig(), nil, 1, 1, 0, proxyApp, nil)
-	require.NoError(t, err)
-	require.NotNil(t, manager)
-	t.Log("Taking the manager out of sync by submitting a batch")
-	manager.DAClient = testutil.GetMockDALC(log.TestingLogger())
-	manager.Retriever = manager.DAClient.(da.BatchRetriever)
-
-	numBatchesToAdd := 2
-	nextBatchStartHeight := manager.NextHeightToSubmit()
-
-	var batch *types.Batch
-	for i := 0; i < numBatchesToAdd; i++ {
-		batch, err = testutil.GenerateBatch(
-			nextBatchStartHeight,
-			nextBatchStartHeight+uint64(testutil.DefaultTestBatchSize-1),
-			manager.LocalKey,
-			[32]byte{},
-		)
-		assert.NoError(t, err)
-
-		// Save one block on state to enforce local block application
-		_, err = manager.Store.SaveBlock(batch.Blocks[0], batch.Commits[0], nil)
-		require.NoError(t, err)
-
-		daResultSubmitBatch := manager.DAClient.SubmitBatch(batch)
-		assert.Equal(t, daResultSubmitBatch.Code, da.StatusSuccess)
-
-		err = manager.SLClient.SubmitBatch(batch, manager.DAClient.GetClientType(), &daResultSubmitBatch)
-		require.NoError(t, err)
-
-		nextBatchStartHeight = batch.EndHeight() + 1
-
-		time.Sleep(time.Millisecond * 500)
-	}
-
-	mockExecutor := &blockmocks.MockExecutorI{}
-	manager.Executor = mockExecutor
-	gbdBz, _ := tmjson.Marshal(rollapp.GenesisBridgeData{})
-	mockExecutor.On("InitChain", mock.Anything, mock.Anything, mock.Anything).Return(&abci.ResponseInitChain{GenesisBridgeDataBytes: gbdBz}, nil)
-	mockExecutor.On("GetAppInfo").Return(&abci.ResponseInfo{
-		LastBlockHeight: int64(batch.EndHeight()),
-	}, nil)
-	mockExecutor.On("UpdateStateAfterInitChain", mock.Anything, mock.Anything).Return(nil)
-	mockExecutor.On("UpdateMempoolAfterInitChain", mock.Anything).Return(nil)
-	mockExecutor.On("ExecuteBlock", mock.Anything, mock.Anything).Return(nil, gerrc.ErrFault)
-
-	// Check that handle fault is called
-	mockFraudHandler := &blockmocks.MockFraudHandler{}
-	manager.FraudHandler = mockFraudHandler
-
-	mockFraudHandler.On("HandleFault", mock.Anything, mock.MatchedBy(func(err error) bool {
-		return errors.Is(err, gerrc.ErrFault)
-	})).Return(nil)
-
-	// Initially sync target is 0
-	assert.Zero(t, manager.LastSettlementHeight.Load())
-	assert.True(t, manager.State.Height() == 0)
-
-	// enough time to sync and produce blocks
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
-	defer cancel()
-	// Capture the error returned by manager.Start.
-
-	errChan := make(chan error, 1)
-	go func() {
-		errChan <- manager.Start(ctx)
-		err := <-errChan
-		require.Truef(t, errors.Is(err, gerrc.ErrFault), "expected error to be %v, got: %v", gerrc.ErrFault, err)
-	}()
-	<-ctx.Done()
-	assert.Equal(t, batch.EndHeight(), manager.LastSettlementHeight.Load())
-	mockExecutor.AssertExpectations(t)
-	mockFraudHandler.AssertExpectations(t)
-}
-
 func TestRetrieveDaBatchesFailed(t *testing.T) {
 	manager, err := testutil.GetManager(testutil.GetManagerConfig(), nil, 1, 1, 0, nil, nil)
 	require.NoError(t, err)
@@ -773,94 +578,6 @@ func TestDAFetch(t *testing.T) {
 	}
 }
 
-// TestManager_ApplyBatchFromSL_FraudHandling tests the case when the manager receives a fraud when the block is part of the batch received from the DA.
-func TestManager_ApplyBatchFromSL_FraudHandling(t *testing.T) {
-	require := require.New(t)
-	// Setup app
-	app := testutil.GetAppMock(testutil.Info, testutil.Commit, testutil.EndBlock)
-	app.On("EndBlock", mock.Anything).Return(abci.ResponseEndBlock{
-		RollappParamUpdates: &abci.RollappParams{
-			Da:         "mock",
-			DrsVersion: 0,
-		},
-		ConsensusParamUpdates: &abci.ConsensusParams{
-			Block: &abci.BlockParams{
-				MaxGas:   40000000,
-				MaxBytes: 500000,
-			},
-		},
-	})
-	// Create proxy app
-	clientCreator := proxy.NewLocalClientCreator(app)
-	proxyApp := proxy.NewAppConns(clientCreator)
-	err := proxyApp.Start()
-	require.NoError(err)
-	// Create a new mock store which should succeed to save the first block
-	mockStore := testutil.NewMockStore()
-	// Init manager
-	manager, err := testutil.GetManager(testutil.GetManagerConfig(), nil, 1, 1, 0, proxyApp, mockStore)
-	require.NoError(err)
-	commitHash := [32]byte{1}
-	manager.DAClient = testutil.GetMockDALC(log.TestingLogger())
-	manager.Retriever = manager.DAClient.(da.BatchRetriever)
-	app.On("Commit", mock.Anything).Return(abci.ResponseCommit{Data: commitHash[:]})
-	nextBatchStartHeight := manager.NextHeightToSubmit()
-	batch, err := testutil.GenerateBatch(
-		nextBatchStartHeight,
-		nextBatchStartHeight+uint64(testutil.DefaultTestBatchSize-1),
-		manager.LocalKey,
-		[32]byte{},
-	)
-	require.NoError(err)
-	daResultSubmitBatch := manager.DAClient.SubmitBatch(batch)
-	require.Equal(daResultSubmitBatch.Code, da.StatusSuccess)
-	err = manager.SLClient.SubmitBatch(batch, manager.DAClient.GetClientType(), &daResultSubmitBatch)
-	require.NoError(err)
-
-	// Mock Executor to return ErrFraud
-	mockExecutor := &blockmocks.MockExecutorI{}
-	manager.Executor = mockExecutor
-	mockExecutor.On("GetAppInfo").Return(&abci.ResponseInfo{
-		LastBlockHeight: int64(batch.EndHeight()),
-	}, nil)
-	mockExecutor.On("ExecuteBlock", mock.Anything, mock.Anything).Return(nil, gerrc.ErrFault)
-
-	// Check that handle fault is called
-	mockFraudHandler := &blockmocks.MockFraudHandler{}
-	manager.FraudHandler = mockFraudHandler
-
-	mockFraudHandler.On("HandleFault", mock.Anything, mock.MatchedBy(func(err error) bool {
-		return errors.Is(err, gerrc.ErrFault)
-	})).Return(nil)
-
-	app.On("Commit", mock.Anything).Return(abci.ResponseCommit{Data: commitHash[:]}).Once()
-	app.On("Info", mock.Anything).Return(abci.ResponseInfo{
-		LastBlockHeight:  int64(batch.EndHeight()),
-		LastBlockAppHash: commitHash[:],
-	})
-
-	var bds []rollapp.BlockDescriptor
-	for _, block := range batch.Blocks {
-		bds = append(bds, rollapp.BlockDescriptor{
-			Height: block.Header.Height,
-		})
-	}
-	slBatch := &settlement.Batch{
-		MetaData: &settlement.BatchMetaData{
-			DA: daResultSubmitBatch.SubmitMetaData,
-		},
-		BlockDescriptors: bds,
-	}
-
-	// Call ApplyBatchFromSL
-	err = manager.ApplyBatchFromSL(slBatch)
-
-	// Verify
-	require.True(errors.Is(err, gerrc.ErrFault))
-	mockExecutor.AssertExpectations(t)
-	mockFraudHandler.AssertExpectations(t)
-}
-
 func TestManager_updateTargetHeight(t *testing.T) {
 	tests := []struct {
 		name string
diff --git a/block/modes.go b/block/modes.go
index adfd56432..858b70bd1 100644
--- a/block/modes.go
+++ b/block/modes.go
@@ -2,7 +2,6 @@ package block
 
 import (
 	"context"
-	"errors"
 	"fmt"
 
 	"github.com/dymensionxyz/dymint/p2p"
@@ -24,11 +23,6 @@ const (
 func (m *Manager) runAsFullNode(ctx context.Context, eg *errgroup.Group) error {
 	m.logger.Info("starting block manager", "mode", "full node")
 	m.RunMode = RunModeFullNode
-	// update latest finalized height
-	err := m.updateLastFinalizedHeightFromSettlement()
-	if err != nil {
-		return fmt.Errorf("sync block manager from settlement: %w", err)
-	}
 
 	// Start the settlement validation loop in the background
 	uerrors.ErrGroupGoLog(eg, m.logger, func() error {
@@ -103,17 +97,6 @@ func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error {
 		return m.MonitorProposerRotation(ctx)
 	})
 
-	go func() {
-		err = eg.Wait()
-		// Check if loops exited due to sequencer rotation signal
-		if errors.Is(err, errRotationRequested) {
-			m.rotate(ctx)
-		} else if err != nil {
-			m.logger.Error("block manager exited with error", "error", err)
-			m.freezeNode(err)
-		}
-	}()
-
 	return nil
 }
diff --git a/block/p2p.go b/block/p2p.go
index 239a27161..7415baa8a 100644
--- a/block/p2p.go
+++ b/block/p2p.go
@@ -2,11 +2,13 @@ package block
 
 import (
 	"context"
+	"errors"
 	"fmt"
 
 	"github.com/dymensionxyz/dymint/p2p"
 	"github.com/dymensionxyz/dymint/types"
 	"github.com/dymensionxyz/dymint/types/metrics"
+	"github.com/dymensionxyz/gerr-cosmos/gerrc"
 	"github.com/tendermint/tendermint/libs/pubsub"
 )
 
@@ -59,7 +61,17 @@ func (m *Manager) OnReceivedBlock(event pubsub.Message) {
 
 	err := m.attemptApplyCachedBlocks()
 	if err != nil {
-		m.freezeNode(err)
+		// OnReceivedBlock is a callback, and we can't return an error.
+		// Therefore we handle the error here as well.
+		if errors.Is(err, gerrc.ErrFault) {
+			// Here we handle the fault by calling the fraud handler.
+			// It publishes a DataHealthStatus event to the pubsub and stops the block manager.
+			m.logger.Error("block manager exited with fault", "error", err)
+			m.FraudHandler.HandleFault(err)
+		} else {
+			m.logger.Error("block manager exited with error", "error", err)
+			m.StopManager(err)
+		}
 		m.logger.Error("Attempt apply cached blocks.", "err", err)
 	}
 }
diff --git a/block/produce.go b/block/produce.go
index 5f4c1045d..ffab52c43 100644
--- a/block/produce.go
+++ b/block/produce.go
@@ -8,9 +8,7 @@ import (
 
 	"github.com/dymensionxyz/gerr-cosmos/gerrc"
 
-	"github.com/dymensionxyz/dymint/node/events"
 	"github.com/dymensionxyz/dymint/store"
-	uevent "github.com/dymensionxyz/dymint/utils/event"
 
 	tmed25519 "github.com/tendermint/tendermint/crypto/ed25519"
 	cmtproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -27,10 +25,7 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int)
 	m.logger.Info("Started block producer loop.")
 
 	ticker := time.NewTicker(m.Conf.BlockTime)
-	defer func() {
-		ticker.Stop()
-		m.logger.Info("Stopped block producer loop.")
-	}()
+	defer ticker.Stop()
 
 	var nextEmptyBlock time.Time
 	firstBlock := true
@@ -51,19 +46,18 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int)
 
 		block, commit, err := m.ProduceApplyGossipBlock(ctx, ProduceBlockOptions{AllowEmpty: produceEmptyBlock})
 		if errors.Is(err, context.Canceled) {
-			m.logger.Error("Produce and gossip: context canceled.", "error", err)
 			return nil
 		}
 		if errors.Is(err, types.ErrEmptyBlock) { // occurs if the block was empty but we don't want to produce one
 			continue
 		}
 		if errors.Is(err, ErrNonRecoverable) {
-			uevent.MustPublish(ctx, m.Pubsub, &events.DataHealthStatus{Error: err}, events.HealthStatusList)
 			return err
 		}
 		if err != nil {
 			m.logger.Error("Produce and gossip: uncategorized, assuming recoverable.", "error", err)
+			// FIXME: should set unhealthy?
 			continue
 		}
 		nextEmptyBlock = time.Now().Add(m.Conf.MaxIdleTime)
@@ -82,19 +76,17 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int)
 			return nil
 		case bytesProducedC <- bytesProducedN:
 		default:
-			evt := &events.DataHealthStatus{Error: fmt.Errorf("Block production paused. Time between last block produced and last block submitted higher than max skew time: %s last block in settlement time: %s %w", m.Conf.MaxSkewTime, m.GetLastBlockTimeInSettlement(), gerrc.ErrResourceExhausted)}
-			uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList)
+			err := fmt.Errorf("Block production paused. Time between last block produced and last block submitted higher than max skew time: %s last block in settlement time: %s %w", m.Conf.MaxSkewTime, m.GetLastBlockTimeInSettlement(), gerrc.ErrResourceExhausted)
+			m.setUnhealthy(err)
 			m.logger.Error("Pausing block production until new batch is submitted.", "Batch skew time", m.GetBatchSkewTime(), "Max batch skew time", m.Conf.MaxSkewTime, "Last block in settlement time", m.GetLastBlockTimeInSettlement())
 			select {
 			case <-ctx.Done():
 				return nil
 			case bytesProducedC <- bytesProducedN:
-				evt := &events.DataHealthStatus{Error: nil}
-				uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList)
+				m.setHealthy()
 				m.logger.Info("Resumed block production.")
 			}
 		}
-
 	}
 }
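
The producer reports back-pressure through the new setUnhealthy/setHealthy helpers rather than publishing health events inline: when the submitter's channel is full it emits an unhealthy event, blocks on the send, and emits a healthy event once the submitter drains. A stdlib-only sketch of that pause/resume handshake (channel size, byte counts, and messages are illustrative):

package main

import (
	"fmt"
	"time"
)

func setUnhealthy(err error) { fmt.Println("health event:", err) }
func setHealthy()            { fmt.Println("health event: healthy") }

func main() {
	bytesProducedC := make(chan int, 1)
	bytesProducedC <- 100 // pre-fill the buffer: the submitter is behind

	// The submitter drains slowly in the background.
	go func() {
		for n := range bytesProducedC {
			time.Sleep(20 * time.Millisecond)
			fmt.Println("submitted", n, "bytes")
		}
	}()

	n := 42
	select {
	case bytesProducedC <- n: // fast path: the submitter is keeping up
	default:
		// Buffer full: pause production, report it, then block until there is room.
		setUnhealthy(fmt.Errorf("block production paused: max skew time exceeded"))
		bytesProducedC <- n // unblocks once the submitter catches up
		setHealthy()
	}
	fmt.Println("resumed block production")
	time.Sleep(50 * time.Millisecond) // let the submitter finish printing
}
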
diff --git a/block/retriever.go b/block/retriever.go
index 3475bd398..8dae5c02f 100644
--- a/block/retriever.go
+++ b/block/retriever.go
@@ -46,7 +46,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error {
 		}
 
 		// We dont validate because validateBlockBeforeApply already checks if the block is already applied, and we don't need to fail there.
-		err := m.applyBlockWithFraudHandling(block, batch.Commits[i], types.BlockMetaData{Source: types.DA, DAHeight: slBatch.MetaData.DA.Height})
+		err := m.validateAndApplyBlock(block, batch.Commits[i], types.BlockMetaData{Source: types.DA, DAHeight: slBatch.MetaData.DA.Height})
 		if err != nil {
 			return fmt.Errorf("apply block: height: %d: %w", block.Header.Height, err)
 		}
@@ -92,7 +92,7 @@ func (m *Manager) applyLocalBlock() error {
 		return fmt.Errorf("load source: %w", gerrc.ErrNotFound)
 	}
 
-	err = m.applyBlockWithFraudHandling(block, commit, types.BlockMetaData{Source: source})
+	err = m.validateAndApplyBlock(block, commit, types.BlockMetaData{Source: source})
 	if err != nil {
 		return fmt.Errorf("apply block from local store: height: %d: %w", height, err)
 	}
diff --git a/block/submit.go b/block/submit.go
index c57d50a00..bde7bf5f0 100644
--- a/block/submit.go
+++ b/block/submit.go
@@ -7,6 +7,7 @@ import (
 	"sync/atomic"
 	"time"
 
+	errorsmod "cosmossdk.io/errors"
 	"github.com/dymensionxyz/gerr-cosmos/gerrc"
 	"github.com/tendermint/tendermint/libs/pubsub"
 	"golang.org/x/sync/errgroup"
@@ -91,6 +92,8 @@ func SubmitLoopInner(
 	eg.Go(func() error {
 		// 'submitter': this thread actually creates and submits batches. this thread is woken up every batch_submit_time/10 (we used /10 to avoid waiting too much if submission is not required for t-maxBatchSubmitTime, but it maybe required before t) to check if submission is required even if no new blocks have been produced
 		ticker := time.NewTicker(maxBatchSubmitTime / 10)
+		defer ticker.Stop()
+
 		for {
 			select {
 			case <-ctx.Done():
@@ -118,13 +121,13 @@ func SubmitLoopInner(
 			if err != nil {
 				err = fmt.Errorf("create and submit batch: %w", err)
 				if errors.Is(err, gerrc.ErrInternal) {
-					logger.Error("Create and submit batch", "err", err, "pending", pending)
-					panic(err)
+					return errorsmod.Wrap(err, "create and submit batch")
 				}
 				// this could happen if we timed-out waiting for acceptance in the previous iteration, but the batch was indeed submitted.
-				// we panic here cause restarting may reset the last batch submitted counter and the sequencer can potentially resume submitting batches.
 				if errors.Is(err, gerrc.ErrAlreadyExists) {
 					logger.Debug("Batch already accepted", "err", err, "pending", pending)
+					// TODO: find better, non panic, way to handle this scenario
 					panic(err)
 				}
 				return err
diff --git a/block/sync.go b/block/sync.go
index 36229fc89..f88014c60 100644
--- a/block/sync.go
+++ b/block/sync.go
@@ -74,6 +74,7 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error {
 
 			settlementBatch, err := m.SLClient.GetBatchAtHeight(m.State.NextHeight())
 			if err != nil {
+				// TODO: should be recoverable. set to unhealthy and continue
 				return fmt.Errorf("retrieve SL batch err: %w", err)
 			}
 			m.logger.Info("Retrieved state update from SL.", "state_index", settlementBatch.StateIndex)
@@ -82,9 +83,9 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error {
 			m.LastSubmissionTime.Store(settlementBatch.Batch.CreationTime.UTC().UnixNano())
 
 			err = m.ApplyBatchFromSL(settlementBatch.Batch)
-
 			// this will keep sync loop alive when DA is down or retrievals are failing because DA issues.
 			if errors.Is(err, da.ErrRetrieval) {
+				// TODO: set to unhealthy?
 				continue
 			}
 			if err != nil {
@@ -96,11 +97,12 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error {
 			// trigger state update validation, after each state update is applied
 			m.triggerSettlementValidation()
 
-			err = m.attemptApplyCachedBlocks()
-			if err != nil {
-				return fmt.Errorf("Attempt apply cached blocks. err:%w", err)
-			}
+		}
 
+		// after syncing from SL, attempt to apply cached blocks if any
+		err := m.attemptApplyCachedBlocks()
+		if err != nil {
+			return fmt.Errorf("attempt apply cached blocks: %w", err)
 		}
 
 		// avoid notifying as synced in case it fails before
@@ -109,7 +111,6 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error {
 			// nudge to signal to any listens that we're currently synced with the last settlement height we've seen so far
 			m.syncedFromSettlement.Nudge()
 		}
-
 	}
 }
diff --git a/block/validate.go b/block/validate.go
index d2a86d07f..18231d5f8 100644
--- a/block/validate.go
+++ b/block/validate.go
@@ -2,12 +2,8 @@ package block
 
 import (
 	"context"
-	"errors"
 
-	"github.com/dymensionxyz/dymint/node/events"
 	"github.com/dymensionxyz/dymint/settlement"
-	uevent "github.com/dymensionxyz/dymint/utils/event"
-	"github.com/dymensionxyz/gerr-cosmos/gerrc"
 	"github.com/tendermint/tendermint/libs/pubsub"
 )
 
@@ -36,27 +32,21 @@ func (m *Manager) SettlementValidateLoop(ctx context.Context) error {
 			// get next batch that needs to be validated from SL
 			batch, err := m.SLClient.GetBatchAtHeight(currH)
 			if err != nil {
-				uevent.MustPublish(ctx, m.Pubsub, &events.DataHealthStatus{Error: err}, events.HealthStatusList)
+				// TODO: should be recoverable. set to unhealthy and continue
 				return err
 			}
 
 			// validate batch
 			err = m.SettlementValidator.ValidateStateUpdate(batch)
 			if err != nil {
-				if errors.Is(err, gerrc.ErrFault) {
-					m.FraudHandler.HandleFault(ctx, err)
-				} else {
-					uevent.MustPublish(ctx, m.Pubsub, &events.DataHealthStatus{Error: err}, events.HealthStatusList)
-				}
 				return err
 			}
 
 			// update the last validated height to the batch last block height
 			m.SettlementValidator.UpdateLastValidatedHeight(batch.EndHeight)
 
-			m.logger.Debug("state info validated", "lastValidatedHeight", m.SettlementValidator.GetLastValidatedHeight())
+			m.logger.Info("state info validated", "idx", batch.StateIndex, "start height", batch.StartHeight, "end height", batch.EndHeight)
 		}
-
 	}
 }
diff --git a/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go b/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go
index 7876e41cc..15b8e4ce6 100644
--- a/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go
+++ b/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go
@@ -2,11 +2,7 @@
 
 package block
 
-import (
-	context "context"
-
-	mock "github.com/stretchr/testify/mock"
-)
+import mock "github.com/stretchr/testify/mock"
 
 // MockFraudHandler is an autogenerated mock type for the FraudHandler type
 type MockFraudHandler struct {
@@ -21,9 +17,9 @@ func (_m *MockFraudHandler) EXPECT() *MockFraudHandler_Expecter {
 	return &MockFraudHandler_Expecter{mock: &_m.Mock}
 }
 
-// HandleFault provides a mock function with given fields: ctx, fault
-func (_m *MockFraudHandler) HandleFault(ctx context.Context, fault error) {
-	_m.Called(ctx, fault)
+// HandleFault provides a mock function with given fields: fault
+func (_m *MockFraudHandler) HandleFault(fault error) {
+	_m.Called(fault)
 }
 
 // MockFraudHandler_HandleFault_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HandleFault'
@@ -32,15 +28,14 @@ type MockFraudHandler_HandleFault_Call struct {
 }
 
 // HandleFault is a helper method to define mock.On call
-//   - ctx context.Context
 //   - fault error
-func (_e *MockFraudHandler_Expecter) HandleFault(ctx interface{}, fault interface{}) *MockFraudHandler_HandleFault_Call {
-	return &MockFraudHandler_HandleFault_Call{Call: _e.mock.On("HandleFault", ctx, fault)}
+func (_e *MockFraudHandler_Expecter) HandleFault(fault interface{}) *MockFraudHandler_HandleFault_Call {
+	return &MockFraudHandler_HandleFault_Call{Call: _e.mock.On("HandleFault", fault)}
 }
 
-func (_c *MockFraudHandler_HandleFault_Call) Run(run func(ctx context.Context, fault error)) *MockFraudHandler_HandleFault_Call {
+func (_c *MockFraudHandler_HandleFault_Call) Run(run func(fault error)) *MockFraudHandler_HandleFault_Call {
 	_c.Call.Run(func(args mock.Arguments) {
-		run(args[0].(context.Context), args[1].(error))
+		run(args[0].(error))
 	})
 	return _c
 }
@@ -50,7 +45,7 @@ func (_c *MockFraudHandler_HandleFault_Call) Return() *MockFraudHandler_HandleFa
 	return _c
 }
 
-func (_c *MockFraudHandler_HandleFault_Call) RunAndReturn(run func(context.Context, error)) *MockFraudHandler_HandleFault_Call {
+func (_c *MockFraudHandler_HandleFault_Call) RunAndReturn(run func(error)) *MockFraudHandler_HandleFault_Call {
 	_c.Run(run)
 	return _c
 }
diff --git a/testutil/block.go b/testutil/block.go
index f60257055..cdaa62ca3 100644
--- a/testutil/block.go
+++ b/testutil/block.go
@@ -129,7 +129,6 @@ func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypt
 	p2pClient.SetBlockValidator(p2pValidator.BlockValidator())
 	manager.P2PClient = p2pClient
 
-	manager.Ctx = context.Background()
 	if err = p2pClient.Start(context.Background()); err != nil {
 		return nil, err
 	}
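
With the context argument gone from HandleFault, expectations on the regenerated mock take a single matcher. A sketch of how a test could assert the new one-argument call, using the package paths from this patch (the test itself is hypothetical):

package block_test

import (
	"errors"
	"testing"

	blockmocks "github.com/dymensionxyz/dymint/mocks/github.com/dymensionxyz/dymint/block"
	"github.com/dymensionxyz/gerr-cosmos/gerrc"
	"github.com/stretchr/testify/mock"
)

func TestHandleFaultExpectation(t *testing.T) {
	handler := &blockmocks.MockFraudHandler{}
	// Previously: handler.On("HandleFault", mock.Anything, mock.MatchedBy(...)).
	// The ctx argument is dropped now:
	handler.On("HandleFault", mock.MatchedBy(func(err error) bool {
		return errors.Is(err, gerrc.ErrFault)
	})).Return()

	handler.HandleFault(gerrc.ErrFault)
	handler.AssertExpectations(t)
}
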