From eebbdc50d84c2d084626b1f7df247bb9af84926e Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Sun, 19 Jan 2025 17:57:13 +0530 Subject: [PATCH 01/60] init: columns to the block/eth2 processor --- .../gossip_processing/block_processor.nim | 92 ++++++++++++-- .../gossip_processing/eth2_processor.nim | 114 +++++++++++++++++- beacon_chain/spec/peerdas_helpers.nim | 103 +++++++++++++++- 3 files changed, 289 insertions(+), 20 deletions(-) diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim index ec33cd324e..77a1d2485d 100644 --- a/beacon_chain/gossip_processing/block_processor.nim +++ b/beacon_chain/gossip_processing/block_processor.nim @@ -9,7 +9,9 @@ import chronicles, chronos, metrics, - ../spec/[forks, helpers_el, signatures, signatures_batch], + ../spec/[ + forks, helpers_el, signatures, signatures_batch, + peerdas_helpers], ../sszdump from std/deques import Deque, addLast, contains, initDeque, items, len, shrink @@ -29,10 +31,14 @@ from ../consensus_object_pools/block_quarantine import addBlobless, addOrphan, addUnviable, pop, removeOrphan from ../consensus_object_pools/blob_quarantine import BlobQuarantine, hasBlobs, popBlobs, put +from ../consensus_object_pools/data_column_quarantine import + DataColumnQuarantine, hasMissingDataColumns, hasEnoughDataColumns, + popDataColumns, put from ../validators/validator_monitor import MsgSource, ValidatorMonitor, registerAttestationInBlock, registerBeaconBlock, registerSyncAggregateInBlock -from ../beacon_chain_db import getBlobSidecar, putBlobSidecar +from ../beacon_chain_db import getBlobSidecar, putBlobSidecar, + getDataColumnSidecar, putDataColumnSidecar from ../spec/state_transition_block import validate_blobs export sszdump, signatures_batch @@ -57,6 +63,7 @@ type BlockEntry = object blck*: ForkedSignedBeaconBlock blobs*: Opt[BlobSidecars] + columns*: Opt[DataColumnSidecars] maybeFinalized*: bool ## The block source claims the block has been finalized already resfut*: Future[Result[void, VerifierError]].Raising([CancelledError]) @@ -101,6 +108,7 @@ type getBeaconTime: GetBeaconTimeFn blobQuarantine: ref BlobQuarantine + dataColumnQuarantine: ref DataColumnQuarantine verifier: BatchVerifier lastPayload: Slot @@ -173,7 +181,9 @@ from ../consensus_object_pools/block_clearance import proc storeBackfillBlock( self: var BlockProcessor, signedBlock: ForkySignedBeaconBlock, - blobsOpt: Opt[BlobSidecars]): Result[void, VerifierError] = + blobsOpt: Opt[BlobSidecars], + dataColumnsOpt: Opt[DataColumnSidecars]): + Result[void, VerifierError] = # The block is certainly not missing any more self.consensusManager.quarantine[].missing.del(signedBlock.root) @@ -201,6 +211,47 @@ proc storeBackfillBlock( if not blobsOk: return err(VerifierError.Invalid) + var columnsOk = true + when typeof(signedBlock).kind >= ConsensusFork.Fulu: + var malformed_cols: seq[int] + if dataColumnsOpt.isSome: + let columns = dataColumnsOpt.get() + let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq + if columns.len > 0 and kzgCommits.len > 0: + for i in 0..= (NUMBER_OF_COLUMNS div 2): + let + recovered_cps = + recover_cells_and_proofs(columns.mapIt(it[])) + recovered_columns = + signedBlock.get_data_column_sidecars(recovered_cps.get) + + for mc in malformed_cols: + # copy the healed columns only into the + # sidecar spaces + columns[mc][] = recovered_columns[mc] + columnsOk = true + + if not columnsOk: + return err(VerifierError.Invalid) + let res = 
self.consensusManager.dag.addBackfillBlock(signedBlock) if res.isErr(): @@ -224,6 +275,11 @@ proc storeBackfillBlock( for b in blobs: self.consensusManager.dag.db.putBlobSidecar(b[]) + # Only store data columns after successfully establishing block validity + let columns = dataColumnsOpt.valueOr: DataColumnSidecars @[] + for c in columns: + self.consensusManager.dag.db.putDataColumnSidecar(c[]) + res from web3/engine_api_types import @@ -396,6 +452,7 @@ proc checkBloblessSignature( proc enqueueBlock*( self: var BlockProcessor, src: MsgSource, blck: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars], + data_columns: Opt[DataColumnSidecars], resfut: Future[Result[void, VerifierError]].Raising([CancelledError]) = nil, maybeFinalized = false, validationDur = Duration()) = @@ -403,7 +460,7 @@ proc enqueueBlock*( if forkyBlck.message.slot <= self.consensusManager.dag.finalizedHead.slot: # let backfill blocks skip the queue - these are always "fast" to process # because there are no state rewinds to deal with - let res = self.storeBackfillBlock(forkyBlck, blobs) + let res = self.storeBackfillBlock(forkyBlck, blobs, data_columns) resfut.complete(res) return @@ -411,6 +468,7 @@ proc enqueueBlock*( self.blockQueue.addLastNoWait(BlockEntry( blck: blck, blobs: blobs, + columns: data_columns, maybeFinalized: maybeFinalized, resfut: resfut, queueTick: Moment.now(), validationDur: validationDur, @@ -438,6 +496,7 @@ proc storeBlock( self: ref BlockProcessor, src: MsgSource, wallTime: BeaconTime, signedBlock: ForkySignedBeaconBlock, blobsOpt: Opt[BlobSidecars], + dataColumnsOpt: Opt[DataColumnSidecars], maybeFinalized = false, queueTick: Moment = Moment.now(), validationDur = Duration()): Future[Result[BlockRef, (VerifierError, ProcessingStatus)]] {.async: (raises: [CancelledError]).} = @@ -495,6 +554,9 @@ proc storeBlock( if blobsOpt.isSome: for blobSidecar in blobsOpt.get: self.blobQuarantine[].put(blobSidecar) + if dataColumnsOpt.isSome: + for dataColumnSidecar in dataColumnsOpt.get: + self.dataColumnQuarantine[].put(dataColumnSidecar) debug "Block quarantined", blockRoot = shortLog(signedBlock.root), blck = shortLog(signedBlock.message), @@ -552,7 +614,8 @@ proc storeBlock( if blobsOk: debug "Loaded parent block from storage", parent_root self[].enqueueBlock( - MsgSource.gossip, parentBlck.unsafeGet().asSigned(), blobs) + MsgSource.gossip, parentBlck.unsafeGet().asSigned(), blobs, + Opt.none(DataColumnSidecars)) return handleVerifierError(parent.error()) @@ -830,11 +893,13 @@ proc storeBlock( withBlck(quarantined): when typeof(forkyBlck).kind < ConsensusFork.Deneb: self[].enqueueBlock( - MsgSource.gossip, quarantined, Opt.none(BlobSidecars)) + MsgSource.gossip, quarantined, Opt.none(BlobSidecars), + Opt.none(DataColumnSidecars)) else: if len(forkyBlck.message.body.blob_kzg_commitments) == 0: self[].enqueueBlock( - MsgSource.gossip, quarantined, Opt.some(BlobSidecars @[])) + MsgSource.gossip, quarantined, Opt.some(BlobSidecars @[]), + Opt.some(DataColumnSidecars @[])) else: if (let res = checkBloblessSignature(self[], forkyBlck); res.isErr): warn "Failed to verify signature of unorphaned blobless block", @@ -844,7 +909,8 @@ proc storeBlock( if self.blobQuarantine[].hasBlobs(forkyBlck): let blobs = self.blobQuarantine[].popBlobs( forkyBlck.root, forkyBlck) - self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.some(blobs)) + self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.some(blobs), + Opt.none(DataColumnSidecars)) else: discard self.consensusManager.quarantine[].addBlobless( 
dag.finalizedHead.slot, forkyBlck) @@ -856,7 +922,7 @@ proc storeBlock( proc addBlock*( self: var BlockProcessor, src: MsgSource, blck: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], maybeFinalized = false, + blobs: Opt[BlobSidecars], dataColumns: Opt[DataColumnSidecars], maybeFinalized = false, validationDur = Duration()): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} = ## Enqueue a Gossip-validated block for consensus verification # Backpressure: @@ -868,7 +934,7 @@ proc addBlock*( # - RequestManager (missing ancestor blocks) # - API let resfut = newFuture[Result[void, VerifierError]]("BlockProcessor.addBlock") - enqueueBlock(self, src, blck, blobs, resfut, maybeFinalized, validationDur) + enqueueBlock(self, src, blck, blobs, dataColumns, resfut, maybeFinalized, validationDur) resfut # Event Loop @@ -889,8 +955,8 @@ proc processBlock( let res = withBlck(entry.blck): await self.storeBlock( - entry.src, wallTime, forkyBlck, entry.blobs, entry.maybeFinalized, - entry.queueTick, entry.validationDur) + entry.src, wallTime, forkyBlck, entry.blobs, entry.columns, + entry.maybeFinalized, entry.queueTick, entry.validationDur) if res.isErr and res.error[1] == ProcessingStatus.notCompleted: # When an execution engine returns an error or fails to respond to a @@ -901,7 +967,7 @@ proc processBlock( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/sync/optimistic.md#execution-engine-errors await sleepAsync(chronos.seconds(1)) self[].enqueueBlock( - entry.src, entry.blck, entry.blobs, entry.resfut, entry.maybeFinalized, + entry.src, entry.blck, entry.blobs, entry.columns, entry.resfut, entry.maybeFinalized, entry.validationDur) # To ensure backpressure on the sync manager, do not complete these futures. 
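    # The retried entry keeps its original blobs and columns, so the eventual
    # store attempt sees exactly the same sidecar data as the first one.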
return diff --git a/beacon_chain/gossip_processing/eth2_processor.nim b/beacon_chain/gossip_processing/eth2_processor.nim index 276d7e7c3c..cd3b295e51 100644 --- a/beacon_chain/gossip_processing/eth2_processor.nim +++ b/beacon_chain/gossip_processing/eth2_processor.nim @@ -8,14 +8,14 @@ {.push raises: [].} import - std/tables, + std/[tables, sequtils], chronicles, chronos, metrics, taskpools, - ../spec/[helpers, forks], + ../spec/[helpers, forks, peerdas_helpers], ../consensus_object_pools/[ blob_quarantine, block_clearance, block_quarantine, blockchain_dag, - attestation_pool, light_client_pool, sync_committee_msg_pool, - validator_change_pool], + data_column_quarantine, attestation_pool, light_client_pool, + sync_committee_msg_pool, validator_change_pool], ../validators/validator_pool, ../beacon_clock, "."/[gossip_validation, block_processor, batch_validation], @@ -46,6 +46,10 @@ declareCounter blob_sidecars_received, "Number of valid blobs processed by this node" declareCounter blob_sidecars_dropped, "Number of invalid blobs dropped by this node", labels = ["reason"] +declareCounter data_column_sidecars_received, + "Number of valid data columns processed by this node" +declareCounter data_column_sidecars_dropped, + "Number of invalid data columns dropped by this node", labels = ["reason"] declareCounter beacon_attester_slashings_received, "Number of valid attester slashings processed by this node" declareCounter beacon_attester_slashings_dropped, @@ -89,6 +93,10 @@ declareHistogram beacon_block_delay, declareHistogram blob_sidecar_delay, "Time(s) between slot start and blob sidecar reception", buckets = delayBuckets +declareHistogram data_column_sidecar_delay, + "Time(s) betweeen slot start and data column sidecar reception", + buckets = delayBuckets + type DoppelgangerProtection = object broadcastStartEpoch*: Epoch ##\ @@ -144,6 +152,8 @@ type blobQuarantine*: ref BlobQuarantine + dataColumnQuarantine*: ref DataColumnQuarantine + # Application-provided current time provider (to facilitate testing) getCurrentBeaconTime*: GetBeaconTimeFn @@ -247,9 +257,30 @@ proc processSignedBeaconBlock*( else: Opt.none(BlobSidecars) + let columns = + when typeof(signedBlock).kind >= ConsensusFork.Fulu: + if self.dataColumnQuarantine[].supernode: + if self.dataColumnQuarantine[].hasEnoughDataColumns(signedBlock): + Opt.some(self.dataColumnQuarantine[].popDataColumns(signedBlock.root, + signedBlock)) + else: + discard self.quarantine[].addColumnless(self.dag.finalizedHead.slot, + signedBlock) + return v + else: + if self.dataColumnQuarantine[].hasMissingDataColumns(signedBlock): + Opt.some(self.dataColumnQuarantine[].popDataColumns(signedBlock.root, + signedBlock)) + else: + discard self.quarantine[].addColumnless(self.dag.finalizedHead.slot, + signedBlock) + else: + Opt.none(DataColumnSidecars) + self.blockProcessor[].enqueueBlock( src, ForkedSignedBeaconBlock.init(signedBlock), blobs, + columns, maybeFinalized = maybeFinalized, validationDur = nanoseconds( (self.getCurrentBeaconTime() - wallTime).nanoseconds)) @@ -303,7 +334,8 @@ proc processBlobSidecar*( if self.blobQuarantine[].hasBlobs(forkyBlck): self.blockProcessor[].enqueueBlock( MsgSource.gossip, blobless, - Opt.some(self.blobQuarantine[].popBlobs(block_root, forkyBlck))) + Opt.some(self.blobQuarantine[].popBlobs(block_root, forkyBlck)), + Opt.none(DataColumnSidecars)) else: discard self.quarantine[].addBlobless( self.dag.finalizedHead.slot, forkyBlck) @@ -315,6 +347,78 @@ proc processBlobSidecar*( v +proc processDataColumnSidecar*( + self: var 
Eth2Processor, src: MsgSource, + dataColumnSidecar: DataColumnSidecar, subnet_id: uint64): ValidationRes = + template block_header: untyped = dataColumnSidecar.signed_block_header.message + + let + wallTime = self.getCurrentBeaconTime() + (_, wallSlot) = wallTime.toSlot() + + logScope: + dcs = shortLog(dataColumnSidecar) + wallSlot + + # Potential under/overflows are fine; would just create odd metrics and logs + let delay = wallTime - block_header.slot.start_beacon_time + debug "Data column received", delay + + let v = + self.dag.validateDataColumnSidecar(self.quarantine, self.dataColumnQuarantine, + dataColumnSidecar, wallTime, subnet_id) + + if v.isErr(): + debug "Dropping data column", error = v.error() + data_column_sidecars_dropped.inc(1, [$v.error[0]]) + return v + + debug "Data column validated, putting data column in quarantine" + self.dataColumnQuarantine[].put(newClone(dataColumnSidecar)) + + let block_root = hash_tree_root(block_header) + if (let o = self.quarantine[].popColumnless(block_root); o.isSome): + let columnless = o.unsafeGet() + withBlck(columnless): + when consensusFork >= ConsensusFork.Fulu: + if not self.dataColumnQuarantine[].supernode: + if self.dataColumnQuarantine[].hasMissingDataColumns(forkyBlck): + let gathered_columns = + self.dataColumnQuarantine[].gatherDataColumns(forkyBlck.root) + for gdc in gathered_columns: + self.dataColumnQuarantine[].put(newClone(gdc)) + self.blockProcessor[].enqueueBlock( + MsgSource.gossip, columnless, + Opt.none(BlobSidecars), + Opt.some(self.dataColumnQuarantine[].popDataColumns(block_root, + forkyBlck))) + elif self.dataColumnQuarantine[].hasEnoughDataColumns(forkyBlck): + let + columns = self.dataColumnQuarantine[].gatherDataColumns(forkyBlck.root) + if columns.len >= (NUMBER_OF_COLUMNS div 2) and + self.dataColumnQuarantine[].supernode: + let + recovered_cps = recover_cells_and_proofs(columns.mapIt(it[])) + reconstructed_columns = + get_data_column_sidecars(forkyBlck, recovered_cps.get) + for rc in reconstructed_columns: + if rc notin columns.mapIt(it[]): + self.dataColumnQuarantine[].put(newClone(rc)) + self.blockProcessor[].enqueueBlock( + MsgSource.gossip, columnless, + Opt.none(BlobSidecars), + Opt.some(self.dataColumnQuarantine[].popDataColumns(block_root, forkyBlck))) + else: + discard self.quarantine[].addColumnless( + self.dag.finalizedHead.slot, forkyBlck) + else: + raiseAssert "Could not have been added as columnless" + + data_column_sidecars_received.inc() + data_column_sidecar_delay.observe(delay.toFloatSeconds()) + + v + proc setupDoppelgangerDetection*(self: var Eth2Processor, slot: Slot) = # When another client's already running, this is very likely to detect # potential duplicate validators, which can trigger slashing. 
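The recovery branch in processDataColumnSidecar relies on the PeerDAS property that a block's full column set can be rebuilt once at least NUMBER_OF_COLUMNS div 2 sidecars are held. A minimal sketch of that decision, reusing the recover_cells_and_proofs and get_data_column_sidecars helpers touched by this patch (illustrative only, not part of the diff):

    proc tryReconstructColumns(forkyBlck: fulu.SignedBeaconBlock,
                               gathered: seq[DataColumnSidecar]):
                               Opt[seq[DataColumnSidecar]] =
      # Fewer than half of the columns held: reconstruction is not possible yet.
      if gathered.len < NUMBER_OF_COLUMNS div 2:
        return Opt.none(seq[DataColumnSidecar])
      # Recover the full cells/proofs matrix from the columns already held ...
      let cps = recover_cells_and_proofs(gathered).valueOr:
        return Opt.none(seq[DataColumnSidecar])
      # ... and re-derive every sidecar of the block from it.
      Opt.some(get_data_column_sidecars(forkyBlck, cps))

Only the supernode path takes this branch, since a node custodying fewer than half of the columns cannot reach the threshold from its own custody set.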
diff --git a/beacon_chain/spec/peerdas_helpers.nim b/beacon_chain/spec/peerdas_helpers.nim index e47cf32f64..d223b8d7df 100644 --- a/beacon_chain/spec/peerdas_helpers.nim +++ b/beacon_chain/spec/peerdas_helpers.nim @@ -146,8 +146,54 @@ proc recover_matrix*(partial_matrix: seq[MatrixEntry], ok(extended_matrix) +proc recover_cells_and_proofs*( + data_columns: seq[DataColumnSidecar]): + Result[seq[CellsAndProofs], cstring] = + + # This helper recovers blobs from the data column sidecars + if not (data_columns.len != 0): + return err("DataColumnSidecar: Length should not be 0") + + var + columnCount = data_columns.len + blobCount = data_columns[0].column.len + + for data_column in data_columns: + if not (blobCount == data_column.column.len): + return err ("DataColumns do not have the same length") + + var + recovered_cps = + newSeq[CellsAndProofs](blobCount) + + for blobIdx in 0.. Date: Mon, 20 Jan 2025 11:02:37 +0530 Subject: [PATCH 02/60] add columns to message router --- beacon_chain/networking/eth2_network.nim | 9 +++ beacon_chain/nimbus_beacon_node.nim | 10 +-- beacon_chain/validators/message_router.nim | 84 +++++++++++++++++++++- 3 files changed, 96 insertions(+), 7 deletions(-) diff --git a/beacon_chain/networking/eth2_network.nim b/beacon_chain/networking/eth2_network.nim index f521b5d40e..342c21c771 100644 --- a/beacon_chain/networking/eth2_network.nim +++ b/beacon_chain/networking/eth2_network.nim @@ -2774,6 +2774,15 @@ proc broadcastBlobSidecar*( node.forkDigestAtEpoch(contextEpoch), subnet_id) node.broadcast(topic, blob) +proc broadcastDataColumnSidecar*( + node: Eth2Node, subnet_id: uint64, data_column: DataColumnSidecar): + Future[SendResult] {.async: (raises: [CancelledError], raw: true).} = + let + contextEpoch = data_column.signed_block_header.message.slot.epoch + topic = getDataColumnSidecarTopic( + node.forkDigestAtEpoch(contextEpoch), subnet_id) + node.broadcast(topic, data_column) + proc broadcastSyncCommitteeMessage*( node: Eth2Node, msg: SyncCommitteeMessage, subcommitteeIdx: SyncSubcommitteeIndex): diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index c6a97c6d24..7eca28df61 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -445,14 +445,16 @@ proc initFullNode( blobQuarantine, getBeaconTime) blockVerifier = proc(signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], maybeFinalized: bool): + blobs: Opt[BlobSidecars], dataColumns: Opt[DataColumnSidecars], + maybeFinalized: bool): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} = # The design with a callback for block verification is unusual compared # to the rest of the application, but fits with the general approach # taken in the sync/request managers - this is an architectural compromise # that should probably be reimagined more holistically in the future. blockProcessor[].addBlock( - MsgSource.gossip, signedBlock, blobs, maybeFinalized = maybeFinalized) + MsgSource.gossip, signedBlock, blobs, dataColumns, + maybeFinalized = maybeFinalized) untrustedBlockVerifier = proc(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars], maybeFinalized: bool): Future[Result[void, VerifierError]] {. 
@@ -473,11 +475,11 @@ proc initFullNode( else: let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck) await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, - Opt.some(blobs), + Opt.some(blobs), Opt.none(DataColumnSidecars), maybeFinalized = maybeFinalized) else: await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, - Opt.none(BlobSidecars), + Opt.none(BlobSidecars), Opt.none(DataColumnSidecars), maybeFinalized = maybeFinalized) rmanBlockLoader = proc( blockRoot: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] = diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index 0965453b9f..31f2fbb212 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -12,11 +12,13 @@ import chronicles, metrics, ../spec/network, + ../spec/peerdas_helpers, ../consensus_object_pools/spec_cache, ../gossip_processing/eth2_processor, ../networking/eth2_network, ./activity_metrics, - ../spec/datatypes/deneb + ../spec/datatypes/[deneb, fulu] + from ../spec/state_transition_block import validate_blobs export eth2_processor, eth2_network @@ -89,7 +91,18 @@ proc routeSignedBeaconBlock*( ## Validate and broadcast beacon block, then add it to the block database ## Returns the new Head when block is added successfully to dag, none when ## block passes validation but is not added, and error otherwise - let wallTime = router[].getCurrentBeaconTime() + let + wallTime = router[].getCurrentBeaconTime() + dataColumnsOpt = + when typeof(blck).kind >= ConsensusFork.Fulu: + newClone get_data_column_sidecars(blck, + blobsOpt.get.mapIt( + KzgBlob(bytes: it.blob))) + else: + newClone Opt.none(seq[DataColumnSidecar]) + + # Data columns extraction done early in the function + # in order to use the columns throughout. block: let vindex = ValidatorIndex(blck.message.proposer_index) @@ -130,6 +143,28 @@ proc routeSignedBeaconBlock*( msg = res.error() return err(res.error()) + # May not be required as we are already + # kzg verifying the blobs once + elif typeof(blck).kind >= ConsensusFork.Fulu: + if dataColumnsOpt.isSome: + let + dataColumns = dataColumnsOpt.mapIt(it[].get) + + let kzgCommits = + signedBlock.message.body.blob_kzg_commitments.asSeq + if dataColumns.len > 0 and kzgCommits.len > 0: + for i in 0.. 
Date: Mon, 20 Jan 2025 11:24:40 +0530 Subject: [PATCH 03/60] add columns to initializers of Eth2 and BlockProcessor --- beacon_chain/consensus_object_pools/blockchain_list.nim | 1 + beacon_chain/gossip_processing/block_processor.nim | 2 ++ beacon_chain/gossip_processing/eth2_processor.nim | 2 ++ beacon_chain/nimbus_beacon_node.nim | 8 +++++--- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/beacon_chain/consensus_object_pools/blockchain_list.nim b/beacon_chain/consensus_object_pools/blockchain_list.nim index b7677f8b0b..f996f71e1a 100644 --- a/beacon_chain/consensus_object_pools/blockchain_list.nim +++ b/beacon_chain/consensus_object_pools/blockchain_list.nim @@ -14,6 +14,7 @@ import std/sequtils, stew/io2, chronicles, chronos, metrics, from ./block_pools_types import VerifierError, BlockData from ../spec/state_transition_block import validate_blobs +from ../spec/peerdas_helpers import verify_data_column_sidecar_kzg_proofs from std/os import `/` export beacon_chain_file diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim index 77a1d2485d..d61a9e72da 100644 --- a/beacon_chain/gossip_processing/block_processor.nim +++ b/beacon_chain/gossip_processing/block_processor.nim @@ -135,6 +135,7 @@ proc new*(T: type BlockProcessor, consensusManager: ref ConsensusManager, validatorMonitor: ref ValidatorMonitor, blobQuarantine: ref BlobQuarantine, + dataColumnQuarantine: ref DataColumnQuarantine, getBeaconTime: GetBeaconTimeFn): ref BlockProcessor = (ref BlockProcessor)( dumpEnabled: dumpEnabled, @@ -144,6 +145,7 @@ proc new*(T: type BlockProcessor, consensusManager: consensusManager, validatorMonitor: validatorMonitor, blobQuarantine: blobQuarantine, + dataColumnQuarantine: dataColumnQuarantine, getBeaconTime: getBeaconTime, verifier: batchVerifier[] ) diff --git a/beacon_chain/gossip_processing/eth2_processor.nim b/beacon_chain/gossip_processing/eth2_processor.nim index cd3b295e51..b7a0ed3a6e 100644 --- a/beacon_chain/gossip_processing/eth2_processor.nim +++ b/beacon_chain/gossip_processing/eth2_processor.nim @@ -177,6 +177,7 @@ proc new*(T: type Eth2Processor, lightClientPool: ref LightClientPool, quarantine: ref Quarantine, blobQuarantine: ref BlobQuarantine, + dataColumnQuarantine: ref DataColumnQuarantine, rng: ref HmacDrbgContext, getBeaconTime: GetBeaconTimeFn, taskpool: Taskpool @@ -195,6 +196,7 @@ proc new*(T: type Eth2Processor, lightClientPool: lightClientPool, quarantine: quarantine, blobQuarantine: blobQuarantine, + dataColumnQuarantine: dataColumnQuarantine, getCurrentBeaconTime: getBeaconTime, batchCrypto: BatchCrypto.new( rng = rng, diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 7eca28df61..4d2e76c7a6 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -442,7 +442,7 @@ proc initFullNode( blockProcessor = BlockProcessor.new( config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming, batchVerifier, consensusManager, node.validatorMonitor, - blobQuarantine, getBeaconTime) + blobQuarantine, dataColumnQuarantine, getBeaconTime) blockVerifier = proc(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars], dataColumns: Opt[DataColumnSidecars], @@ -457,7 +457,8 @@ proc initFullNode( maybeFinalized = maybeFinalized) untrustedBlockVerifier = proc(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars], - maybeFinalized: bool): Future[Result[void, VerifierError]] {. 
+ data_columns: Opt[DataColumnSidecars], maybeFinalized: bool): + Future[Result[void, VerifierError]] {. async: (raises: [CancelledError], raw: true).} = clist.untrustedBackfillVerifier(signedBlock, blobs, maybeFinalized) rmanBlockVerifier = proc(signedBlock: ForkedSignedBeaconBlock, @@ -503,7 +504,8 @@ proc initFullNode( config.doppelgangerDetection, blockProcessor, node.validatorMonitor, dag, attestationPool, validatorChangePool, node.attachedValidators, syncCommitteeMsgPool, - lightClientPool, quarantine, blobQuarantine, rng, getBeaconTime, taskpool) + lightClientPool, quarantine, blobQuarantine, dataColumnQuarantine, + rng, getBeaconTime, taskpool) syncManagerFlags = if node.config.longRangeSync != LongRangeSyncMode.Lenient: {SyncManagerFlag.NoGenesisSync} From 21d771a2a003ecd905a4e94346f8f08240fca2e0 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Mon, 20 Jan 2025 17:31:27 +0530 Subject: [PATCH 04/60] save progress --- .../gossip_processing/block_processor.nim | 41 ++++++++++++++++++- beacon_chain/nimbus_beacon_node.nim | 37 +++++++++++++---- tests/test_block_processor.nim | 16 +++++--- 3 files changed, 79 insertions(+), 15 deletions(-) diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim index d61a9e72da..c622f33a14 100644 --- a/beacon_chain/gossip_processing/block_processor.nim +++ b/beacon_chain/gossip_processing/block_processor.nim @@ -453,8 +453,7 @@ proc checkBloblessSignature( proc enqueueBlock*( self: var BlockProcessor, src: MsgSource, blck: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], - data_columns: Opt[DataColumnSidecars], + blobs: Opt[BlobSidecars], data_columns: Opt[DataColumnSidecars], resfut: Future[Result[void, VerifierError]].Raising([CancelledError]) = nil, maybeFinalized = false, validationDur = Duration()) = @@ -619,6 +618,26 @@ proc storeBlock( MsgSource.gossip, parentBlck.unsafeGet().asSigned(), blobs, Opt.none(DataColumnSidecars)) + var columnsOk = true + let columns = + withBlck(parentBlck.get()): + when consensusFork >= ConsensusFork.Fulu: + var data_column_sidecars: DataColumnSidecars + for i in 0 ..< forkyBlck.message.body.blob_kzg_commitments.len: + let data_column = DataColumnSidecar.new() + if not dag.db.getDataColumnSidecar(parent_root, i.uint64, data_column[]): + columnsOk = false + break + data_column_sidecars.add data_column + Opt.some data_column_sidecars + else: + Opt.none DataColumnSidecars + if columnsOk: + debug "Loaded parent block from storage", parent_root + self[].enqueueBlock( + MsgSource.gossip, parentBlck.unsafeGet().asSigned(), Opt.none(BlobSidecars), + columns) + return handleVerifierError(parent.error()) let @@ -707,6 +726,24 @@ proc storeBlock( msg = r.error() return err((VerifierError.Invalid, ProcessingStatus.completed)) + elif typeof(signedBlock).kind >= ConsensusFork.Fulu: + if dataColumnsOpt.isSome: + let columns = dataColumnsOpt.get() + let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq + if columns.len > 0 and kzgCommits.len > 0: + for i in 0..= ConsensusFork.Fulu: + if not dataColumnQuarantine[].supernode and + not dataColumnQuarantine[].hasMissingDataColumns(forkyBlck): + if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck): + err(VerifierError.UnviableFork) + else: + err(VeriferError.MissingParent) + elif dataColumnQuarantine[].supernode and + not dataColumnQuaratine[].hasEnoughDataColumns(forkyBlck): + if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck): + err(VerifierError.UnviableFork) + else: + 
err(VeriferError.MissingParent) + else: + let dataColumns = dataColumnQuarantine[].popDataColumns(forkyBlck.root, + forkyBlck) + await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, + Opt.none(BlobSidecars), Opt.some(dataColumns), + maybeFinalized = maybeFinalized) + else: await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, Opt.none(BlobSidecars), Opt.none(DataColumnSidecars), @@ -571,12 +599,6 @@ proc initFullNode( # during peer selection, sync with columns, and so on. That is why, # the rationale of populating it at boot and using it gloabally. - dataColumnQuarantine[].supernode = supernode - dataColumnQuarantine[].custody_columns = - node.network.nodeId.resolve_columns_from_custody_groups( - max(SAMPLES_PER_SLOT.uint64, - localCustodyGroups)) - if node.config.peerdasSupernode: node.network.loadCgcnetMetadataAndEnr(NUMBER_OF_CUSTODY_GROUPS.uint8) else: @@ -605,6 +627,7 @@ proc initFullNode( node.dag = dag node.list = clist node.blobQuarantine = blobQuarantine + node.dataColumnQuarantine = dataColumnQuarantine node.quarantine = quarantine node.attestationPool = attestationPool node.syncCommitteeMsgPool = syncCommitteeMsgPool diff --git a/tests/test_block_processor.nim b/tests/test_block_processor.nim index 67a392c666..bfff207a29 100644 --- a/tests/test_block_processor.nim +++ b/tests/test_block_processor.nim @@ -14,12 +14,14 @@ import unittest2, taskpools, ../beacon_chain/conf, - ../beacon_chain/spec/[beaconstate, forks, helpers, state_transition], - ../beacon_chain/spec/datatypes/deneb, + ../beacon_chain/spec/[ + beaconstate, forks, helpers, + peerdas_helpers, state_transition], + ../beacon_chain/spec/datatypes/[deneb, fulu], ../beacon_chain/gossip_processing/block_processor, ../beacon_chain/consensus_object_pools/[ attestation_pool, blockchain_dag, blob_quarantine, block_quarantine, - block_clearance, consensus_manager], + block_clearance, consensus_manager, data_column_quarantine], ../beacon_chain/el/el_manager, ./testutil, ./testdbutil, ./testblockutil @@ -43,6 +45,7 @@ suite "Block processor" & preset(): taskpool = Taskpool.new() quarantine = newClone(Quarantine.init()) blobQuarantine = newClone(BlobQuarantine()) + dataColumnQuarantine = newClone(DataColumnQuarantine()) attestationPool = newClone(AttestationPool.init(dag, quarantine)) elManager = new ELManager # TODO: initialise this properly actionTracker: ActionTracker @@ -58,14 +61,15 @@ suite "Block processor" & preset(): batchVerifier = BatchVerifier.new(rng, taskpool) processor = BlockProcessor.new( false, "", "", batchVerifier, consensusManager, - validatorMonitor, blobQuarantine, getTimeFn) + validatorMonitor, blobQuarantine, dataColumnQuarantine, + getTimeFn) processorFut = processor.runQueueProcessingLoop() asyncTest "Reverse order block add & get" & preset(): let missing = await processor[].addBlock( MsgSource.gossip, ForkedSignedBeaconBlock.init(b2), - Opt.none(BlobSidecars)) + Opt.none(BlobSidecars), Opt.none(DataColumnSidecars)) check: missing.error == VerifierError.MissingParent @@ -77,7 +81,7 @@ suite "Block processor" & preset(): let status = await processor[].addBlock( MsgSource.gossip, ForkedSignedBeaconBlock.init(b1), - Opt.none(BlobSidecars)) + Opt.none(BlobSidecars), Opt.none(DataColumnSidecars)) b1Get = dag.getBlockRef(b1.root) check: From d3309d5cf92e2228879515390d00f211bdfba92c Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Mon, 20 Jan 2025 17:40:07 +0530 Subject: [PATCH 05/60] save progress 2 --- beacon_chain/nimbus_beacon_node.nim | 6 ++++-- 1 file changed, 4 insertions(+), 2 
deletions(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index b8652d8944..515c34bf63 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -452,7 +452,7 @@ proc initFullNode( blobQuarantine, dataColumnQuarantine, getBeaconTime) blockVerifier = proc(signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], dataColumns: Opt[DataColumnSidecars], + blobs: Opt[BlobSidecars], data_columns: Opt[DataColumnSidecars], maybeFinalized: bool): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} = # The design with a callback for block verification is unusual compared @@ -460,7 +460,7 @@ proc initFullNode( # taken in the sync/request managers - this is an architectural compromise # that should probably be reimagined more holistically in the future. blockProcessor[].addBlock( - MsgSource.gossip, signedBlock, blobs, dataColumns, + MsgSource.gossip, signedBlock, blobs, data_columns, maybeFinalized = maybeFinalized) untrustedBlockVerifier = proc(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars], @@ -539,6 +539,8 @@ proc initFullNode( {SyncManagerFlag.NoGenesisSync} else: {} + + let syncManager = newSyncManager[Peer, PeerId]( node.network.peerPool, dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, From d4139e006dd13c37fb27fbedca0222fa42cad3a6 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Mon, 20 Jan 2025 17:52:39 +0530 Subject: [PATCH 06/60] add column to block verifier --- beacon_chain/nimbus_beacon_node.nim | 2 -- beacon_chain/sync/sync_queue.nim | 4 +++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 515c34bf63..803cbcaf7e 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -539,8 +539,6 @@ proc initFullNode( {SyncManagerFlag.NoGenesisSync} else: {} - - let syncManager = newSyncManager[Peer, PeerId]( node.network.peerPool, dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, diff --git a/beacon_chain/sync/sync_queue.nim b/beacon_chain/sync/sync_queue.nim index 75840c4bfe..6034f2bf12 100644 --- a/beacon_chain/sync/sync_queue.nim +++ b/beacon_chain/sync/sync_queue.nim @@ -27,7 +27,9 @@ type GetBoolCallback* = proc(): bool {.gcsafe, raises: [].} ProcessingCallback* = proc() {.gcsafe, raises: [].} BlockVerifier* = proc(signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], maybeFinalized: bool): + blobs: Opt[BlobSidecars], + data_columns: Opt[DataColumnSidecars], + maybeFinalized: bool): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} SyncQueueKind* {.pure.} = enum From 1197f099a0edb6827723e5cb309e99fab790c450 Mon Sep 17 00:00:00 2001 From: agnxsh Date: Tue, 21 Jan 2025 13:33:24 +0530 Subject: [PATCH 07/60] save progress, need to rework untrusted syncing --- .../blockchain_list.nim | 1 - beacon_chain/nimbus_beacon_node.nim | 23 ++- beacon_chain/spec/peerdas_helpers.nim | 15 +- beacon_chain/sync/sync_manager.nim | 193 +++++++++++++++++- beacon_chain/sync/sync_queue.nim | 111 +++++++++- 5 files changed, 327 insertions(+), 16 deletions(-) diff --git a/beacon_chain/consensus_object_pools/blockchain_list.nim b/beacon_chain/consensus_object_pools/blockchain_list.nim index f996f71e1a..b7677f8b0b 100644 --- a/beacon_chain/consensus_object_pools/blockchain_list.nim +++ b/beacon_chain/consensus_object_pools/blockchain_list.nim @@ -14,7 +14,6 @@ import std/sequtils, 
stew/io2, chronicles, chronos, metrics, from ./block_pools_types import VerifierError, BlockData from ../spec/state_transition_block import validate_blobs -from ../spec/peerdas_helpers import verify_data_column_sidecar_kzg_proofs from std/os import `/` export beacon_chain_file diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 803cbcaf7e..957280f967 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -433,6 +433,10 @@ proc initFullNode( node.network.nodeId.resolve_column_sets_from_custody_groups( max(SAMPLES_PER_SLOT.uint64, localCustodyGroups)) + custody_columns_list = + node.network.nodeId.resolve_column_list_from_custody_groups( + max(SAMPLES_PER_SLOT.uint64, + localCustodyGroups)) dataColumnQuarantine[].supernode = supernode dataColumnQuarantine[].custody_columns = node.network.nodeId.resolve_columns_from_custody_groups( @@ -540,8 +544,9 @@ proc initFullNode( else: {} syncManager = newSyncManager[Peer, PeerId]( - node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + node.network.peerPool, supernode, custody_columns_set, + custody_columns_list, dag.cfg.DENEB_FORK_EPOCH, + dag.cfg.FULU_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, SyncQueueKind.Forward, getLocalHeadSlot, getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, getFrontfillSlot, isWithinWeakSubjectivityPeriod, @@ -549,9 +554,10 @@ proc initFullNode( shutdownEvent = node.shutdownEvent, flags = syncManagerFlags) backfiller = newSyncManager[Peer, PeerId]( - node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, - SyncQueueKind.Backward, getLocalHeadSlot, + node.network.peerPool, supernode, custody_columns_set, + custody_columns_list, dag.cfg.DENEB_FORK_EPOCH, + dag.cfg.FULU_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + SyncQueueKind.Forward, getLocalHeadSlot, getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, getFrontfillSlot, isWithinWeakSubjectivityPeriod, dag.backfill.slot, blockVerifier, maxHeadAge = 0, @@ -563,9 +569,10 @@ proc initFullNode( else: getLocalWallSlot() untrustedManager = newSyncManager[Peer, PeerId]( - node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, - SyncQueueKind.Backward, getLocalHeadSlot, + node.network.peerPool, supernode, custody_columns_set, + custody_columns_list, dag.cfg.DENEB_FORK_EPOCH, + dag.cfg.FULU_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + SyncQueueKind.Forward, getLocalHeadSlot, getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getUntrustedBackfillSlot, getUntrustedFrontfillSlot, isWithinWeakSubjectivityPeriod, clistPivotSlot, untrustedBlockVerifier, maxHeadAge = 0, diff --git a/beacon_chain/spec/peerdas_helpers.nim b/beacon_chain/spec/peerdas_helpers.nim index d223b8d7df..3d3b447032 100644 --- a/beacon_chain/spec/peerdas_helpers.nim +++ b/beacon_chain/spec/peerdas_helpers.nim @@ -85,11 +85,20 @@ func resolve_columns_from_custody_groups*(node_id: NodeId, flattened func resolve_column_sets_from_custody_groups*(node_id: NodeId, - custody_group_count: CustodyIndex): - HashSet[ColumnIndex] = + custody_group_count: CustodyIndex): + HashSet[ColumnIndex] = node_id.resolve_columns_from_custody_groups(custody_group_count).toHashSet() +func resolve_column_list_from_custody_groups*(node_id: NodeId, + custody_group_count: CustodyIndex): + List[ColumnIndex, NUMBER_OF_COLUMNS] = + + let list = + List[ColumnIndex, 
NUMBER_OF_COLUMNS].init( + node_id.resolve_columns_from_custody_groups(custody_group_count)) + list + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#compute_matrix proc compute_matrix*(blobs: seq[KzgBlob]): Result[seq[MatrixEntry], cstring] = ## `compute_matrix` helper demonstrates the relationship @@ -391,7 +400,7 @@ func get_extended_sample_count*(samples_per_slot: int, # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof proc verify_data_column_sidecar_inclusion_proof*(sidecar: DataColumnSidecar): - Result[void, cstring] = + Result[void, string] = ## Verify if the given KZG Commitments are in included ## in the beacon block or not let gindex = diff --git a/beacon_chain/sync/sync_manager.nim b/beacon_chain/sync/sync_manager.nim index 2e4d17a77d..531534693a 100644 --- a/beacon_chain/sync/sync_manager.nim +++ b/beacon_chain/sync/sync_manager.nim @@ -12,12 +12,14 @@ import stew/base10, chronos, chronicles, results import ../spec/datatypes/[phase0, altair], ../spec/eth2_apis/rest_types, - ../spec/[helpers, forks, network], + ../spec/[helpers, forks, network, peerdas_helpers], ../networking/[peer_pool, peer_scores, eth2_network], ../gossip_processing/block_processor, ../beacon_clock, "."/[sync_protocol, sync_queue] +from ssz_serialization import types + export phase0, altair, merge, chronos, chronicles, results, helpers, peer_scores, sync_queue, forks, sync_protocol @@ -55,7 +57,11 @@ type SyncManager*[A, B] = ref object pool: PeerPool[A, B] + supernode*: bool + custody_columns_set*: HashSet[ColumnIndex] + custody_columns_list*: List[ColumnIndex, NUMBER_OF_COLUMNS] DENEB_FORK_EPOCH: Epoch + FULU_FORK_EPOCH: Epoch MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: uint64 responseTimeout: chronos.Duration maxHeadAge: uint64 @@ -90,6 +96,8 @@ type NetRes[List[ref ForkedSignedBeaconBlock, Limit MAX_REQUEST_BLOCKS]] BlobSidecarsRes = NetRes[List[ref BlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS_ELECTRA)]] + DataColumnSidecarsRes = + NetRes[List[ref DataColumnSidecar, Limit(MAX_REQUEST_DATA_COLUMN_SIDECARS)]] SyncBlockData* = object blocks*: seq[ref ForkedSignedBeaconBlock] @@ -132,7 +140,11 @@ proc initQueue[A, B](man: SyncManager[A, B]) = man.blockVerifier, 1, man.ident) proc newSyncManager*[A, B](pool: PeerPool[A, B], + supernode: bool, + custody_columns_set: HashSet[ColumnIndex], + custody_columns_list: List[ColumnIndex, NUMBER_OF_COLUMNS], denebEpoch: Epoch, + fuluEpoch: Epoch, minEpochsForBlobSidecarsRequests: uint64, direction: SyncQueueKind, getLocalHeadSlotCb: GetSlotCallback, @@ -157,7 +169,11 @@ proc newSyncManager*[A, B](pool: PeerPool[A, B], var res = SyncManager[A, B]( pool: pool, + supernode: supernode, + custody_columns_set: custody_columns_set, + custody_columns_list: custody_columns_list, DENEB_FORK_EPOCH: denebEpoch, + FULU_FORK_EPOCH: fuluEpoch, MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: minEpochsForBlobSidecarsRequests, getLocalHeadSlot: getLocalHeadSlotCb, getLocalWallSlot: getLocalWallSlotCb, @@ -203,9 +219,65 @@ proc shouldGetBlobs[A, B](man: SyncManager[A, B], s: Slot): bool = (wallEpoch < man.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS or epoch >= wallEpoch - man.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS) +proc checkPeerCustody(man: SyncManager, + peer: Peer): + bool = + # Returns TRUE if the peer custodies atleast, + # ONE of the common custody columns, straight + # away return TRUE if the peer is a supernode. 
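+  # The decision is driven by the peer's advertised cgc count: a count of
+  # NUMBER_OF_CUSTODY_GROUPS means the peer custodies every column, while for
+  # smaller counts the remote node id is mapped to its custody column set and
+  # compared against our own custody_columns_set.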
+ if man.supernode: + # For a supernode, it is always best/optimistic + # to filter other supernodes, rather than filter + # too many full nodes that have a subset of the + # custody columns + if peer.lookupCgcFromPeer() == + NUMBER_OF_CUSTODY_GROUPS.uint64: + return true + + else: + if peer.lookupCgcFromPeer() == + NUMBER_OF_CUSTODY_GROUPS.uint64: + return true + + elif peer.lookupCgcFromPeer() == + CUSTODY_REQUIREMENT.uint64: + + # Fetch the remote custody count + let remoteCustodyGroupCount = + peer.lookupCgcFromPeer() + + # Extract remote peer's nodeID from peerID + # Fetch custody groups from remote peer + let + remoteNodeId = fetchNodeIdFromPeerId(peer) + remoteCustodyColumns = + remoteNodeId.resolve_column_sets_from_custody_groups( + max(SAMPLES_PER_SLOT.uint64, + remoteCustodyGroupCount)) + + for local_column in man.custody_columns_set: + if local_column in remoteCustodyColumns: + return false + + return true + + else: + return false + proc shouldGetBlobs[A, B](man: SyncManager[A, B], r: SyncRequest[A]): bool = man.shouldGetBlobs(r.slot) or man.shouldGetBlobs(r.slot + (r.count - 1)) +proc shouldGetDataColumns[A, B](man: SyncManager[A,B], s: Slot): bool = + let + wallEpoch = man.getLocalWallSlot().epoch + epoch = s.epoch() + (epoch >= man.FULU_FORK_EPOCH) and + (wallEpoch < man.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS or + epoch >= wallEpoch - man.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS) + +proc shouldGetDataColumns[A, B](man: SyncManager[A, B], r: SyncRequest[A]): bool = + man.shouldGetDataColumns(r.slot) or man.shouldGetDataColumns(r.slot + (r.count - 1)) + proc getBlobSidecars[A, B](man: SyncManager[A, B], peer: A, req: SyncRequest[A]): Future[BlobSidecarsRes] {.async: (raises: [CancelledError], raw: true).} = @@ -222,6 +294,24 @@ proc getBlobSidecars[A, B](man: SyncManager[A, B], peer: A, debug "Requesting blobs sidecars from peer", request = req blobSidecarsByRange(peer, req.slot, req.count) +proc getDataColumnSidecars[A, B](man: SyncManager[A, B], + peer: A, + req: SyncRequest): + Future[DataColumnSidecarsRes] + {.async: (raises: [CancelledError], raw: true).} = + mixin getScore, `==` + + logScope: + peer_score = peer.getScore() + peer_speed = peer.netKbps() + sync_ident = man.direction + topics = "syncman" + + doAssert(not(req.isEmpty()), "Request must not be empty!") + debug "Requesting data column sidecars from peer", request = req + dataColumnSidecarsByRange(peer, req.slot, req.count, man.custody_columns_list) + + proc remainingSlots(man: SyncManager): uint64 = let first = man.getFirstSlot() @@ -281,6 +371,42 @@ func checkBlobs(blobs: seq[BlobSidecars]): Result[void, string] = ? blob_sidecar[].verify_blob_sidecar_inclusion_proof() ok() +func groupDataColumns*( + blocks: seq[ref ForkedSignedBeaconBlock], + data_columns: seq[ref DataColumnSidecar] +): Result[seq[DataColumnSidecars], string] = + var + grouped = newSeq[DataColumnSidecars](len(blocks)) + column_cursor = 0 + for block_idx, blck in blocks: + withBlck(blck[]): + when consensusFork >= ConsensusFork.Fulu: + template kzgs: untyped = forkyBlck.message.body.blob_kzg_commitments + if kzgs.len == 0: + continue + # Clients MUST include all data column sidecars of each block from which they include data column sidecars. + # The following data column sidecars, where they exist, MUST be sent in consecutive (slot, index) order. 
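+        # Advance column_cursor through the flat response, collecting the
+        # consecutive run of sidecars that belongs to this block into
+        # grouped[block_idx].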
+ let header = forkyBlck.toSignedBeaconBlockHeader() + for column_idx in 0..= ConsensusFork.Fulu: + if forkyBlck.message.body.blob_kzg_commitments.len > 0: + hasDataColumns = true + break + hasDataColumns + let blobData = if shouldGetBlobs: let blobs = await man.getBlobSidecars(peer, req) @@ -578,6 +717,54 @@ proc syncStep[A, B]( else: Opt.none(seq[BlobSidecars]) + let dataColumnData = + if shouldGetDataColumns and man.checkPeerCustody(peer): + let data_columns = await man.getDataColumnSidecars(peer, req) + if data_columns.isErr(): + peer.updateScore(PeerScoreNoValues) + man.queue.push(req) + debug "Failed to receive data_columns on request", + request = req, err = data_columns.error + return + let dataColumnData = data_columns.get().asSeq() + debug "Received data columns on request", + data_columns_count = len(dataColumnData), + data_columns_map = getShortMap(req, dataColumnData), + request = req + + if len(dataColumnData) > 0: + let slots = + mapIt(dataColumnData, it[].signed_block_header.message.slot) + checkDataColumnsResponse(req, slots).isOkOr: + peer.updateScore(PeerScoreBadResponse) + man.queue.push(req) + warn "Incorrect data column sequence received", + data_columns_count = len(dataColumnData), + data_columns_map = getShortMap(req, dataColumnData), + request = req, + reason = error + return + let groupedDataColumns = groupDataColumns(blockData, dataColumnData).valueOr: + peer.updateScore(PeerScoreNoValues) + man.queue.push(req) + info "Received data columns sequence is inconsistent", + data_columns_map = getShortMap(req, dataColumnData), + request = req, msg = error + return + + groupedDataColumns.checkDataColumns().isOkOr: + peer.updateScore(PeerScoreBadResponse) + man.queue.push(req) + warn "Recieved data columns verification failed", + data_columns_count = len(dataColumnData), + data_columns_map = getShortMap(req, dataColumnData), + request = req, + reason = error + return + Opt.some(groupedDataColumns) + else: + Opt.none(seq[DataColumnSidecars]) + if len(blockData) == 0 and man.direction == SyncQueueKind.Backward and req.contains(man.getSafeSlot()): # The sync protocol does not distinguish between: @@ -602,7 +789,9 @@ proc syncStep[A, B]( # TODO descore peers that lie maybeFinalized = lastSlot < peerFinalized - await man.queue.push(req, blockData, blobData, maybeFinalized, proc() = + await man.queue.push( + req, blockData, blobData, + dataColumnData, maybeFinalized, proc() = man.workers[index].status = SyncWorkerStatus.Processing) proc syncWorker[A, B]( diff --git a/beacon_chain/sync/sync_queue.nim b/beacon_chain/sync/sync_queue.nim index 6034f2bf12..20947c9fed 100644 --- a/beacon_chain/sync/sync_queue.nim +++ b/beacon_chain/sync/sync_queue.nim @@ -46,6 +46,7 @@ type request*: SyncRequest[T] data*: seq[ref ForkedSignedBeaconBlock] blobs*: Opt[seq[BlobSidecars]] + dataColumns*: Opt[seq[DataColumnSidecars]] GapItem*[T] = object start*: Slot @@ -134,6 +135,27 @@ proc getShortMap*[T](req: SyncRequest[T], res.add('|') res +proc getShortMap*[T](req: SyncRequest[T], + data: openArray[ref DataColumnSidecar]): string = + ## Returns all slot numbers in ``data`` as placement map. 
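+  ## Used when logging sync responses, to visualise which of the requested
+  ## slots actually returned data column sidecars.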
+ var res = newStringOfCap(req.count * MAX_BLOBS_PER_BLOCK) + var cur : uint64 = 0 + for slot in req.slot..= lenu64(data): + res.add('|') + continue + if slot == data[cur].signed_block_header.message.slot: + for k in cur..= lenu64(data) or slot != data[k].signed_block_header.message.slot: + res.add('|') + break + else: + inc(cur) + res.add('x') + else: + res.add('|') + res + proc contains*[T](req: SyncRequest[T], slot: Slot): bool {.inline.} = slot >= req.slot and slot < req.slot + req.count @@ -202,6 +224,38 @@ proc checkBlobsResponse*[T](req: SyncRequest[T], ok() +proc checkDataColumnsResponse*[T](req: SyncRequest[T], + data: openArray[Slot]): + Result[void, cstring] = + if data.len == 0: + # Impossible to verify empty response + return ok() + + static: doAssert MAX_BLOBS_PER_BLOCK_ELECTRA >= MAX_BLOBS_PER_BLOCK + + if lenu64(data) > (req.count * MAX_BLOBS_PER_BLOCK_ELECTRA): + # Number of data columns in response should be less or equal to + # number of requested (blocks * MAX_BLOCKS_PER_BLOCK_ELECTRA). + return err("Too many data columns received") + + var + pSlot = data[0] + counter = 0'u64 + for slot in data: + if (slot < req.slot) or (slot >= req.slot + req.count): + return err("Some of the data columns are not in requested range") + if slot < pSlot: + return err("incorrect order") + if slot == pSlot: + inc counter + if counter > MAX_BLOBS_PER_BLOCK_ELECTRA: + return err("Number of data columns in the block exceeds the limit") + else: + counter = 1'u64 + pSlot = slot + + ok() + proc init[T](t1: typedesc[SyncRequest], kind: SyncQueueKind, start: Slot, finish: Slot, t2: typedesc[T]): SyncRequest[T] = let count = finish - start + 1'u64 @@ -581,6 +635,14 @@ func getOpt(blobs: Opt[seq[BlobSidecars]], i: int): Opt[BlobSidecars] = else: Opt.none(BlobSidecars) +# This belongs inside the blocks iterator below, but can't be there due to +# https://github.com/nim-lang/Nim/issues/21242 +func getOpt(data_columns: Opt[seq[DataColumnSidecars]], i: int): Opt[DataColumnSidecars] = + if data_columns.isSome: + Opt.some(data_columns.get()[i]) + else: + Opt.none DataColumnSidecars + iterator blocks[T](sq: SyncQueue[T], sr: SyncResult[T]): (ref ForkedSignedBeaconBlock, Opt[BlobSidecars]) = case sq.kind @@ -591,6 +653,16 @@ iterator blocks[T](sq: SyncQueue[T], for i in countdown(len(sr.data) - 1, 0): yield (sr.data[i], sr.blobs.getOpt(i)) +iterator das_blocks[T](sq: SyncQueue[T], + sr: SyncResult[T]): (ref ForkedSignedBeaconBlock, Opt[DataColumnSidecars]) = + case sq.kind + of SyncQueueKind.Forward: + for i in countup(0, len(sr.data) - 1): + yield (sr.data[i], sr.data_columns.getOpt(i)) + of SyncQueueKind.Backward: + for i in countdown(len(sr.data) - 1, 0): + yield (sr.data[i], sr.data_columns.getOpt(i)) + proc advanceOutput*[T](sq: SyncQueue[T], number: uint64) = case sq.kind of SyncQueueKind.Forward: @@ -644,6 +716,7 @@ func numAlreadyKnownSlots[T](sq: SyncQueue[T], sr: SyncRequest[T]): uint64 = proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T], data: seq[ref ForkedSignedBeaconBlock], blobs: Opt[seq[BlobSidecars]], + dataColumns: Opt[seq[DataColumnSidecars]], maybeFinalized: bool = false, processingCb: ProcessingCallback = nil) {.async: (raises: [CancelledError]).} = logScope: @@ -671,7 +744,8 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T], # SyncQueue reset happens. We are exiting to wake up sync-worker. 
return else: - let syncres = SyncResult[T](request: sr, data: data, blobs: blobs) + let syncres = SyncResult[T](request: sr, data: data, blobs: blobs, + dataColumns: dataColumns) sq.readyQueue.push(syncres) break @@ -722,7 +796,7 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T], var i=0 for blk, blb in sq.blocks(item): - res = await sq.blockVerifier(blk[], blb, maybeFinalized) + res = await sq.blockVerifier(blk[], blb, Opt.none(DataColumnSidecars), maybeFinalized) inc(i) if res.isOk(): @@ -752,6 +826,39 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T], req.item.updateScore(PeerScoreBadValues) break + var counter = 0 + for blk, col in sq.das_blocks(item): + res = + await sq.blockVerifier(blk[], Opt.none(BlobSidecars), col, maybeFinalized) + inc counter + + if res.isOk: + goodBlock = some(blk[].slot) + else: + case res.error() + of VerifierError.MissingParent: + missingParentSlot = some(blk[].slot) + break + of VerifierError.Duplicate: + # Keep going, happens naturally + discard + of VerifierError.UnviableFork: + # Keep going so as to register other unviable blocks with the + # quarantine + if unviableBlock.isNone: + # Remember the first unviable block, so we can log it + unviableBlock = some((blk[].root, blk[].slot)) + + of VerifierError.Invalid: + hasInvalidBlock = true + + let req = item.request + notice "Received invalid sequence of blocks", request = req, + blocks_count = len(item.data), + blocks_map = getShortMap(req, item.data) + req.item.updateScore(PeerScoreBadValues) + break + # When errors happen while processing blocks, we retry the same request # with, hopefully, a different peer let retryRequest = From e73379eabbd11e08dd6b87c4bc8896b4ea853420 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Tue, 21 Jan 2025 16:19:56 +0530 Subject: [PATCH 08/60] add column support to light forward sync --- beacon_chain/beacon_chain_file.nim | 71 ++++++++++++++++++- .../block_pools_types.nim | 1 + .../blockchain_list.nim | 47 +++++++++--- .../gossip_processing/eth2_processor.nim | 1 + beacon_chain/nimbus_beacon_node.nim | 4 +- beacon_chain/sync/sync_manager.nim | 1 + beacon_chain/sync/sync_overseer.nim | 10 ++- 7 files changed, 119 insertions(+), 16 deletions(-) diff --git a/beacon_chain/beacon_chain_file.nim b/beacon_chain/beacon_chain_file.nim index f19d4cf84c..52cf9be55c 100644 --- a/beacon_chain/beacon_chain_file.nim +++ b/beacon_chain/beacon_chain_file.nim @@ -79,6 +79,8 @@ const int(ConsensusFork.Phase0) .. int(high(ConsensusFork)) BlobForkCodeRange = MaxForksCount .. (MaxForksCount + int(high(ConsensusFork)) - int(ConsensusFork.Deneb)) + DataColumnForkCodeRange = + MaxForksCount .. (MaxForksCount + int(high(ConsensusFork)) - int(ConsensusFork.Fulu)) func getBlockForkCode(fork: ConsensusFork): uint64 = uint64(fork) @@ -94,6 +96,13 @@ func getBlobForkCode(fork: ConsensusFork): uint64 = of ConsensusFork.Phase0 .. ConsensusFork.Capella: raiseAssert "Blobs are not supported for the fork" +func getDataColumnForkCode(fork: ConsensusFork): uint64 = + case fork + of ConsensusFork.Fulu: + uint64(MaxForksCount) + of ConsensusFork.Phase0 .. 
ConsensusFork.Electra: + raiseAssert "Data columns are not supported for the fork" + proc init(t: typedesc[ChainFileError], k: ChainFileErrorType, m: string): ChainFileError = ChainFileError(kind: k, message: m) @@ -134,7 +143,8 @@ proc checkKind(kind: uint64): Result[void, string] = if res > uint64(high(int)): return err("Unsuppoted chunk kind value") int(res) - if (hkind in BlockForkCodeRange) or (hkind in BlobForkCodeRange): + if (hkind in BlockForkCodeRange) or (hkind in BlobForkCodeRange) or + (hkind in DataColumnForkCodeRange): ok() else: err("Unsuppoted chunk kind value") @@ -260,6 +270,12 @@ template getBlobChunkKind(kind: ConsensusFork, last: bool): uint64 = else: getBlobForkCode(kind) +template getDataColumnChunkKind(kind: ConsensusFork,last: bool): uint64 = + if last: + maskKind(getDataColumnForkCode(kind)) + else: + getDataColumnForkCode(kind) + proc getBlockConsensusFork(header: ChainFileHeader): ConsensusFork = let hkind = unmaskKind(header.kind) if int(hkind) in BlockForkCodeRange: @@ -275,6 +291,10 @@ template isBlob(h: ChainFileHeader | ChainFileFooter): bool = let hkind = unmaskKind(h.kind) int(hkind) in BlobForkCodeRange +template isDataColumn(h: ChainFileHeader | ChainFileFooter): bool = + let hkind = unmaskKind(h.kind) + int(hkind) in DataColumnForkCodeRange + template isLast(h: ChainFileHeader | ChainFileFooter): bool = h.kind.isLast() @@ -291,7 +311,8 @@ proc setTail*(chandle: var ChainFileHandle, bdata: BlockData) = chandle.data.tail = Opt.some(bdata) proc store*(chandle: ChainFileHandle, signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars]): Result[void, string] = + blobs: Opt[BlobSidecars], dataColumns: Opt[DataColumnSidecars]): + Result[void, string] = let origOffset = updateFilePos(chandle.handle, 0'i64, SeekPosition.SeekEnd).valueOr: return err(ioErrorMsg(error)) @@ -342,6 +363,36 @@ proc store*(chandle: ChainFileHandle, signedBlock: ForkedSignedBeaconBlock, discard fsync(chandle.handle) return err(IncompleteWriteError) + if dataColumns.isSome(): + let dataColumnSidecars = + dataColumns.get + for index, dataColumn in dataColumnSidecars.pairs(): + let + kind = + getDataColumnChunkKind(signedBlock.kind, (index + 1) == + len(dataColumnSidecars)) + (data, plainSize) = + block: + let res = SSZ.encode(dataColumn[]) + (snappy.encode(res), len(res)) + slot = dataColumn[].signed_block_header.message.slot + buffer = Chunk.init(kind, uint64(slot), uint32(plainSize), data) + + setFilePos(chandle.handle, 0'i64, SeekPosition.SeekEnd).isOkOr: + discard truncate(chandle.handle, origOffset) + discard fsync(chandle.handle) + return err(ioErrorMsg(error)) + + let + wrote = writeFile(chandle.handle, buffer).valueOr: + discard truncate(chandle.handle, origOffset) + discard fsync(chandle.handle) + return err(ioErrorMsg(error)) + if wrote != uint(len(buffer)): + discard truncate(chandle.handle, origOffset) + discard fsync(chandle.handle) + return err(IncompleteWriteError) + fsync(chandle.handle).isOkOr: discard truncate(chandle.handle, origOffset) return err(ioErrorMsg(error)) @@ -550,6 +601,22 @@ proc decodeBlob( return err("Incorrect blob format") ok(blob) +proc decodeDataColumn( + header: ChainFileHeader, + data: openArray[byte], +): Result[DataColumnSidecar, string] = + if header.plainSize > uint32(MaxChunkSize): + return err("Size of data column is enormously big") + + let + decompressed = snappy.decode(data, uint32(header.plainSize)) + dataColumn = + try: + SSZ.decode(decompressed, DataColumnSidecar) + except SerializationError: + return err("Incorrect data column 
format") + ok(dataColumn) + proc getChainFileTail*(handle: IoHandle): Result[Opt[BlockData], string] = var sidecars: BlobSidecars while true: diff --git a/beacon_chain/consensus_object_pools/block_pools_types.nim b/beacon_chain/consensus_object_pools/block_pools_types.nim index 6235ea2bf3..cad7cd8bae 100644 --- a/beacon_chain/consensus_object_pools/block_pools_types.nim +++ b/beacon_chain/consensus_object_pools/block_pools_types.nim @@ -288,6 +288,7 @@ type BlockData* = object blck*: ForkedSignedBeaconBlock blob*: Opt[BlobSidecars] + dataColumn*: Opt[DataColumnSidecars] OnBlockAdded*[T: ForkyTrustedSignedBeaconBlock] = proc( blckRef: BlockRef, blck: T, epochRef: EpochRef, diff --git a/beacon_chain/consensus_object_pools/blockchain_list.nim b/beacon_chain/consensus_object_pools/blockchain_list.nim index b7677f8b0b..41284b956a 100644 --- a/beacon_chain/consensus_object_pools/blockchain_list.nim +++ b/beacon_chain/consensus_object_pools/blockchain_list.nim @@ -9,6 +9,7 @@ import std/sequtils, stew/io2, chronicles, chronos, metrics, ../spec/forks, + ../spec/peerdas_helpers, ../[beacon_chain_file, beacon_clock], ../sszdump @@ -128,16 +129,17 @@ proc setTail*(clist: ChainListRef, bdata: BlockData) = clist.handle = Opt.some(handle) proc store*(clist: ChainListRef, signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars]): Result[void, string] = + blobs: Opt[BlobSidecars], dataColumns: Opt[DataColumnSidecars]): + Result[void, string] = if clist.handle.isNone(): let filename = clist.path.chainFilePath() flags = {ChainFileFlag.Repair, ChainFileFlag.OpenAlways} handle = ? ChainFileHandle.init(filename, flags) clist.handle = Opt.some(handle) - store(handle, signedBlock, blobs) + store(handle, signedBlock, blobs, dataColumns) else: - store(clist.handle.get(), signedBlock, blobs) + store(clist.handle.get(), signedBlock, blobs, dataColumns) proc checkBlobs(signedBlock: ForkedSignedBeaconBlock, blobsOpt: Opt[BlobSidecars]): Result[void, VerifierError] = @@ -167,9 +169,31 @@ proc checkBlobs(signedBlock: ForkedSignedBeaconBlock, return err(VerifierError.Invalid) ok() +proc checkDataColumns*(signedBlock: ForkedSignedBeaconBlock, + dataColumnsOpt: Opt[DataColumnSidecars]): + Result[void, VerifierError] = + withBlck(signedBlock): + when consensusFork >= ConsensusFork.Fulu: + if dataColumnsOpt.isSome: + let dataColumns = dataColumnsOpt.get() + if dataColumns.len > 0: + for i in 0.. 
Date: Tue, 21 Jan 2025 18:32:00 +0530 Subject: [PATCH 09/60] save progress test sync manager --- tests/test_sync_manager.nim | 98 ++++++++++++++++++++++++++++--------- 1 file changed, 74 insertions(+), 24 deletions(-) diff --git a/tests/test_sync_manager.nim b/tests/test_sync_manager.nim index 651289e5f8..48398f7ef8 100644 --- a/tests/test_sync_manager.nim +++ b/tests/test_sync_manager.nim @@ -13,7 +13,8 @@ import unittest2 import chronos import ../beacon_chain/gossip_processing/block_processor, ../beacon_chain/sync/sync_manager, - ../beacon_chain/spec/forks + ../beacon_chain/spec/forks, + ../beacon_chain/spec/peerdas_helpers type SomeTPeer = ref object @@ -50,7 +51,7 @@ func collector(queue: AsyncQueue[BlockEntry]): BlockVerifier = # testing goes, this is risky because it might introduce differences between # the BlockProcessor and this test proc verify(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars], - maybeFinalized: bool): + dataColumns: Opt[DataColumnSidecars], maybeFinalized: bool): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} = let fut = Future[Result[void, VerifierError]].Raising([CancelledError]).init() try: queue.addLastNoWait(BlockEntry(blck: signedBlock, resfut: fut)) @@ -71,6 +72,18 @@ suite "SyncManager test suite": curslot = curslot + 1'u64 res + func createPeerdasChain(start, finish: Slot): + seq[ref ForkedSignedBeaconBlock] = + doAssert(start <= finish) + let count = int(finish - start + 1'u64) + var res = newSeq[ref ForkedSignedBeaconBlock](count) + var curslot = start + for item in res.mitems(): + item = newClone ForkedSignedBeaconBlock(kind: ConsensusFork.Fulu) + item[].fuluData.message.slot = curslot + curslot = curslot + 1'u64 + res + func createBlobs( blocks: var seq[ref ForkedSignedBeaconBlock], slots: seq[Slot] ): seq[ref BlobSidecar] = @@ -98,6 +111,21 @@ suite "SyncManager test suite": inc sidecarIdx res + func createDataColumns( + blocks: var seq[ref ForkedSignedBeaconBlock], slots: seq[Slot] + ): seq[ref DataColumnSidecar] = + let blob_sidecars = createBlobs(blocks, slots) + var res = newSeq[ref DataColumnSidecar](len(slots)) + for blb_scr in blob_sidecars: + withBlck(blck[]): + when consensusFork >= ConsensusFork.Fulu: + var count = 0 + res[count] = get_data_column_sidecars(forkyBlck, + blb_scr.mapIt( + KzgBlob(bytes: it[].blob))) + inc count + res + func getSlice(chain: openArray[ref ForkedSignedBeaconBlock], startSlot: Slot, request: SyncRequest[SomeTPeer]): seq[ref ForkedSignedBeaconBlock] = let @@ -354,7 +382,8 @@ suite "SyncManager test suite": if request.isEmpty(): break await queue.push(request, getSlice(chain, start, request), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await validatorFut.cancelAndWait() waitFor runSmokeTest() @@ -429,7 +458,8 @@ suite "SyncManager test suite": var r13 = queue.pop(finishSlot, p3) var f13 = queue.push(r13, chain.getSlice(startSlot, r13), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await sleepAsync(100.milliseconds) check: f13.finished == false @@ -438,7 +468,8 @@ suite "SyncManager test suite": of SyncQueueKind.Backward: counter == int(finishSlot) var f11 = queue.push(r11, chain.getSlice(startSlot, r11), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await sleepAsync(100.milliseconds) check: case kkind @@ -448,7 +479,8 @@ suite "SyncManager test suite": f13.finished == false var f12 = queue.push(r12, 
chain.getSlice(startSlot, r12), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await allFutures(f11, f12, f13) check: f12.finished == true and f12.failed == false @@ -551,7 +583,8 @@ suite "SyncManager test suite": check response[0][].slot >= getFowardSafeSlotCb() else: check response[^1][].slot <= getBackwardSafeSlotCb() - await queue.push(request, response, Opt.none(seq[BlobSidecars])) + await queue.push(request, response, Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await validatorFut.cancelAndWait() waitFor runTest() @@ -634,7 +667,8 @@ suite "SyncManager test suite": # Handle request 1. Should be re-enqueued as it simulates `Invalid`. let response1 = getSlice(chain, start, request1) - await queue.push(request1, response1, Opt.none(seq[BlobSidecars])) + await queue.push(request1, response1, Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) check debtLen(queue) == request2.count + request1.count # Request 1 should be discarded as it is no longer relevant. @@ -646,7 +680,8 @@ suite "SyncManager test suite": # Handle request 3. Should be re-enqueued as it simulates `Invalid`. let response3 = getSlice(chain, start, request3) - await queue.push(request3, response3, Opt.none(seq[BlobSidecars])) + await queue.push(request3, response3, Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) check debtLen(queue) == request3.count # Request 2 should be re-issued. @@ -660,7 +695,8 @@ suite "SyncManager test suite": # Handle request 4. Should be re-enqueued as it simulates `Invalid`. let response4 = getSlice(chain, start, request4) - await queue.push(request4, response4, Opt.none(seq[BlobSidecars])) + await queue.push(request4, response4, Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) check debtLen(queue) == request4.count # Advance `safeSlot` out of band. 
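Note on the test changes above: every queue.push call now threads a trailing Opt.none(seq[DataColumnSidecars]) through, mirroring the extra data-column argument added to the push and blockVerifier signatures, while pre-Fulu call sites stay valid by passing none for both sidecar kinds. A minimal, self-contained sketch of that shape, using std/options as a stand-in for stew's Opt and stub types instead of the real sidecar types:

    import std/options

    type
      StubBlock = object
        slot: uint64
      StubBlobs = seq[string]        # stand-in for BlobSidecars
      StubColumns = seq[string]      # stand-in for DataColumnSidecars

    proc push(blocks: seq[StubBlock],
              blobs: Option[seq[StubBlobs]],
              columns: Option[seq[StubColumns]]): int =
      # count how many per-block sidecar bundles came along with this response
      result = 0
      if blobs.isSome:
        result += blobs.get().len
      if columns.isSome:
        result += columns.get().len

    let blocks = @[StubBlock(slot: 1), StubBlock(slot: 2)]
    # pre-Fulu call sites: neither blobs nor columns
    doAssert push(blocks, none(seq[StubBlobs]), none(seq[StubColumns])) == 0
    # Fulu-era call site: columns only
    doAssert push(blocks, none(seq[StubBlobs]),
                  some(@[@["col0"], @["col1"]])) == 2
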
@@ -777,14 +813,16 @@ suite "SyncManager test suite": var r14 = queue.pop(finishSlot, p4) var f14 = queue.push(r14, chain.getSlice(startSlot, r14), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await sleepAsync(100.milliseconds) check: f14.finished == false counter == int(startSlot) var f12 = queue.push(r12, chain.getSlice(startSlot, r12), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await sleepAsync(100.milliseconds) check: counter == int(startSlot) @@ -792,7 +830,8 @@ suite "SyncManager test suite": f14.finished == false var f11 = queue.push(r11, chain.getSlice(startSlot, r11), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await allFutures(f11, f12) check: counter == int(startSlot + chunkSize + chunkSize) @@ -804,7 +843,8 @@ suite "SyncManager test suite": withBlck(missingSlice[0][]): forkyBlck.message.proposer_index = 0xDEADBEAF'u64 var f13 = queue.push(r13, missingSlice, - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await allFutures(f13, f14) check: f11.finished == true and f11.failed == false @@ -826,17 +866,20 @@ suite "SyncManager test suite": check r18.isEmpty() == true var f17 = queue.push(r17, chain.getSlice(startSlot, r17), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await sleepAsync(100.milliseconds) check f17.finished == false var f16 = queue.push(r16, chain.getSlice(startSlot, r16), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await sleepAsync(100.milliseconds) check f16.finished == false var f15 = queue.push(r15, chain.getSlice(startSlot, r15), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await allFutures(f15, f16, f17) check: f15.finished == true and f15.failed == false @@ -883,7 +926,8 @@ suite "SyncManager test suite": # Push a single request that will fail with all blocks being unviable var f11 = queue.push(r11, chain.getSlice(startSlot, r11), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) discard await f11.withTimeout(1.seconds) check: @@ -949,14 +993,16 @@ suite "SyncManager test suite": var r14 = queue.pop(finishSlot, p4) var f14 = queue.push(r14, chain.getSlice(startSlot, r14), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await sleepAsync(100.milliseconds) check: f14.finished == false counter == int(finishSlot) var f12 = queue.push(r12, chain.getSlice(startSlot, r12), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await sleepAsync(100.milliseconds) check: counter == int(finishSlot) @@ -964,7 +1010,8 @@ suite "SyncManager test suite": f14.finished == false var f11 = queue.push(r11, chain.getSlice(startSlot, r11), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await allFutures(f11, f12) check: counter == int(finishSlot - chunkSize - chunkSize) @@ -975,7 +1022,8 @@ suite "SyncManager test suite": var missingSlice = chain.getSlice(startSlot, r13) withBlck(missingSlice[0][]): forkyBlck.message.proposer_index = 0xDEADBEAF'u64 - var f13 = queue.push(r13, missingSlice, Opt.none(seq[BlobSidecars])) + var f13 = queue.push(r13, missingSlice, Opt.none(seq[BlobSidecars]), + 
Opt.none(seq[DataColumnSidecars])) await allFutures(f13, f14) check: f11.finished == true and f11.failed == false @@ -993,12 +1041,14 @@ suite "SyncManager test suite": check r17.isEmpty() == true var f16 = queue.push(r16, chain.getSlice(startSlot, r16), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await sleepAsync(100.milliseconds) check f16.finished == false var f15 = queue.push(r15, chain.getSlice(startSlot, r15), - Opt.none(seq[BlobSidecars])) + Opt.none(seq[BlobSidecars]), + Opt.none(seq[DataColumnSidecars])) await allFutures(f15, f16) check: f15.finished == true and f15.failed == false From c1a20133561597850260ccae4c96f7633a63c9f3 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Tue, 21 Jan 2025 20:14:42 +0530 Subject: [PATCH 10/60] fix createDataColumns --- tests/test_sync_manager.nim | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tests/test_sync_manager.nim b/tests/test_sync_manager.nim index 48398f7ef8..e166afbb57 100644 --- a/tests/test_sync_manager.nim +++ b/tests/test_sync_manager.nim @@ -116,14 +116,15 @@ suite "SyncManager test suite": ): seq[ref DataColumnSidecar] = let blob_sidecars = createBlobs(blocks, slots) var res = newSeq[ref DataColumnSidecar](len(slots)) - for blb_scr in blob_sidecars: - withBlck(blck[]): - when consensusFork >= ConsensusFork.Fulu: - var count = 0 - res[count] = get_data_column_sidecars(forkyBlck, - blb_scr.mapIt( - KzgBlob(bytes: it[].blob))) - inc count + for blck in blocks: + for blb_scr in blob_sidecars: + withBlck(blck[]): + when consensusFork >= ConsensusFork.Fulu: + var count = 0 + res[count] = get_data_column_sidecars(forkyBlck, + blb_scr.mapIt( + KzgBlob(bytes: it[].blob))) + inc count res func getSlice(chain: openArray[ref ForkedSignedBeaconBlock], startSlot: Slot, From 38b042132504529d2ad0f50ad5f44ef377fbee18 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Tue, 21 Jan 2025 20:47:00 +0530 Subject: [PATCH 11/60] fix more --- tests/test_sync_manager.nim | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/tests/test_sync_manager.nim b/tests/test_sync_manager.nim index e166afbb57..43d810e77a 100644 --- a/tests/test_sync_manager.nim +++ b/tests/test_sync_manager.nim @@ -111,20 +111,33 @@ suite "SyncManager test suite": inc sidecarIdx res - func createDataColumns( + proc createDataColumns( blocks: var seq[ref ForkedSignedBeaconBlock], slots: seq[Slot] ): seq[ref DataColumnSidecar] = - let blob_sidecars = createBlobs(blocks, slots) var res = newSeq[ref DataColumnSidecar](len(slots)) for blck in blocks: - for blb_scr in blob_sidecars: - withBlck(blck[]): - when consensusFork >= ConsensusFork.Fulu: - var count = 0 - res[count] = get_data_column_sidecars(forkyBlck, - blb_scr.mapIt( - KzgBlob(bytes: it[].blob))) - inc count + withBlck(blck[]): + when consensusFork >= ConsensusFork.Fulu: + template kzgs: untyped = forkyBlck.message.body.blob_kzg_commitments + for i, slot in slots: + if slot == forkyBlck.message.slot: + doAssert kzgs.add default(KzgCommitment) + if kzgs.len > 0: + forkyBlck.root = hash_tree_root(forkyBlck.message) + var + kzg_proofs: KzgProofs + blobs: Blobs + for _ in kzgs: + doAssert kzg_proofs.add default(KzgProof) + doAssert blobs.add default(Blob) + let bsidecars = forkyBlck.create_blob_sidecars(kzg_proofs, blobs) + let dcsidecars = + forkyBlck.get_data_column_sidecars(bsidecars.mapIt(KzgBlob(bytes: it.blob))) + var sidecarIdx = 0 + for i, slot in slots: + if slot == 
forkyBlck.message.slot: + res[i] = newClone dcsidecars.get[sidecarIdx] + inc sidecarIdx res func getSlice(chain: openArray[ref ForkedSignedBeaconBlock], startSlot: Slot, From 8a1825b5d05af5b7a8a6909976f5ab6b7c7ab1fa Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Wed, 22 Jan 2025 01:28:52 +0530 Subject: [PATCH 12/60] added fulu message handlers for column subnets --- beacon_chain/nimbus_beacon_node.nim | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 3c955c765a..c5fd360986 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -1308,6 +1308,12 @@ func getSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = subnets + node.getNextSyncCommitteeSubnets(epoch) +func readCustodyGroupSubnets*(node: BeaconNode): uint64= + var res = CUSTODY_REQUIREMENT.uint64 + if node.config.peerdasSupernode: + res = NUMBER_OF_CUSTODY_GROUPS.uint64 + res + proc addAltairMessageHandlers( node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = node.addPhase0MessageHandlers(forkDigest, slot) @@ -1344,6 +1350,15 @@ proc addElectraMessageHandlers( proc addFuluMessageHandlers( node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = node.addElectraMessageHandlers(forkDigest, slot) + let + targetSubnets = node.readCustodyGroupSubnets() + custody = node.network.nodeId.get_custody_groups(max(SAMPLES_PER_SLOT.uint64, + targetSubnets.uint64)) + + for i in 0'u64.. Date: Wed, 22 Jan 2025 01:35:05 +0530 Subject: [PATCH 13/60] activated data column sidecar processing at Fulu --- beacon_chain/nimbus_beacon_node.nim | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index c5fd360986..0b1166c754 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -2145,6 +2145,19 @@ proc installMessageValidators(node: BeaconNode) = node.processor[].processBlobSidecar( MsgSource.gossip, blobSidecar, subnet_id))) + when consensusFork >= ConsensusFork.Fulu: + # data_column_sidecar_{subnet_id} + for it in 0'u64.. 
Date: Wed, 22 Jan 2025 11:45:36 +0530 Subject: [PATCH 14/60] fix compilation issues --- beacon_chain/networking/eth2_network.nim | 2 +- beacon_chain/nimbus_beacon_node.nim | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_chain/networking/eth2_network.nim b/beacon_chain/networking/eth2_network.nim index 342c21c771..0222c3c5ea 100644 --- a/beacon_chain/networking/eth2_network.nim +++ b/beacon_chain/networking/eth2_network.nim @@ -853,7 +853,7 @@ template gossipMaxSize(T: untyped): uint32 = fixedPortionSize(T).uint32 elif T is bellatrix.SignedBeaconBlock or T is capella.SignedBeaconBlock or T is deneb.SignedBeaconBlock or T is electra.SignedBeaconBlock or - T is fulu.SignedBeaconBlock: + T is fulu.SignedBeaconBlock or T is fulu.DataColumnSidecar: GOSSIP_MAX_SIZE # TODO https://github.com/status-im/nim-ssz-serialization/issues/20 for # Attestation, AttesterSlashing, and SignedAggregateAndProof, which all diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 0b1166c754..e25a46d600 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -2152,7 +2152,7 @@ proc installMessageValidators(node: BeaconNode) = let subnet_id = it node.network.addValidator( getDataColumnSidecarTopic(digest, subnet_id), proc ( - dataColumnSidecar: DataColumnSidecar + dataColumnSidecar: fulu.DataColumnSidecar ): ValidationResult = toValidationResult( node.processor[].processDataColumnSidecar( From 9756cce22ae9d44de4f61499b115e85013537250 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Wed, 22 Jan 2025 13:05:29 +0530 Subject: [PATCH 15/60] added to T list --- beacon_chain/networking/eth2_network.nim | 4 ++-- beacon_chain/spec/datatypes/base.nim | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/beacon_chain/networking/eth2_network.nim b/beacon_chain/networking/eth2_network.nim index 0222c3c5ea..81284dd7ba 100644 --- a/beacon_chain/networking/eth2_network.nim +++ b/beacon_chain/networking/eth2_network.nim @@ -853,7 +853,7 @@ template gossipMaxSize(T: untyped): uint32 = fixedPortionSize(T).uint32 elif T is bellatrix.SignedBeaconBlock or T is capella.SignedBeaconBlock or T is deneb.SignedBeaconBlock or T is electra.SignedBeaconBlock or - T is fulu.SignedBeaconBlock or T is fulu.DataColumnSidecar: + T is fulu.SignedBeaconBlock: GOSSIP_MAX_SIZE # TODO https://github.com/status-im/nim-ssz-serialization/issues/20 for # Attestation, AttesterSlashing, and SignedAggregateAndProof, which all @@ -863,7 +863,7 @@ template gossipMaxSize(T: untyped): uint32 = T is phase0.SignedAggregateAndProof or T is phase0.SignedBeaconBlock or T is electra.SignedAggregateAndProof or T is electra.Attestation or T is electra.AttesterSlashing or T is altair.SignedBeaconBlock or - T is SomeForkyLightClientObject: + T is SomeForkyLightClientObject or T is fulu.DataColumnSidecar: GOSSIP_MAX_SIZE else: {.fatal: "unknown type " & name(T).} diff --git a/beacon_chain/spec/datatypes/base.nim b/beacon_chain/spec/datatypes/base.nim index 18503682be..f9f758b62c 100644 --- a/beacon_chain/spec/datatypes/base.nim +++ b/beacon_chain/spec/datatypes/base.nim @@ -206,6 +206,8 @@ type ## The `BlobId` type is constrained to values in the range ## `[0, MAX_BLOBS_PER_BLOCK_ELECTRA)` during initialization. 
+ + # BitVector[4] in the spec, ie 4 bits which end up encoded as a byte for # SSZ / hashing purposes JustificationBits* = distinct uint8 From d527648610c2dfcd9cab9ea4e69f258834d9fae4 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Wed, 22 Jan 2025 13:06:20 +0530 Subject: [PATCH 16/60] other fixes --- beacon_chain/networking/eth2_network.nim | 4 ++-- beacon_chain/spec/datatypes/base.nim | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/beacon_chain/networking/eth2_network.nim b/beacon_chain/networking/eth2_network.nim index 81284dd7ba..0222c3c5ea 100644 --- a/beacon_chain/networking/eth2_network.nim +++ b/beacon_chain/networking/eth2_network.nim @@ -853,7 +853,7 @@ template gossipMaxSize(T: untyped): uint32 = fixedPortionSize(T).uint32 elif T is bellatrix.SignedBeaconBlock or T is capella.SignedBeaconBlock or T is deneb.SignedBeaconBlock or T is electra.SignedBeaconBlock or - T is fulu.SignedBeaconBlock: + T is fulu.SignedBeaconBlock or T is fulu.DataColumnSidecar: GOSSIP_MAX_SIZE # TODO https://github.com/status-im/nim-ssz-serialization/issues/20 for # Attestation, AttesterSlashing, and SignedAggregateAndProof, which all @@ -863,7 +863,7 @@ template gossipMaxSize(T: untyped): uint32 = T is phase0.SignedAggregateAndProof or T is phase0.SignedBeaconBlock or T is electra.SignedAggregateAndProof or T is electra.Attestation or T is electra.AttesterSlashing or T is altair.SignedBeaconBlock or - T is SomeForkyLightClientObject or T is fulu.DataColumnSidecar: + T is SomeForkyLightClientObject: GOSSIP_MAX_SIZE else: {.fatal: "unknown type " & name(T).} diff --git a/beacon_chain/spec/datatypes/base.nim b/beacon_chain/spec/datatypes/base.nim index f9f758b62c..18503682be 100644 --- a/beacon_chain/spec/datatypes/base.nim +++ b/beacon_chain/spec/datatypes/base.nim @@ -206,8 +206,6 @@ type ## The `BlobId` type is constrained to values in the range ## `[0, MAX_BLOBS_PER_BLOCK_ELECTRA)` during initialization. 
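The two patches above move fulu.DataColumnSidecar between the branches of gossipMaxSize and then back; as the hunks show, both branches resolve to GOSSIP_MAX_SIZE, so either grouping yields the same bound. A rough sketch of the compile-time classification idea, with placeholder types and an assumed placeholder constant rather than the real Nimbus values:

    const GOSSIP_MAX_SIZE = 10'u32 * 1024 * 1024   # assumed placeholder bound

    type
      StubDataColumnSidecar = object
        column: seq[byte]
      StubVoluntaryExit = object
        epoch: uint64
        validator_index: uint64

    template gossipMaxSizeFor(T: untyped): uint32 =
      when T is StubVoluntaryExit:
        # fixed-size message: bounded by its serialized size
        uint32(sizeof(StubVoluntaryExit))
      elif T is StubDataColumnSidecar:
        # variable-size message: bounded by the global gossip limit
        GOSSIP_MAX_SIZE
      else:
        {.fatal: "unknown gossip type".}

    doAssert gossipMaxSizeFor(StubDataColumnSidecar) == GOSSIP_MAX_SIZE
    doAssert gossipMaxSizeFor(StubVoluntaryExit) == uint32(sizeof(StubVoluntaryExit))
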
- - # BitVector[4] in the spec, ie 4 bits which end up encoded as a byte for # SSZ / hashing purposes JustificationBits* = distinct uint8 From 2f5e21603930b27ec8c4e93b0767d6b448895908 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Wed, 22 Jan 2025 17:45:09 +0530 Subject: [PATCH 17/60] fix test --- beacon_chain/sync/sync_queue.nim | 54 ++++---------------------------- tests/test_sync_manager.nim | 1 - 2 files changed, 6 insertions(+), 49 deletions(-) diff --git a/beacon_chain/sync/sync_queue.nim b/beacon_chain/sync/sync_queue.nim index 20947c9fed..1ad4644f3d 100644 --- a/beacon_chain/sync/sync_queue.nim +++ b/beacon_chain/sync/sync_queue.nim @@ -644,24 +644,15 @@ func getOpt(data_columns: Opt[seq[DataColumnSidecars]], i: int): Opt[DataColumnS Opt.none DataColumnSidecars iterator blocks[T](sq: SyncQueue[T], - sr: SyncResult[T]): (ref ForkedSignedBeaconBlock, Opt[BlobSidecars]) = + sr: SyncResult[T]): (ref ForkedSignedBeaconBlock, Opt[BlobSidecars], + Opt[DataColumnSidecars]) = case sq.kind of SyncQueueKind.Forward: for i in countup(0, len(sr.data) - 1): - yield (sr.data[i], sr.blobs.getOpt(i)) + yield (sr.data[i], sr.blobs.getOpt(i), sr.dataColumns.getOpt(i)) of SyncQueueKind.Backward: for i in countdown(len(sr.data) - 1, 0): - yield (sr.data[i], sr.blobs.getOpt(i)) - -iterator das_blocks[T](sq: SyncQueue[T], - sr: SyncResult[T]): (ref ForkedSignedBeaconBlock, Opt[DataColumnSidecars]) = - case sq.kind - of SyncQueueKind.Forward: - for i in countup(0, len(sr.data) - 1): - yield (sr.data[i], sr.data_columns.getOpt(i)) - of SyncQueueKind.Backward: - for i in countdown(len(sr.data) - 1, 0): - yield (sr.data[i], sr.data_columns.getOpt(i)) + yield (sr.data[i], sr.blobs.getOpt(i), sr.dataColumns.getOpt(i)) proc advanceOutput*[T](sq: SyncQueue[T], number: uint64) = case sq.kind @@ -795,8 +786,8 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T], res: Result[void, VerifierError] var i=0 - for blk, blb in sq.blocks(item): - res = await sq.blockVerifier(blk[], blb, Opt.none(DataColumnSidecars), maybeFinalized) + for blk, blb, cols in sq.blocks(item): + res = await sq.blockVerifier(blk[], blb, cols, maybeFinalized) inc(i) if res.isOk(): @@ -826,39 +817,6 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T], req.item.updateScore(PeerScoreBadValues) break - var counter = 0 - for blk, col in sq.das_blocks(item): - res = - await sq.blockVerifier(blk[], Opt.none(BlobSidecars), col, maybeFinalized) - inc counter - - if res.isOk: - goodBlock = some(blk[].slot) - else: - case res.error() - of VerifierError.MissingParent: - missingParentSlot = some(blk[].slot) - break - of VerifierError.Duplicate: - # Keep going, happens naturally - discard - of VerifierError.UnviableFork: - # Keep going so as to register other unviable blocks with the - # quarantine - if unviableBlock.isNone: - # Remember the first unviable block, so we can log it - unviableBlock = some((blk[].root, blk[].slot)) - - of VerifierError.Invalid: - hasInvalidBlock = true - - let req = item.request - notice "Received invalid sequence of blocks", request = req, - blocks_count = len(item.data), - blocks_map = getShortMap(req, item.data) - req.item.updateScore(PeerScoreBadValues) - break - # When errors happen while processing blocks, we retry the same request # with, hopefully, a different peer let retryRequest = diff --git a/tests/test_sync_manager.nim b/tests/test_sync_manager.nim index 43d810e77a..7a3d326a7a 100644 --- a/tests/test_sync_manager.nim +++ b/tests/test_sync_manager.nim @@ -399,7 +399,6 @@ suite "SyncManager test suite": 
Opt.none(seq[BlobSidecars]), Opt.none(seq[DataColumnSidecars])) await validatorFut.cancelAndWait() - waitFor runSmokeTest() case kkind of SyncQueueKind.Forward: From 1fc210ad45e6d69ae208e75534eda38e4ff7a343 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Fri, 24 Jan 2025 14:20:10 +0530 Subject: [PATCH 18/60] fix result situation in get data column sidecars --- beacon_chain/spec/peerdas_helpers.nim | 6 ++---- beacon_chain/validators/message_router.nim | 11 ++++------- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/beacon_chain/spec/peerdas_helpers.nim b/beacon_chain/spec/peerdas_helpers.nim index 3d3b447032..337457cbad 100644 --- a/beacon_chain/spec/peerdas_helpers.nim +++ b/beacon_chain/spec/peerdas_helpers.nim @@ -311,7 +311,7 @@ proc get_data_column_sidecars*(signed_beacon_block: fulu.SignedBeaconBlock, # blobs from blob bundles proc get_data_column_sidecars*(signed_beacon_block: fulu.SignedBeaconBlock, blobs: seq[KzgBlob]): - Result[seq[DataColumnSidecar], cstring] = + seq[DataColumnSidecar] = ## Given a signed beacon block and the blobs corresponding to the block, ## this function assembles the sidecars which can be distributed to ## the peers post data column reconstruction at every slot start. @@ -345,8 +345,6 @@ proc get_data_column_sidecars*(signed_beacon_block: fulu.SignedBeaconBlock, for i in 0..= ConsensusFork.Fulu: - newClone get_data_column_sidecars(blck, + newClone Opt.some(get_data_column_sidecars(blck, blobsOpt.get.mapIt( - KzgBlob(bytes: it.blob))) + KzgBlob(bytes: it.blob)))) else: newClone Opt.none(seq[DataColumnSidecar]) @@ -208,11 +208,8 @@ proc routeSignedBeaconBlock*( var dataColumnRefs = Opt.none(DataColumnSidecars) - let - dataColumns = - dataColumnsOpt[].get - - if dataColumns.len != 0: + if dataColumnsOpt[].isSome: + let dataColumns = dataColumnsOpt[].get() var das_workers = newSeq[Future[SendResult]](dataColumns.len) From 514bb3c06589d06f53ce449e3d94fe8aafd89af9 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Fri, 24 Jan 2025 17:50:46 +0530 Subject: [PATCH 19/60] fix message router issue --- beacon_chain/validators/message_router.nim | 28 ++++++++++++---------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index 41871631d5..1ca8d7a81e 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -93,13 +93,6 @@ proc routeSignedBeaconBlock*( ## block passes validation but is not added, and error otherwise let wallTime = router[].getCurrentBeaconTime() - dataColumnsOpt = - when typeof(blck).kind >= ConsensusFork.Fulu: - newClone Opt.some(get_data_column_sidecars(blck, - blobsOpt.get.mapIt( - KzgBlob(bytes: it.blob)))) - else: - newClone Opt.none(seq[DataColumnSidecar]) # Data columns extraction done early in the function # in order to use the columns throughout. 
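The sync_queue rework above replaces the separate das_blocks pass with a single blocks iterator that yields (block, blobs, columns) triples, so one verification loop covers both sidecar kinds in either queue direction. A stand-alone sketch of that iterator shape, with simplified stand-in types and std/options in place of stew's Opt:

    import std/options

    type
      Direction = enum dirForward, dirBackward
      StubResult = object
        blocks: seq[string]
        blobs: seq[Option[seq[string]]]     # per-block optional blob bundle
        columns: seq[Option[seq[string]]]   # per-block optional column bundle

    iterator entries(r: StubResult, dir: Direction):
        (string, Option[seq[string]], Option[seq[string]]) =
      case dir
      of dirForward:
        for i in countup(0, r.blocks.len - 1):
          yield (r.blocks[i], r.blobs[i], r.columns[i])
      of dirBackward:
        for i in countdown(r.blocks.len - 1, 0):
          yield (r.blocks[i], r.blobs[i], r.columns[i])

    let res = StubResult(
      blocks: @["a", "b"],
      blobs: @[some(@["blob-a"]), none(seq[string])],
      columns: @[none(seq[string]), some(@["col-b0", "col-b1"])])

    var seen: seq[string]
    var columnCount = 0
    for blk, blb, col in res.entries(dirBackward):
      seen.add blk
      if col.isSome:
        columnCount += col.get().len
    doAssert seen == @["b", "a"]
    doAssert columnCount == 2

Folding both sidecar kinds into one pass also keeps the per-block scoring and error handling in a single place instead of two nearly identical loops.
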
@@ -146,20 +139,22 @@ proc routeSignedBeaconBlock*( # May not be required as we are already # kzg verifying the blobs once elif typeof(blck).kind >= ConsensusFork.Fulu: - if dataColumnsOpt.isSome: + if blobsOpt.isSome: let - dataColumns = dataColumnsOpt.mapIt(it[].get) - + dataColumns = + newClone get_data_column_sidecars(blck, + blobsOpt.get.mapIt( + KzgBlob(bytes: it.blob))) let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq if dataColumns.len > 0 and kzgCommits.len > 0: - for i in 0..= ConsensusFork.Fulu: + newClone Opt.some(get_data_column_sidecars(blck, + blobsOpt.get.mapIt( + KzgBlob(bytes: it.blob)))) + else: + newClone Opt.none(seq[DataColumnSidecar]) if dataColumnsOpt[].isSome: let dataColumns = dataColumnsOpt[].get() var das_workers = From 4819e432fbab295b9e9a49724abc9cc91a22a135 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Fri, 24 Jan 2025 19:38:04 +0530 Subject: [PATCH 20/60] gate blob publishing upto deneb --- beacon_chain/validators/message_router.nim | 111 +++++++++++---------- 1 file changed, 56 insertions(+), 55 deletions(-) diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index 1ca8d7a81e..060ef6e3b3 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -183,68 +183,69 @@ proc routeSignedBeaconBlock*( signature = shortLog(blck.signature), error = res.error() var blobRefs = Opt.none(BlobSidecars) - if blobsOpt.isSome(): - let blobs = blobsOpt.get() - var workers = newSeq[Future[SendResult]](blobs.len) - for i in 0..= ConsensusFork.Fulu: - newClone Opt.some(get_data_column_sidecars(blck, - blobsOpt.get.mapIt( - KzgBlob(bytes: it.blob)))) - else: - newClone Opt.none(seq[DataColumnSidecar]) - if dataColumnsOpt[].isSome: - let dataColumns = dataColumnsOpt[].get() - var das_workers = - newSeq[Future[SendResult]](dataColumns.len) - - for i in 0..= ConsensusFork.Deneb: + if blobsOpt.isSome(): + let blobs = blobsOpt.get() + var workers = newSeq[Future[SendResult]](blobs.len) + for i in 0..= ConsensusFork.Fulu: + let dataColumnsOpt = + when typeof(blck).kind >= ConsensusFork.Fulu: + newClone Opt.some(get_data_column_sidecars(blck, + blobsOpt.get.mapIt( + KzgBlob(bytes: it.blob)))) + else: + newClone Opt.none(seq[DataColumnSidecar]) + if dataColumnsOpt[].isSome: + let dataColumns = dataColumnsOpt[].get() + var das_workers = + newSeq[Future[SendResult]](dataColumns.len) + + for i in 0.. 
Date: Fri, 24 Jan 2025 19:49:13 +0530 Subject: [PATCH 21/60] fix message router blob and column progressions --- beacon_chain/validators/message_router.nim | 38 +++++++++++----------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index 060ef6e3b3..7775579364 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -185,25 +185,7 @@ proc routeSignedBeaconBlock*( var blobRefs = Opt.none(BlobSidecars) var dataColumnRefs = Opt.none(DataColumnSidecars) - when typeof(blck).kind >= ConsensusFork.Deneb: - if blobsOpt.isSome(): - let blobs = blobsOpt.get() - var workers = newSeq[Future[SendResult]](blobs.len) - for i in 0..= ConsensusFork.Fulu: + when typeof(blck).kind >= ConsensusFork.Fulu: let dataColumnsOpt = when typeof(blck).kind >= ConsensusFork.Fulu: newClone Opt.some(get_data_column_sidecars(blck, @@ -247,6 +229,24 @@ proc routeSignedBeaconBlock*( final_columns.add dc dataColumnRefs = Opt.some(final_columns.mapIt(newClone(it))) + elif typeof(blck).kind >= ConsensusFork.Deneb: + if blobsOpt.isSome(): + let blobs = blobsOpt.get() + var workers = newSeq[Future[SendResult]](blobs.len) + for i in 0.. Date: Fri, 24 Jan 2025 21:46:45 +0530 Subject: [PATCH 22/60] drop dataColumnOpt from message router --- beacon_chain/validators/message_router.nim | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index 7775579364..a553486d4e 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -186,15 +186,9 @@ proc routeSignedBeaconBlock*( var dataColumnRefs = Opt.none(DataColumnSidecars) when typeof(blck).kind >= ConsensusFork.Fulu: - let dataColumnsOpt = - when typeof(blck).kind >= ConsensusFork.Fulu: - newClone Opt.some(get_data_column_sidecars(blck, - blobsOpt.get.mapIt( - KzgBlob(bytes: it.blob)))) - else: - newClone Opt.none(seq[DataColumnSidecar]) - if dataColumnsOpt[].isSome: - let dataColumns = dataColumnsOpt[].get() + if blobsOpt.isSome(): + let dataColumns = + get_data_column_sidecars(blck, blobsOpt.get.mapIt(KzgBlob(bytes: it.blob))) var das_workers = newSeq[Future[SendResult]](dataColumns.len) From 9b5feb694c90f4cc52d5417e9e12a99c0e80f811 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Sat, 25 Jan 2025 10:52:26 +0530 Subject: [PATCH 23/60] reversing rman blockVerifier order --- beacon_chain/nimbus_beacon_node.nim | 30 ++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index e25a46d600..0211fb35a9 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -476,21 +476,8 @@ proc initFullNode( maybeFinalized: bool): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} = withBlck(signedBlock): - when consensusFork >= ConsensusFork.Deneb: - if not blobQuarantine[].hasBlobs(forkyBlck): - # We don't have all the blobs for this block, so we have - # to put it in blobless quarantine. 
- if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck): - err(VerifierError.UnviableFork) - else: - err(VerifierError.MissingParent) - else: - let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck) - await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, - Opt.some(blobs), Opt.none(DataColumnSidecars), - maybeFinalized = maybeFinalized) - - elif consensusFork >= ConsensusFork.Fulu: + # Keeping Fulu first else >= Deneb means Fulu case never hits + when consensusFork >= ConsensusFork.Fulu: if not dataColumnQuarantine[].supernode and not dataColumnQuarantine[].hasMissingDataColumns(forkyBlck): if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck): @@ -509,6 +496,19 @@ proc initFullNode( await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, Opt.none(BlobSidecars), Opt.some(dataColumns), maybeFinalized = maybeFinalized) + elif consensusFork >= ConsensusFork.Deneb: + if not blobQuarantine[].hasBlobs(forkyBlck): + # We don't have all the blobs for this block, so we have + # to put it in blobless quarantine. + if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck): + err(VerifierError.UnviableFork) + else: + err(VerifierError.MissingParent) + else: + let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck) + await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, + Opt.some(blobs), Opt.none(DataColumnSidecars), + maybeFinalized = maybeFinalized) else: await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, From 3592d34e65138718e6a80792ddd376e2351330ff Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Sat, 25 Jan 2025 11:11:00 +0530 Subject: [PATCH 24/60] fixes --- beacon_chain/nimbus_beacon_node.nim | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 0211fb35a9..6f4b13324a 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -481,15 +481,15 @@ proc initFullNode( if not dataColumnQuarantine[].supernode and not dataColumnQuarantine[].hasMissingDataColumns(forkyBlck): if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck): - err(VerifierError.UnviableFork) + return err(VerifierError.UnviableFork) else: - err(VeriferError.MissingParent) + return err(VerifierError.MissingParent) elif dataColumnQuarantine[].supernode and - not dataColumnQuaratine[].hasEnoughDataColumns(forkyBlck): + not dataColumnQuarantine[].hasEnoughDataColumns(forkyBlck): if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck): - err(VerifierError.UnviableFork) + return err(VerifierError.UnviableFork) else: - err(VeriferError.MissingParent) + return err(VerifierError.MissingParent) else: let dataColumns = dataColumnQuarantine[].popDataColumns(forkyBlck.root, forkyBlck) @@ -501,9 +501,9 @@ proc initFullNode( # We don't have all the blobs for this block, so we have # to put it in blobless quarantine. 
if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck): - err(VerifierError.UnviableFork) + return err(VerifierError.UnviableFork) else: - err(VerifierError.MissingParent) + return err(VerifierError.MissingParent) else: let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck) await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, From e7bc4362893a0b17b50c0065747feb90f638fcd7 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Sat, 25 Jan 2025 13:37:27 +0530 Subject: [PATCH 25/60] several fixes --- .../data_column_quarantine.nim | 11 +++++----- .../gossip_processing/block_processor.nim | 9 ++++++-- .../gossip_processing/eth2_processor.nim | 21 +++++------------- beacon_chain/nimbus_beacon_node.nim | 19 ++++++++++++++++ beacon_chain/spec/peerdas_helpers.nim | 4 ++-- beacon_chain/validators/message_router.nim | 22 +++++++++++-------- 6 files changed, 52 insertions(+), 34 deletions(-) diff --git a/beacon_chain/consensus_object_pools/data_column_quarantine.nim b/beacon_chain/consensus_object_pools/data_column_quarantine.nim index b88088dd25..093227f47c 100644 --- a/beacon_chain/consensus_object_pools/data_column_quarantine.nim +++ b/beacon_chain/consensus_object_pools/data_column_quarantine.nim @@ -139,13 +139,12 @@ func hasMissingDataColumns*(quarantine: DataColumnQuarantine, index: idx) if dc_identifier notin quarantine.data_columns: inc col_counter - if quarantine.supernode and col_counter != NUMBER_OF_COLUMNS: - return false - elif quarantine.supernode == false and - col_counter != max(SAMPLES_PER_SLOT, CUSTODY_REQUIREMENT): - return false - else: + if quarantine.supernode and col_counter == NUMBER_OF_COLUMNS: + return true + if quarantine.supernode == false and + col_counter == max(SAMPLES_PER_SLOT, CUSTODY_REQUIREMENT): return true + false func hasEnoughDataColumns*(quarantine: DataColumnQuarantine, blck: fulu.SignedBeaconBlock): bool = diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim index c622f33a14..b8f4385251 100644 --- a/beacon_chain/gossip_processing/block_processor.nim +++ b/beacon_chain/gossip_processing/block_processor.nim @@ -623,9 +623,9 @@ proc storeBlock( withBlck(parentBlck.get()): when consensusFork >= ConsensusFork.Fulu: var data_column_sidecars: DataColumnSidecars - for i in 0 ..< forkyBlck.message.body.blob_kzg_commitments.len: + for i in self.dataColumnQuarantine[].custody_columns: let data_column = DataColumnSidecar.new() - if not dag.db.getDataColumnSidecar(parent_root, i.uint64, data_column[]): + if not dag.db.getDataColumnSidecar(parent_root, i.ColumnIndex, data_column[]): columnsOk = false break data_column_sidecars.add data_column @@ -790,6 +790,11 @@ proc storeBlock( for b in blobs: self.consensusManager.dag.db.putBlobSidecar(b[]) + # write data columns now that block has been written + let data_columns = dataColumnsOpt.valueOr: DataColumnSidecars @[] + for col in data_columns: + self.consensusManager.dag.db.putDataColumnSidecar(col[]) + let addHeadBlockTick = Moment.now() # Eagerly update head: the incoming block "should" get selected. 
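For context on the quarantine checks touched above: a node only needs the column indices it custodies before a block can leave the data-column quarantine, and a supernode custodies every column while other nodes wait for at most max(SAMPLES_PER_SLOT, CUSTODY_REQUIREMENT) of them. The sketch below illustrates that completeness test with placeholder constants, a toy custody set (the first N indices rather than the node-id-derived set), and simplified storage; it is not the exact Nimbus predicate:

    import std/[sets, tables]

    const
      NUMBER_OF_COLUMNS = 128'u64        # placeholder
      CUSTODY_REQUIREMENT = 4'u64        # placeholder
      SAMPLES_PER_SLOT = 8'u64           # placeholder

    type
      StubQuarantine = object
        supernode: bool
        # blockRoot -> column indices received so far
        seen: Table[string, HashSet[uint64]]

    func custodyColumnCount(q: StubQuarantine): uint64 =
      if q.supernode: NUMBER_OF_COLUMNS
      else: max(SAMPLES_PER_SLOT, CUSTODY_REQUIREMENT)

    func custodyColumns(q: StubQuarantine): seq[uint64] =
      # stand-in for the node-id-derived custody set: just the first N indices
      for i in 0'u64 ..< q.custodyColumnCount():
        result.add i

    proc put(q: var StubQuarantine, blockRoot: string, index: uint64) =
      q.seen.mgetOrPut(blockRoot, initHashSet[uint64]()).incl index

    proc hasAllCustodyColumns(q: StubQuarantine, blockRoot: string): bool =
      if blockRoot notin q.seen:
        return false
      let got = q.seen[blockRoot]
      for idx in q.custodyColumns():
        if idx notin got:
          return false
      true

    var q = StubQuarantine(supernode: false)
    for idx in q.custodyColumns():
      q.put("blk", idx)
    doAssert q.hasAllCustodyColumns("blk")
    doAssert not q.hasAllCustodyColumns("other")

Keeping the check keyed on the custody set, rather than on the full column count, is what lets non-supernodes make progress without ever seeing all 128 columns.
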
diff --git a/beacon_chain/gossip_processing/eth2_processor.nim b/beacon_chain/gossip_processing/eth2_processor.nim index 1f918f6029..737ebe4394 100644 --- a/beacon_chain/gossip_processing/eth2_processor.nim +++ b/beacon_chain/gossip_processing/eth2_processor.nim @@ -261,22 +261,13 @@ proc processSignedBeaconBlock*( let columns = when typeof(signedBlock).kind >= ConsensusFork.Fulu: - if self.dataColumnQuarantine[].supernode: - if self.dataColumnQuarantine[].hasEnoughDataColumns(signedBlock): - Opt.some(self.dataColumnQuarantine[].popDataColumns(signedBlock.root, - signedBlock)) - else: - discard self.quarantine[].addColumnless(self.dag.finalizedHead.slot, - signedBlock) - return v + if self.dataColumnQuarantine[].hasMissingDataColumns(signedBlock): + Opt.some(self.dataColumnQuarantine[].popDataColumns(signedBlock.root, + signedBlock)) else: - if self.dataColumnQuarantine[].hasMissingDataColumns(signedBlock): - Opt.some(self.dataColumnQuarantine[].popDataColumns(signedBlock.root, - signedBlock)) - else: - discard self.quarantine[].addColumnless(self.dag.finalizedHead.slot, - signedBlock) - return v + discard self.quarantine[].addColumnless(self.dag.finalizedHead.slot, + signedBlock) + return v else: Opt.none(DataColumnSidecars) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 6f4b13324a..1762a7000c 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -1660,6 +1660,24 @@ proc pruneBlobs(node: BeaconNode, slot: Slot) = count = count + 1 debug "pruned blobs", count, blobPruneEpoch +proc pruneDataColumns(node: BeaconNode, slot: Slot) = + let dataColumnPruneEpoch = (slot.epoch - + node.dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - 1) + if slot.is_epoch() and dataColumnPruneEpoch >= node.dag.cfg.FULU_FORK_EPOCH: + var blocks: array[SLOTS_PER_EPOCH.int, BlockId] + var count = 0 + let startIndex = node.dag.getBlockRange( + dataColumnPruneEpoch.start_slot, blocks.toopenArray(0, SLOTS_PER_EPOCH - 1)) + for i in startIndex.. 0 and kzgCommits.len > 0: + if dataColumns[].get().len > 0 and kzgCommits.len > 0: for i in 0..= ConsensusFork.Fulu: - if blobsOpt.isSome(): - let dataColumns = - get_data_column_sidecars(blck, blobsOpt.get.mapIt(KzgBlob(bytes: it.blob))) + let blobs = blobsOpt.get + if blobs.len != 0: + let dataColumnsRes = + newClone get_data_column_sidecars(blck, blobs.mapIt(KzgBlob(bytes: it.blob))) + if not dataColumnsRes[].isOk: + debug "Issue with extracting data columns from blob bundle" + let dataColumns = dataColumnsRes[].get() var das_workers = newSeq[Future[SendResult]](dataColumns.len) for i in 0.. 
Date: Sat, 25 Jan 2025 14:47:11 +0530 Subject: [PATCH 26/60] added debug logs for devnet testing --- beacon_chain/nimbus_beacon_node.nim | 2 +- beacon_chain/validators/message_router.nim | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 1762a7000c..f4149d4435 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -1667,7 +1667,7 @@ proc pruneDataColumns(node: BeaconNode, slot: Slot) = var blocks: array[SLOTS_PER_EPOCH.int, BlockId] var count = 0 let startIndex = node.dag.getBlockRange( - dataColumnPruneEpoch.start_slot, blocks.toopenArray(0, SLOTS_PER_EPOCH - 1)) + dataColumnPruneEpoch.start_slot, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1)) for i in startIndex..= ConsensusFork.Fulu: let blobs = blobsOpt.get + debugEcho blobs.len if blobs.len != 0: let dataColumnsRes = newClone get_data_column_sidecars(blck, blobs.mapIt(KzgBlob(bytes: it.blob))) if not dataColumnsRes[].isOk: debug "Issue with extracting data columns from blob bundle" let dataColumns = dataColumnsRes[].get() + debugEcho "Datacolumns len" + debugEcho dataColumns.len var das_workers = newSeq[Future[SendResult]](dataColumns.len) @@ -222,7 +225,7 @@ proc routeSignedBeaconBlock*( metadata)) var final_columns: seq[DataColumnSidecar] - for dc in data_columns: + for dc in dataColumns: if dc.index in custody_columns: final_columns.add dc dataColumnRefs = Opt.some(final_columns.mapIt(newClone(it))) From 749a5a9e7a29266403f1880c67cba4b6fb7c3ee3 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Sat, 25 Jan 2025 14:50:03 +0530 Subject: [PATCH 27/60] add blobsOpt isSome check --- beacon_chain/validators/message_router.nim | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index 4c60e119f2..3df413f3a6 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -94,9 +94,6 @@ proc routeSignedBeaconBlock*( let wallTime = router[].getCurrentBeaconTime() - # Data columns extraction done early in the function - # in order to use the columns throughout. 
- block: let vindex = ValidatorIndex(blck.message.proposer_index) if checkValidator and (vindex in router.processor.validatorPool[]): @@ -137,7 +134,7 @@ proc routeSignedBeaconBlock*( return err(res.error()) # May not be required as we are already - # kzg verifying the blobs once + # KZG verifying the blobs once elif typeof(blck).kind >= ConsensusFork.Fulu: if blobsOpt.isSome: let @@ -188,7 +185,7 @@ proc routeSignedBeaconBlock*( when typeof(blck).kind >= ConsensusFork.Fulu: let blobs = blobsOpt.get debugEcho blobs.len - if blobs.len != 0: + if blobsOpt.isSome() and blobs.len != 0: let dataColumnsRes = newClone get_data_column_sidecars(blck, blobs.mapIt(KzgBlob(bytes: it.blob))) if not dataColumnsRes[].isOk: From ceff7058b74e30bc0c4a39a99dafce15559cf934 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Sun, 26 Jan 2025 01:40:52 +0530 Subject: [PATCH 28/60] fix copyright years --- beacon_chain/beacon_chain_file.nim | 2 +- beacon_chain/consensus_object_pools/block_pools_types.nim | 2 +- beacon_chain/consensus_object_pools/blockchain_list.nim | 2 +- beacon_chain/consensus_object_pools/data_column_quarantine.nim | 2 +- beacon_chain/gossip_processing/block_processor.nim | 2 +- beacon_chain/gossip_processing/eth2_processor.nim | 2 +- beacon_chain/validators/message_router.nim | 2 +- tests/test_block_processor.nim | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/beacon_chain/beacon_chain_file.nim b/beacon_chain/beacon_chain_file.nim index 52cf9be55c..a07a6894dd 100644 --- a/beacon_chain/beacon_chain_file.nim +++ b/beacon_chain/beacon_chain_file.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). diff --git a/beacon_chain/consensus_object_pools/block_pools_types.nim b/beacon_chain/consensus_object_pools/block_pools_types.nim index cad7cd8bae..47346b1754 100644 --- a/beacon_chain/consensus_object_pools/block_pools_types.nim +++ b/beacon_chain/consensus_object_pools/block_pools_types.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). diff --git a/beacon_chain/consensus_object_pools/blockchain_list.nim b/beacon_chain/consensus_object_pools/blockchain_list.nim index 41284b956a..e4c5e59b5b 100644 --- a/beacon_chain/consensus_object_pools/blockchain_list.nim +++ b/beacon_chain/consensus_object_pools/blockchain_list.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
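The blobsOpt.isSome() guard introduced a couple of hunks above points at the pattern these patches converge on: read the optional blob bundle only after checking it is present, and skip column construction entirely for blobless blocks. A minimal illustration of guarding an optional blob bundle before deriving columns from it, with std/options standing in for stew's Opt and buildColumns as a hypothetical placeholder for get_data_column_sidecars:

    import std/options

    type
      StubBlob = object
        bytes: seq[byte]
      StubColumn = object
        index: uint64

    proc buildColumns(blobs: seq[StubBlob]): seq[StubColumn] =
      # placeholder: one column per blob; the real helper erasure-extends
      # the blob data across the full set of column sidecars
      for i in 0 ..< blobs.len:
        result.add StubColumn(index: uint64(i))

    proc columnsFor(blobsOpt: Option[seq[StubBlob]]): Option[seq[StubColumn]] =
      if blobsOpt.isSome:
        let blobs = blobsOpt.get()        # safe: guarded by isSome above
        if blobs.len > 0:
          return some(buildColumns(blobs))
      none(seq[StubColumn])

    doAssert columnsFor(none(seq[StubBlob])).isNone
    doAssert columnsFor(some(newSeq[StubBlob]())).isNone
    doAssert columnsFor(some(@[StubBlob(), StubBlob()])).get().len == 2
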
diff --git a/beacon_chain/consensus_object_pools/data_column_quarantine.nim b/beacon_chain/consensus_object_pools/data_column_quarantine.nim index 093227f47c..1091f0b3f3 100644 --- a/beacon_chain/consensus_object_pools/data_column_quarantine.nim +++ b/beacon_chain/consensus_object_pools/data_column_quarantine.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim index b8f4385251..e591f8bafb 100644 --- a/beacon_chain/gossip_processing/block_processor.nim +++ b/beacon_chain/gossip_processing/block_processor.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). diff --git a/beacon_chain/gossip_processing/eth2_processor.nim b/beacon_chain/gossip_processing/eth2_processor.nim index 737ebe4394..00cbd40fbb 100644 --- a/beacon_chain/gossip_processing/eth2_processor.nim +++ b/beacon_chain/gossip_processing/eth2_processor.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index 3df413f3a6..68e75d35ef 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). diff --git a/tests/test_block_processor.nim b/tests/test_block_processor.nim index bfff207a29..8d690ab172 100644 --- a/tests/test_block_processor.nim +++ b/tests/test_block_processor.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
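Several of the message-router revisions above fan freshly built data column sidecars out to per-column gossip subnets via compute_subnet_for_data_column_sidecar before waiting on the broadcast futures. A small sketch of that mapping and fan-out, assuming the usual index-modulo-subnet-count rule, with placeholder constants and types rather than the real Nimbus helpers:

    const DATA_COLUMN_SIDECAR_SUBNET_COUNT = 128'u64   # placeholder value

    type
      StubColumnSidecar = object
        index: uint64

    func subnetFor(columnIndex: uint64): uint64 =
      # assumed mapping: column index modulo the subnet count
      columnIndex mod DATA_COLUMN_SIDECAR_SUBNET_COUNT

    proc broadcast(subnet: uint64, sidecar: StubColumnSidecar): string =
      # placeholder for the real network broadcast call; the sidecar itself
      # is published on its subnet
      "subnet " & $subnet & " <- column " & $sidecar.index

    var log: seq[string]
    for sc in [StubColumnSidecar(index: 0),
               StubColumnSidecar(index: 3),
               StubColumnSidecar(index: 130)]:
      log.add broadcast(subnetFor(sc.index), sc)
    doAssert log == @["subnet 0 <- column 0",
                      "subnet 3 <- column 3",
                      "subnet 2 <- column 130"]
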
From d82c3f5b517448bc1eb83e9bba6300145a7a7621 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Sun, 26 Jan 2025 21:54:54 +0530 Subject: [PATCH 29/60] couple of fixes and debug logs --- beacon_chain/nimbus_beacon_node.nim | 27 ++++++----- beacon_chain/spec/peerdas_helpers.nim | 2 + beacon_chain/validators/message_router.nim | 55 +++++++++++----------- 3 files changed, 44 insertions(+), 40 deletions(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index f4149d4435..ded0b9da30 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -2028,6 +2028,20 @@ proc installMessageValidators(node: BeaconNode) = node.processor[].processSignedBeaconBlock( MsgSource.gossip, signedBlock))) + # data_column_sidecar_{subnet_id} + when consensusFork >= ConsensusFork.Fulu: + # data_column_sidecar_{subnet_id} + for it in 0'u64..= ConsensusFork.Electra: @@ -2164,19 +2178,6 @@ proc installMessageValidators(node: BeaconNode) = node.processor[].processBlobSidecar( MsgSource.gossip, blobSidecar, subnet_id))) - when consensusFork >= ConsensusFork.Fulu: - # data_column_sidecar_{subnet_id} - for it in 0'u64..= ConsensusFork.Fulu: + # if blobsOpt.isSome: + # let + # dataColumns = + # newClone get_data_column_sidecars(blck, + # blobsOpt.get.mapIt( + # KzgBlob(bytes: it.blob))) + # let kzgCommits = + # signedBlock.message.body.blob_kzg_commitments.asSeq + # if dataColumns[].get().len > 0 and kzgCommits.len > 0: + # for i in 0..= ConsensusFork.Deneb: if blobsOpt.isSome: let blobs = blobsOpt.get() @@ -133,30 +157,6 @@ proc routeSignedBeaconBlock*( msg = res.error() return err(res.error()) - # May not be required as we are already - # KZG verifying the blobs once - elif typeof(blck).kind >= ConsensusFork.Fulu: - if blobsOpt.isSome: - let - dataColumns = - newClone get_data_column_sidecars(blck, - blobsOpt.get.mapIt( - KzgBlob(bytes: it.blob))) - let kzgCommits = - signedBlock.message.body.blob_kzg_commitments.asSeq - if dataColumns[].get().len > 0 and kzgCommits.len > 0: - for i in 0.. Date: Mon, 27 Jan 2025 11:13:16 +0530 Subject: [PATCH 30/60] fix issue --- beacon_chain/validators/message_router.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index a3df049fac..1e1d324345 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -203,7 +203,7 @@ proc routeSignedBeaconBlock*( das_workers[i] = router[].network.broadcastDataColumnSidecar(subnet_id, - dataColumns[i].index) + dataColumns[i]) let allres = await allFinished(das_workers) for i in 0.. 
Date: Mon, 27 Jan 2025 17:18:26 +0530 Subject: [PATCH 31/60] resolved review comments, enabled more debug logs, fixed a couple of things --- .../gossip_processing/block_processor.nim | 166 +++++++++++------- beacon_chain/nimbus_beacon_node.nim | 15 +- beacon_chain/spec/peerdas_helpers.nim | 8 +- beacon_chain/sync/sync_queue.nim | 4 +- 4 files changed, 113 insertions(+), 80 deletions(-) diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim index e591f8bafb..b2243c05a4 100644 --- a/beacon_chain/gossip_processing/block_processor.nim +++ b/beacon_chain/gossip_processing/block_processor.nim @@ -190,29 +190,6 @@ proc storeBackfillBlock( # The block is certainly not missing any more self.consensusManager.quarantine[].missing.del(signedBlock.root) - # Establish blob viability before calling addbackfillBlock to avoid - # writing the block in case of blob error. - var blobsOk = true - when typeof(signedBlock).kind >= ConsensusFork.Deneb: - if blobsOpt.isSome: - let blobs = blobsOpt.get() - let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq - if blobs.len > 0 or kzgCommits.len > 0: - let r = validate_blobs(kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)), - blobs.mapIt(it.kzg_proof)) - if r.isErr(): - debug "backfill blob validation failed", - blockRoot = shortLog(signedBlock.root), - blobs = shortLog(blobs), - blck = shortLog(signedBlock.message), - kzgCommits = mapIt(kzgCommits, shortLog(it)), - signature = shortLog(signedBlock.signature), - msg = r.error() - blobsOk = r.isOk() - - if not blobsOk: - return err(VerifierError.Invalid) - var columnsOk = true when typeof(signedBlock).kind >= ConsensusFork.Fulu: var malformed_cols: seq[int] @@ -254,6 +231,29 @@ proc storeBackfillBlock( if not columnsOk: return err(VerifierError.Invalid) + # Establish blob viability before calling addbackfillBlock to avoid + # writing the block in case of blob error. 
+ var blobsOk = true + when typeof(signedBlock).kind >= ConsensusFork.Deneb: + if blobsOpt.isSome: + let blobs = blobsOpt.get() + let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq + if blobs.len > 0 or kzgCommits.len > 0: + let r = validate_blobs(kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)), + blobs.mapIt(it.kzg_proof)) + if r.isErr(): + debug "backfill blob validation failed", + blockRoot = shortLog(signedBlock.root), + blobs = shortLog(blobs), + blck = shortLog(signedBlock.message), + kzgCommits = mapIt(kzgCommits, shortLog(it)), + signature = shortLog(signedBlock.signature), + msg = r.error() + blobsOk = r.isOk() + + if not blobsOk: + return err(VerifierError.Invalid) + let res = self.consensusManager.dag.addBackfillBlock(signedBlock) if res.isErr(): @@ -428,19 +428,19 @@ proc getExecutionValidity( blck = shortLog(blck) return NewPayloadStatus.noResponse -proc checkBloblessSignature( +proc checkBlobOrColumnlessSignature( self: BlockProcessor, signed_beacon_block: deneb.SignedBeaconBlock | electra.SignedBeaconBlock | fulu.SignedBeaconBlock): Result[void, cstring] = let dag = self.consensusManager.dag let parent = dag.getBlockRef(signed_beacon_block.message.parent_root).valueOr: - return err("checkBloblessSignature called with orphan block") + return err("checkBlobOrColumnlessSignature called with orphan block") let proposer = getProposer( dag, parent, signed_beacon_block.message.slot).valueOr: - return err("checkBloblessSignature: Cannot compute proposer") + return err("checkBlobOrColumnlessSignature: Cannot compute proposer") if distinctBase(proposer) != signed_beacon_block.message.proposer_index: - return err("checkBloblessSignature: Incorrect proposer") + return err("checkBlobOrColumnlessSignature: Incorrect proposer") if not verify_block_signature( dag.forkAtEpoch(signed_beacon_block.message.slot.epoch), getStateField(dag.headState, genesis_validators_root), @@ -448,7 +448,7 @@ proc checkBloblessSignature( signed_beacon_block.root, dag.validatorKey(proposer).get(), signed_beacon_block.signature): - return err("checkBloblessSignature: Invalid proposer signature") + return err("checkBlobOrColumnlessSignature: Invalid proposer signature") ok() proc enqueueBlock*( @@ -598,6 +598,21 @@ proc storeBlock( parent_root = signedBlock.message.parent_root parentBlck = dag.getForkedBlock(parent_root) if parentBlck.isSome(): + var columnsOk = true + let columns = + withBlck(parentBlck.get()): + when consensusFork >= ConsensusFork.Fulu: + var data_column_sidecars: DataColumnSidecars + for i in self.dataColumnQuarantine[].custody_columns: + let data_column = DataColumnSidecar.new() + if not dag.db.getDataColumnSidecar(parent_root, i.ColumnIndex, data_column[]): + columnsOk = false + break + data_column_sidecars.add data_column + Opt.some data_column_sidecars + else: + Opt.none DataColumnSidecars + var blobsOk = true let blobs = withBlck(parentBlck.get()): @@ -612,27 +627,24 @@ proc storeBlock( Opt.some blob_sidecars else: Opt.none BlobSidecars - if blobsOk: + # Blobs and columns can never co-exist in the same block + doAssert blobs.isSome and columns.isSome + # Block has neither blob sidecar nor data column sidecar + if blobs.isNone and columns.isNone: + debug "Loaded parent block from storage", parent_root + self[].enqueueBlock( + MsgSource.gossip, parentBlck.unsafeGet().asSigned(), Opt.none(BlobSidecars), + Opt.none(DataColumnSidecars)) + # Block has blob sidecars associated and NO data column sidecars + # as they cannot co-exist. 
+ if blobsOk and blobs.isSome: debug "Loaded parent block from storage", parent_root self[].enqueueBlock( MsgSource.gossip, parentBlck.unsafeGet().asSigned(), blobs, Opt.none(DataColumnSidecars)) - - var columnsOk = true - let columns = - withBlck(parentBlck.get()): - when consensusFork >= ConsensusFork.Fulu: - var data_column_sidecars: DataColumnSidecars - for i in self.dataColumnQuarantine[].custody_columns: - let data_column = DataColumnSidecar.new() - if not dag.db.getDataColumnSidecar(parent_root, i.ColumnIndex, data_column[]): - columnsOk = false - break - data_column_sidecars.add data_column - Opt.some data_column_sidecars - else: - Opt.none DataColumnSidecars - if columnsOk: + # Block has data column sidecars associated and NO blob sidecars + # as they cannot co-exist. + if columnsOk and columns.isSome: debug "Loaded parent block from storage", parent_root self[].enqueueBlock( MsgSource.gossip, parentBlck.unsafeGet().asSigned(), Opt.none(BlobSidecars), @@ -706,10 +718,26 @@ proc storeBlock( let newPayloadTick = Moment.now() + when typeof(signedBlock).kind >= ConsensusFork.Fulu: + if dataColumnsOpt.isSome: + let columns = dataColumnsOpt.get() + let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq + if columns.len > 0 and kzgCommits.len > 0: + for i in 0..= ConsensusFork.Deneb: + elif typeof(signedBlock).kind >= ConsensusFork.Deneb: if blobsOpt.isSome: let blobs = blobsOpt.get() let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq @@ -726,24 +754,6 @@ proc storeBlock( msg = r.error() return err((VerifierError.Invalid, ProcessingStatus.completed)) - elif typeof(signedBlock).kind >= ConsensusFork.Fulu: - if dataColumnsOpt.isSome: - let columns = dataColumnsOpt.get() - let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq - if columns.len > 0 and kzgCommits.len > 0: - for i in 0..= ConsensusFork.Fulu: if len(forkyBlck.message.body.blob_kzg_commitments) == 0: self[].enqueueBlock( MsgSource.gossip, quarantined, Opt.some(BlobSidecars @[]), Opt.some(DataColumnSidecars @[])) else: - if (let res = checkBloblessSignature(self[], forkyBlck); res.isErr): + if (let res = checkBlobOrColumnlessSignature(self[], + forkyBlck); + res.isErr): warn "Failed to verify signature of unorphaned blobless block", blck = shortLog(forkyBlck), error = res.error() continue + if self.dataColumnQuarantine[].hasMissingDataColumns(forkyBlck): + let columns = self.dataColumnQuarantine[].popDataColumns( + forkyBlck.root, forkyBlck) + self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.none(BlobSidecars), + Opt.some(columns)) + else: + discard self.consensusManager.quarantine[].addBlobless( + dag.finalizedHead.slot, forkyBlck) + elif typeof(forkyBlck).kind >= ConsensusFork.Deneb: + if len(forkyBlck.message.body.blob_kzg_commitments) == 0: + self[].enqueueBlock( + MsgSource.gossip, quarantined, Opt.some(BlobSidecars @[]), + Opt.some(DataColumnSidecars @[])) + else: + if (let res = checkBlobOrColumnlessSignature(self[], + forkyBlck); + res.isErr): + warn "Failed to verify signature of unorphaned columnless block", + blck = shortLog(forkyBlck), + error = res.error() + continue if self.blobQuarantine[].hasBlobs(forkyBlck): let blobs = self.blobQuarantine[].popBlobs( forkyBlck.root, forkyBlck) @@ -959,6 +992,7 @@ proc storeBlock( discard self.consensusManager.quarantine[].addBlobless( dag.finalizedHead.slot, forkyBlck) + ok blck.value() # Enqueue diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index ded0b9da30..0424b5435d 100644 --- 
a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -1308,11 +1308,11 @@ func getSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = subnets + node.getNextSyncCommitteeSubnets(epoch) -func readCustodyGroupSubnets*(node: BeaconNode): uint64= - var res = CUSTODY_REQUIREMENT.uint64 +func readCustodyGroupSubnets(node: BeaconNode): uint64 = if node.config.peerdasSupernode: - res = NUMBER_OF_CUSTODY_GROUPS.uint64 - res + NUMBER_OF_CUSTODY_GROUPS.uint64 + else: + CUSTODY_REQUIREMENT.uint64 proc addAltairMessageHandlers( node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = @@ -1355,10 +1355,9 @@ proc addFuluMessageHandlers( custody = node.network.nodeId.get_custody_groups(max(SAMPLES_PER_SLOT.uint64, targetSubnets.uint64)) - for i in 0'u64..= MAX_BLOBS_PER_BLOCK - - if lenu64(data) > (req.count * MAX_BLOBS_PER_BLOCK_ELECTRA): + if lenu64(data) > (req.count * NUMBER_OF_COLUMNS): # Number of data columns in response should be less or equal to # number of requested (blocks * MAX_BLOCKS_PER_BLOCK_ELECTRA). return err("Too many data columns received") From 7b3304becd3b7e2bc23ade0d73910a2cc37cbb07 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Mon, 27 Jan 2025 18:28:33 +0530 Subject: [PATCH 32/60] fix indentation --- beacon_chain/validators/message_router.nim | 46 +++++++++++----------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index 1e1d324345..35ae56f001 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -204,29 +204,29 @@ proc routeSignedBeaconBlock*( das_workers[i] = router[].network.broadcastDataColumnSidecar(subnet_id, dataColumns[i]) - let allres = await allFinished(das_workers) - for i in 0..= ConsensusFork.Deneb: if blobsOpt.isSome(): From 97a190fc45f10ce6749e61a83b54072b4843a46d Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Mon, 27 Jan 2025 19:30:12 +0530 Subject: [PATCH 33/60] limit processBlobSidecar < Fulu --- beacon_chain/nimbus_beacon_node.nim | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 0424b5435d..6257d9b382 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -1359,6 +1359,9 @@ proc addFuluMessageHandlers( let topic = getDataColumnSidecarTopic(forkDigest, i) node.network.subscribe(topic, basicParams) + for topic in blobSidecarTopics(forkDigest): + node.network.unsubscribe(topic) + proc removeAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = node.removePhase0MessageHandlers(forkDigest) @@ -2163,7 +2166,8 @@ proc installMessageValidators(node: BeaconNode) = await node.processor.processBlsToExecutionChange( MsgSource.gossip, msg))) - when consensusFork >= ConsensusFork.Deneb: + when consensusFork >= ConsensusFork.Deneb and + consensusFork < ConsensusFork.Fulu: # blob_sidecar_{subnet_id} # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id for it in BlobId: From 947a71ba7987c3db5c6225f909b9f40043e12043 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Mon, 27 Jan 2025 20:54:06 +0530 Subject: [PATCH 34/60] try to gate a few operations to < Fulu --- beacon_chain/gossip_processing/block_processor.nim | 6 ++++-- beacon_chain/nimbus_beacon_node.nim | 3 ++- beacon_chain/spec/peerdas_helpers.nim | 6 ++---- 3 files changed, 8 insertions(+), 7 deletions(-) 
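
The gating in this series relies on Nim's compile-time "when" dispatch over the block's consensus fork: blob-sidecar handling applies from Deneb up to, but not including, Fulu, while data-column handling takes over from Fulu onward. The diffs achieve this either by checking the Fulu case first or by bounding the Deneb case with an explicit "< ConsensusFork.Fulu". A minimal, self-contained sketch of the first form (hypothetical enum and proc names, not code from this repository):

type
  ConsensusForkSketch = enum
    forkPhase0, forkAltair, forkBellatrix, forkCapella,
    forkDeneb, forkElectra, forkFulu

proc sidecarKind(fork: static ConsensusForkSketch): string =
  ## Resolved at compile time: only one branch is instantiated per fork,
  ## mirroring how the processors pick between blob and data-column paths.
  when fork >= forkFulu:
    result = "data column sidecars"
  elif fork >= forkDeneb:
    result = "blob sidecars"
  else:
    result = "no sidecars"

when isMainModule:
  doAssert sidecarKind(forkElectra) == "blob sidecars"
  doAssert sidecarKind(forkFulu) == "data column sidecars"

The same selection appears in the diffs below for block_processor.nim, eth2_processor.nim and message_router.nim, which is why the Fulu condition is always evaluated before the Deneb one.
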
diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim index b2243c05a4..dc937b2157 100644 --- a/beacon_chain/gossip_processing/block_processor.nim +++ b/beacon_chain/gossip_processing/block_processor.nim @@ -234,7 +234,8 @@ proc storeBackfillBlock( # Establish blob viability before calling addbackfillBlock to avoid # writing the block in case of blob error. var blobsOk = true - when typeof(signedBlock).kind >= ConsensusFork.Deneb: + when typeof(signedBlock).kind >= ConsensusFork.Deneb and + typeof(signedBlock).kind < ConsensusFork.Fulu: if blobsOpt.isSome: let blobs = blobsOpt.get() let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq @@ -616,7 +617,8 @@ proc storeBlock( var blobsOk = true let blobs = withBlck(parentBlck.get()): - when consensusFork >= ConsensusFork.Deneb: + when consensusFork >= ConsensusFork.Deneb and + consensusFork < ConsensusFork.Fulu: var blob_sidecars: BlobSidecars for i in 0 ..< forkyBlck.message.body.blob_kzg_commitments.len: let blob = BlobSidecar.new() diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 6257d9b382..a3e5a6c2d0 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -496,7 +496,8 @@ proc initFullNode( await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, Opt.none(BlobSidecars), Opt.some(dataColumns), maybeFinalized = maybeFinalized) - elif consensusFork >= ConsensusFork.Deneb: + elif consensusFork >= ConsensusFork.Deneb and + consensusFork < ConsensusFork.Fulu: if not blobQuarantine[].hasBlobs(forkyBlck): # We don't have all the blobs for this block, so we have # to put it in blobless quarantine. diff --git a/beacon_chain/spec/peerdas_helpers.nim b/beacon_chain/spec/peerdas_helpers.nim index 4bed7ec0e4..434c8d9297 100644 --- a/beacon_chain/spec/peerdas_helpers.nim +++ b/beacon_chain/spec/peerdas_helpers.nim @@ -437,17 +437,15 @@ proc verify_data_column_sidecar_kzg_proofs*(sidecar: DataColumnSidecar): return err("Sidecar kzg_commitments length is not equal to the kzg_proofs length") # Iterate through the cell indices - var cellIndices = - newSeq[CellIndex](MAX_BLOB_COMMITMENTS_PER_BLOCK) + var cellIndices = newSeqOfCap[CellIndex](sidecar.column.len) for _ in 0.. 
Date: Mon, 27 Jan 2025 21:46:55 +0530 Subject: [PATCH 35/60] gate more --- beacon_chain/gossip_processing/block_processor.nim | 7 ++++--- beacon_chain/gossip_processing/eth2_processor.nim | 6 ++++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim index dc937b2157..71b70345e5 100644 --- a/beacon_chain/gossip_processing/block_processor.nim +++ b/beacon_chain/gossip_processing/block_processor.nim @@ -28,7 +28,7 @@ from ../consensus_object_pools/block_dag import BlockRef, root, shortLog, slot from ../consensus_object_pools/block_pools_types import EpochRef, VerifierError from ../consensus_object_pools/block_quarantine import - addBlobless, addOrphan, addUnviable, pop, removeOrphan + addBlobless, addColumnless, addOrphan, addUnviable, pop, removeOrphan from ../consensus_object_pools/blob_quarantine import BlobQuarantine, hasBlobs, popBlobs, put from ../consensus_object_pools/data_column_quarantine import @@ -970,9 +970,10 @@ proc storeBlock( self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.none(BlobSidecars), Opt.some(columns)) else: - discard self.consensusManager.quarantine[].addBlobless( + discard self.consensusManager.quarantine[].addColumnless( dag.finalizedHead.slot, forkyBlck) - elif typeof(forkyBlck).kind >= ConsensusFork.Deneb: + elif typeof(forkyBlck).kind >= ConsensusFork.Deneb and + typeof(forkyBlck).kind < ConsensusFork.Fulu: if len(forkyBlck.message.body.blob_kzg_commitments) == 0: self[].enqueueBlock( MsgSource.gossip, quarantined, Opt.some(BlobSidecars @[]), diff --git a/beacon_chain/gossip_processing/eth2_processor.nim b/beacon_chain/gossip_processing/eth2_processor.nim index 00cbd40fbb..31a9e65ac5 100644 --- a/beacon_chain/gossip_processing/eth2_processor.nim +++ b/beacon_chain/gossip_processing/eth2_processor.nim @@ -249,7 +249,8 @@ proc processSignedBeaconBlock*( trace "Block validated" let blobs = - when typeof(signedBlock).kind >= ConsensusFork.Deneb: + when typeof(signedBlock).kind >= ConsensusFork.Deneb and + typeof(signedBlock).kind < ConsensusFork.Fulu: if self.blobQuarantine[].hasBlobs(signedBlock): Opt.some(self.blobQuarantine[].popBlobs(signedBlock.root, signedBlock)) else: @@ -324,7 +325,8 @@ proc processBlobSidecar*( if (let o = self.quarantine[].popBlobless(block_root); o.isSome): let blobless = o.unsafeGet() withBlck(blobless): - when consensusFork >= ConsensusFork.Deneb: + when consensusFork >= ConsensusFork.Deneb and + consensusFork < ConsensusFork.Fulu: if self.blobQuarantine[].hasBlobs(forkyBlck): self.blockProcessor[].enqueueBlock( MsgSource.gossip, blobless, From 0afb4a2edcbf4b0bc98ab99b62e82fe015d26ebc Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Tue, 28 Jan 2025 13:01:53 +0530 Subject: [PATCH 36/60] halt rman blob loop post fulu fork epoch --- .../gossip_processing/eth2_processor.nim | 1 + beacon_chain/networking/eth2_network.nim | 4 +- beacon_chain/nimbus_beacon_node.nim | 39 +++++++++++++------ beacon_chain/sync/request_manager.nim | 4 ++ 4 files changed, 34 insertions(+), 14 deletions(-) diff --git a/beacon_chain/gossip_processing/eth2_processor.nim b/beacon_chain/gossip_processing/eth2_processor.nim index 31a9e65ac5..bcd326d36b 100644 --- a/beacon_chain/gossip_processing/eth2_processor.nim +++ b/beacon_chain/gossip_processing/eth2_processor.nim @@ -371,6 +371,7 @@ proc processDataColumnSidecar*( debug "Data column validated, putting data column in quarantine" self.dataColumnQuarantine[].put(newClone(dataColumnSidecar)) + 
self.dag.db.putDataColumnSidecar(dataColumnSidecar) let block_root = hash_tree_root(block_header) if (let o = self.quarantine[].popColumnless(block_root); o.isSome): diff --git a/beacon_chain/networking/eth2_network.nim b/beacon_chain/networking/eth2_network.nim index 0222c3c5ea..85c7199ce8 100644 --- a/beacon_chain/networking/eth2_network.nim +++ b/beacon_chain/networking/eth2_network.nim @@ -86,8 +86,8 @@ type validTopics: HashSet[string] peerPingerHeartbeatFut: Future[void].Raising([CancelledError]) peerTrimmerHeartbeatFut: Future[void].Raising([CancelledError]) - cfg: RuntimeConfig - getBeaconTime: GetBeaconTimeFn + cfg*: RuntimeConfig + getBeaconTime*: GetBeaconTimeFn quota: TokenBucket ## Global quota mainly for high-bandwidth stuff diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index a3e5a6c2d0..b1a5b7cc3c 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -478,24 +478,39 @@ proc initFullNode( withBlck(signedBlock): # Keeping Fulu first else >= Deneb means Fulu case never hits when consensusFork >= ConsensusFork.Fulu: - if not dataColumnQuarantine[].supernode and - not dataColumnQuarantine[].hasMissingDataColumns(forkyBlck): + let + accumulatedDataColumns = dataColumnQuarantine[].gatherDataColumns(forkyBlck.root) + + if dataColumnQuarantine[].supernode and + accumulatedDataColumns.len <= dataColumnQuarantine[].custody_columns.len: + # We don't have the requisite number of data columns for this block yet, + # so we have put in columnless quarantine as a supernode if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck): return err(VerifierError.UnviableFork) else: return err(VerifierError.MissingParent) - elif dataColumnQuarantine[].supernode and - not dataColumnQuarantine[].hasEnoughDataColumns(forkyBlck): + + elif not dataColumnQuarantine[].supernode and + accumulatedDataColumns.len <= dataColumnQuarantine[].custody_columns.len: + # We don't have the requisite number of data columns for this block yet, + # so we have put in columnless quarantine as fullnode if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck): return err(VerifierError.UnviableFork) else: return err(VerifierError.MissingParent) - else: - let dataColumns = dataColumnQuarantine[].popDataColumns(forkyBlck.root, - forkyBlck) + + elif dataColumnQuarantine[].supernode and + accumulatedDataColumns.len >= (dataColumnQuarantine[].custody_columns.len div 2): + # We have seen 50%+ data columns, we can attempt to add this block + let dataColumns = dataColumnQuarantine[].popDataColumns(forkyBlck.root, forkyBlck) await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, - Opt.none(BlobSidecars), Opt.some(dataColumns), - maybeFinalized = maybeFinalized) + Opt.none(BlobSidecars), Opt.some(dataColumns), + maybeFinalized = maybeFinalized) + + else: + return await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, + Opt.none(BlobSidecars), Opt.none(DataColumnSidecars), + maybeFinalized = maybeFinalized) elif consensusFork >= ConsensusFork.Deneb and consensusFork < ConsensusFork.Fulu: if not blobQuarantine[].hasBlobs(forkyBlck): @@ -1360,9 +1375,6 @@ proc addFuluMessageHandlers( let topic = getDataColumnSidecarTopic(forkDigest, i) node.network.subscribe(topic, basicParams) - for topic in blobSidecarTopics(forkDigest): - node.network.unsubscribe(topic) - proc removeAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = node.removePhase0MessageHandlers(forkDigest) @@ -2221,6 +2233,9 @@ proc run(node: 
BeaconNode) {.raises: [CatchableError].} = node.startLightClient() node.requestManager.start() + if node.network.getBeaconTime().slotOrZero.epoch >= + node.network.cfg.FULU_FORK_EPOCH: + node.requestManager.switchToColumnLoop() node.syncOverseer.start() waitFor node.updateGossipStatus(wallSlot) diff --git a/beacon_chain/sync/request_manager.nim b/beacon_chain/sync/request_manager.nim index 34180db344..7682adf620 100644 --- a/beacon_chain/sync/request_manager.nim +++ b/beacon_chain/sync/request_manager.nim @@ -657,7 +657,11 @@ proc start*(rman: var RequestManager) = ## Start Request Manager's loops. rman.blockLoopFuture = rman.requestManagerBlockLoop() rman.blobLoopFuture = rman.requestManagerBlobLoop() + +proc switchToColumnLoop*(rman: var RequestManager) = rman.dataColumnLoopFuture = rman.requestManagerDataColumnLoop() + if not(isNil(rman.blobLoopFuture)): + rman.blobLoopFuture.cancelSoon() proc stop*(rman: RequestManager) = ## Stop Request Manager's loop. From 9a6f749cc65db47c0be47fb2eb149bbe8e7ccdd2 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Tue, 28 Jan 2025 13:02:47 +0530 Subject: [PATCH 37/60] removed debugEchoes --- beacon_chain/validators/message_router.nim | 5 ----- 1 file changed, 5 deletions(-) diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index 35ae56f001..7a0d7a4854 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -184,19 +184,14 @@ proc routeSignedBeaconBlock*( Opt.none(DataColumnSidecars) when typeof(blck).kind >= ConsensusFork.Fulu: let blobs = blobsOpt.get - debugEcho blobs.len if blobsOpt.isSome() and blobs.len != 0: let dataColumnsRes = newClone get_data_column_sidecars(blck, blobs.mapIt(KzgBlob(bytes: it.blob))) if not dataColumnsRes[].isOk: debug "Issue with extracting data columns from blob bundle" let dataColumns = dataColumnsRes[].get() - debugEcho "Datacolumns len" - debugEcho dataColumns.len var das_workers = newSeq[Future[SendResult]](len(dataColumnsRes[].get())) - debugEcho "Das workers len" - debugEcho das_workers.len for i in 0.. Date: Tue, 28 Jan 2025 14:07:50 +0530 Subject: [PATCH 38/60] don't ignore data column sidecars, even if you already have block --- beacon_chain/gossip_processing/gossip_validation.nim | 2 -- beacon_chain/nimbus_beacon_node.nim | 9 +++++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/beacon_chain/gossip_processing/gossip_validation.nim b/beacon_chain/gossip_processing/gossip_validation.nim index 2b15ef75f0..5e1a2baca5 100644 --- a/beacon_chain/gossip_processing/gossip_validation.nim +++ b/beacon_chain/gossip_processing/gossip_validation.nim @@ -559,8 +559,6 @@ proc validateDataColumnSidecar*( # (block_header.slot, block_header.proposer_index, data_column_sidecar.index) # with valid header signature, sidecar inclusion proof, and kzg proof. 
let block_root = hash_tree_root(block_header) - if dag.getBlockRef(block_root).isSome(): - return errIgnore("DataColumnSidecar: already have block") if dataColumnQuarantine[].hasDataColumn( block_header.slot, block_header.proposer_index, data_column_sidecar.index): return errIgnore("DataColumnSidecar: already have valid data column from same proposer") diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index b1a5b7cc3c..a42652df9d 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -481,7 +481,12 @@ proc initFullNode( let accumulatedDataColumns = dataColumnQuarantine[].gatherDataColumns(forkyBlck.root) - if dataColumnQuarantine[].supernode and + if accumulatedColumns.len == 0: + # no data columns were sent for this post Fulu block, yet + return await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, + Opt.none(BlobSidecars), Opt.none(DataColumnSidecars), + maybeFinalized = maybeFinalized) + elif dataColumnQuarantine[].supernode and accumulatedDataColumns.len <= dataColumnQuarantine[].custody_columns.len: # We don't have the requisite number of data columns for this block yet, # so we have put in columnless quarantine as a supernode @@ -503,7 +508,7 @@ proc initFullNode( accumulatedDataColumns.len >= (dataColumnQuarantine[].custody_columns.len div 2): # We have seen 50%+ data columns, we can attempt to add this block let dataColumns = dataColumnQuarantine[].popDataColumns(forkyBlck.root, forkyBlck) - await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, + return await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, Opt.none(BlobSidecars), Opt.some(dataColumns), maybeFinalized = maybeFinalized) From 609ed1d75a1563648087917770007f69a8e3285d Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Tue, 28 Jan 2025 14:09:01 +0530 Subject: [PATCH 39/60] typo --- beacon_chain/nimbus_beacon_node.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index a42652df9d..823eeaf09d 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -481,7 +481,7 @@ proc initFullNode( let accumulatedDataColumns = dataColumnQuarantine[].gatherDataColumns(forkyBlck.root) - if accumulatedColumns.len == 0: + if accumulatedDataColumns.len == 0: # no data columns were sent for this post Fulu block, yet return await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, Opt.none(BlobSidecars), Opt.none(DataColumnSidecars), From 80c36a7569286f5a8ccc4d3a3b626e796ded776b Mon Sep 17 00:00:00 2001 From: agnxsh Date: Tue, 28 Jan 2025 17:16:00 +0530 Subject: [PATCH 40/60] fix upgrade to fulu function --- beacon_chain/spec/beaconstate.nim | 34 ------------------------------- 1 file changed, 34 deletions(-) diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index 06aa862bc0..23f0fb5426 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -2372,40 +2372,6 @@ func upgrade_to_fulu*( # pending_consolidations are default empty lists ) - post.exit_balance_to_consume = - get_activation_exit_churn_limit(cfg, post[], cache) - post.consolidation_balance_to_consume = - get_consolidation_churn_limit(cfg, post[], cache) - - # [New in Electra:EIP7251] - # add validators that are not yet active to pending balance deposits - var pre_activation: seq[(Epoch, uint64)] - for index, validator in post.validators: - if validator.activation_epoch == 
FAR_FUTURE_EPOCH: - pre_activation.add((validator.activation_eligibility_epoch, index.uint64)) - sort(pre_activation) - - for (_, index) in pre_activation: - let balance = post.balances.item(index) - post.balances[index] = 0.Gwei - let validator = addr post.validators.mitem(index) - validator[].effective_balance = 0.Gwei - validator[].activation_eligibility_epoch = FAR_FUTURE_EPOCH - # Use bls.G2_POINT_AT_INFINITY as a signature field placeholder and - # GENESIS_SLOT to distinguish from a pending deposit request - discard post.pending_deposits.add PendingDeposit( - pubkey: validator[].pubkey, - withdrawal_credentials: validator[].withdrawal_credentials, - amount: balance, - signature: ValidatorSig.infinity, - slot: GENESIS_SLOT) - - # Ensure early adopters of compounding credentials go through the activation - # churn - for index, validator in post.validators: - if has_compounding_withdrawal_credential(validator): - queue_excess_active_balance(post[], index.uint64) - post func latest_block_root*(state: ForkyBeaconState, state_root: Eth2Digest): From 302407d3b440024410e7ba95960a1659c1c010cb Mon Sep 17 00:00:00 2001 From: agnxsh Date: Tue, 28 Jan 2025 19:02:03 +0530 Subject: [PATCH 41/60] modify upgrade to fulu --- beacon_chain/spec/beaconstate.nim | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index 23f0fb5426..25e7f3fbf3 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -2358,13 +2358,13 @@ func upgrade_to_fulu*( historical_summaries: pre.historical_summaries, # [New in Electra:EIP6110] - deposit_requests_start_index: UNSET_DEPOSIT_REQUESTS_START_INDEX, + deposit_requests_start_index: pre.deposit_requests_start_index, # [New in Electra:EIP7251] - deposit_balance_to_consume: 0.Gwei, - exit_balance_to_consume: 0.Gwei, - earliest_exit_epoch: earliest_exit_epoch, - consolidation_balance_to_consume: 0.Gwei, + deposit_balance_to_consume: pre.deposit_balance_to_consume, + exit_balance_to_consume: pre.exit_balance_to_consume, + earliest_exit_epoch: pre.earliest_exit_epoch, + consolidation_balance_to_consume: pre.consolidation_balance_to_consume, earliest_consolidation_epoch: compute_activation_exit_epoch(get_current_epoch(pre)) From 656ce11501aa22b2b2f5b1bfcf8848f429612bb5 Mon Sep 17 00:00:00 2001 From: agnxsh Date: Tue, 28 Jan 2025 19:12:15 +0530 Subject: [PATCH 42/60] gate blob publishing upto < Fulu --- beacon_chain/validators/message_router.nim | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index 7a0d7a4854..4291a17a75 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -223,7 +223,8 @@ proc routeSignedBeaconBlock*( final_columns.add dc dataColumnRefs = Opt.some(final_columns.mapIt(newClone(it))) - elif typeof(blck).kind >= ConsensusFork.Deneb: + elif typeof(blck).kind >= ConsensusFork.Deneb and + typeof(blck).kind < ConsensusFork.Fulu: if blobsOpt.isSome(): let blobs = blobsOpt.get() var workers = newSeq[Future[SendResult]](blobs.len) From c6ef55a310ced8dca60637ef8ec8d6788eb24a88 Mon Sep 17 00:00:00 2001 From: agnxsh Date: Tue, 28 Jan 2025 22:28:59 +0530 Subject: [PATCH 43/60] fix indentation --- beacon_chain/gossip_processing/block_processor.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_chain/gossip_processing/block_processor.nim 
b/beacon_chain/gossip_processing/block_processor.nim index 71b70345e5..6a053311a0 100644 --- a/beacon_chain/gossip_processing/block_processor.nim +++ b/beacon_chain/gossip_processing/block_processor.nim @@ -735,7 +735,7 @@ proc storeBlock( blck = shortLog(signedBlock.message), signature = shortLog(signedBlock.signature), msg = r.error() - return err((VerifierError.Invalid, ProcessingStatus.completed)) + return err((VerifierError.Invalid, ProcessingStatus.completed)) # TODO with v1.4.0, not sure this is still relevant # Establish blob viability before calling addHeadBlock to avoid # writing the block in case of blob error. From 6cc5ae2214e7a5baa77d89c7aa9b18c7834867dd Mon Sep 17 00:00:00 2001 From: agnxsh Date: Tue, 28 Jan 2025 23:44:58 +0530 Subject: [PATCH 44/60] fix fulu state upgrade --- beacon_chain/spec/beaconstate.nim | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index 25e7f3fbf3..421ebac378 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -2286,18 +2286,6 @@ func upgrade_to_fulu*( blob_gas_used: pre.latest_execution_payload_header.blob_gas_used, excess_blob_gas: pre.latest_execution_payload_header.excess_blob_gas) - var max_exit_epoch = FAR_FUTURE_EPOCH - for v in pre.validators: - if v.exit_epoch != FAR_FUTURE_EPOCH: - max_exit_epoch = - if max_exit_epoch == FAR_FUTURE_EPOCH: - v.exit_epoch - else: - max(max_exit_epoch, v.exit_epoch) - if max_exit_epoch == FAR_FUTURE_EPOCH: - max_exit_epoch = get_current_epoch(pre) - let earliest_exit_epoch = max_exit_epoch + 1 - let post = (ref fulu.BeaconState)( # Versioning genesis_time: pre.genesis_time, @@ -2366,10 +2354,10 @@ func upgrade_to_fulu*( earliest_exit_epoch: pre.earliest_exit_epoch, consolidation_balance_to_consume: pre.consolidation_balance_to_consume, earliest_consolidation_epoch: - compute_activation_exit_epoch(get_current_epoch(pre)) - - # pending_balance_deposits, pending_partial_withdrawals, and - # pending_consolidations are default empty lists + compute_activation_exit_epoch(get_current_epoch(pre)), + pending_deposits: pre.pending_deposits, + pending_partial_withdrawals: pre.pending_partial_withdrawals, + pending_consolidations: pre.pending_consolidations ) post From 5d9badcf9f108544be0af1d786e624da28fddb69 Mon Sep 17 00:00:00 2001 From: agnxsh Date: Wed, 29 Jan 2025 04:41:05 +0530 Subject: [PATCH 45/60] updated processDataColumnSidecar ordering --- beacon_chain/nimbus_beacon_node.nim | 31 +++++++++++----------- beacon_chain/validators/message_router.nim | 6 ++--- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 823eeaf09d..961e03f409 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -2048,20 +2048,6 @@ proc installMessageValidators(node: BeaconNode) = node.processor[].processSignedBeaconBlock( MsgSource.gossip, signedBlock))) - # data_column_sidecar_{subnet_id} - when consensusFork >= ConsensusFork.Fulu: - # data_column_sidecar_{subnet_id} - for it in 0'u64..= ConsensusFork.Electra: @@ -2184,8 +2170,7 @@ proc installMessageValidators(node: BeaconNode) = await node.processor.processBlsToExecutionChange( MsgSource.gossip, msg))) - when consensusFork >= ConsensusFork.Deneb and - consensusFork < ConsensusFork.Fulu: + when consensusFork >= ConsensusFork.Deneb: # blob_sidecar_{subnet_id} # 
https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id for it in BlobId: @@ -2199,6 +2184,20 @@ proc installMessageValidators(node: BeaconNode) = node.processor[].processBlobSidecar( MsgSource.gossip, blobSidecar, subnet_id))) + # data_column_sidecar_{subnet_id} + when consensusFork >= ConsensusFork.Fulu: + # data_column_sidecar_{subnet_id} + for it in 0'u64..= ConsensusFork.Fulu: # if blobsOpt.isSome: # let @@ -125,7 +125,7 @@ proc routeSignedBeaconBlock*( # blobsOpt.get.mapIt( # KzgBlob(bytes: it.blob))) # let kzgCommits = - # signedBlock.message.body.blob_kzg_commitments.asSeq + # blck.message.body.blob_kzg_commitments.asSeq # if dataColumns[].get().len > 0 and kzgCommits.len > 0: # for i in 0.. Date: Wed, 29 Jan 2025 04:51:37 +0530 Subject: [PATCH 46/60] add upto Capella message handlers and data column sidecars topics in Fulu --- beacon_chain/nimbus_beacon_node.nim | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 961e03f409..6b46330526 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -1370,7 +1370,7 @@ proc addElectraMessageHandlers( proc addFuluMessageHandlers( node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.addElectraMessageHandlers(forkDigest, slot) + node.addCapellaMessageHandlers(forkDigest, slot) let targetSubnets = node.readCustodyGroupSubnets() custody = node.network.nodeId.get_custody_groups(max(SAMPLES_PER_SLOT.uint64, @@ -1405,6 +1405,14 @@ proc removeElectraMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = proc removeFuluMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = node.removeElectraMessageHandlers(forkDigest) + let + targetSubnets = node.readCustodyGroupSubnets() + custody = node.network.nodeId.get_custody_groups(max(SAMPLES_PER_SLOT.uint64, + targetSubnets.uint64)) + + for i in custody: + let topic = getDataColumnSidecarTopic(forkDigest, i) + node.network.unsubscribe(topic) proc updateSyncCommitteeTopics(node: BeaconNode, slot: Slot) = template lastSyncUpdate: untyped = From 4cd641372ae2d7ca68d1cae3fcd5e2b74587b6d1 Mon Sep 17 00:00:00 2001 From: agnxsh Date: Wed, 29 Jan 2025 14:40:42 +0530 Subject: [PATCH 47/60] fix message handlers --- .../gossip_processing/block_processor.nim | 1 - beacon_chain/nimbus_beacon_node.nim | 30 +++++++++---------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim index 6a053311a0..e51f83645f 100644 --- a/beacon_chain/gossip_processing/block_processor.nim +++ b/beacon_chain/gossip_processing/block_processor.nim @@ -630,7 +630,6 @@ proc storeBlock( else: Opt.none BlobSidecars # Blobs and columns can never co-exist in the same block - doAssert blobs.isSome and columns.isSome # Block has neither blob sidecar nor data column sidecar if blobs.isNone and columns.isNone: debug "Loaded parent block from storage", parent_root diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 6b46330526..c2e0e5e655 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -1404,7 +1404,7 @@ proc removeElectraMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = node.removeDenebMessageHandlers(forkDigest) proc removeFuluMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - 
node.removeElectraMessageHandlers(forkDigest) + node.removeCapellaMessageHandlers(forkDigest) let targetSubnets = node.readCustodyGroupSubnets() custody = node.network.nodeId.get_custody_groups(max(SAMPLES_PER_SLOT.uint64, @@ -2178,20 +2178,6 @@ proc installMessageValidators(node: BeaconNode) = await node.processor.processBlsToExecutionChange( MsgSource.gossip, msg))) - when consensusFork >= ConsensusFork.Deneb: - # blob_sidecar_{subnet_id} - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id - for it in BlobId: - closureScope: # Needed for inner `proc`; don't lift it out of loop. - let subnet_id = it - node.network.addValidator( - getBlobSidecarTopic(digest, subnet_id), proc ( - blobSidecar: deneb.BlobSidecar - ): ValidationResult = - toValidationResult( - node.processor[].processBlobSidecar( - MsgSource.gossip, blobSidecar, subnet_id))) - # data_column_sidecar_{subnet_id} when consensusFork >= ConsensusFork.Fulu: # data_column_sidecar_{subnet_id} @@ -2206,6 +2192,20 @@ proc installMessageValidators(node: BeaconNode) = node.processor[].processDataColumnSidecar( MsgSource.gossip, dataColumnSidecar, subnet_id))) + when consensusFork >= ConsensusFork.Deneb: + # blob_sidecar_{subnet_id} + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id + for it in BlobId: + closureScope: # Needed for inner `proc`; don't lift it out of loop. + let subnet_id = it + node.network.addValidator( + getBlobSidecarTopic(digest, subnet_id), proc ( + blobSidecar: deneb.BlobSidecar + ): ValidationResult = + toValidationResult( + node.processor[].processBlobSidecar( + MsgSource.gossip, blobSidecar, subnet_id))) + node.installLightClientMessageValidators() proc stop(node: BeaconNode) = From 90ad075ae5d05e25e5b9944ee179688bd94d28a0 Mon Sep 17 00:00:00 2001 From: agnxsh Date: Wed, 29 Jan 2025 14:44:36 +0530 Subject: [PATCH 48/60] fix copyright year --- beacon_chain/sync/request_manager.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_chain/sync/request_manager.nim b/beacon_chain/sync/request_manager.nim index 7682adf620..7f5e4ee16c 100644 --- a/beacon_chain/sync/request_manager.nim +++ b/beacon_chain/sync/request_manager.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
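
Several patches in this series adjust upgrade_to_fulu toward the same end state: the Electra-era churn and queue fields are carried over verbatim from the pre-state rather than being zeroed or recomputed, since Fulu does not change their semantics. The fix that follows continues that, replacing the recomputed earliest_consolidation_epoch with pre.earliest_consolidation_epoch. A minimal, self-contained sketch of the intent (simplified stand-in types, not the real electra/fulu BeaconState):

type
  Gwei = uint64
  Epoch = uint64

  ElectraStateSketch = object
    deposit_balance_to_consume: Gwei
    exit_balance_to_consume: Gwei
    earliest_exit_epoch: Epoch
    consolidation_balance_to_consume: Gwei
    earliest_consolidation_epoch: Epoch

  FuluStateSketch = object
    deposit_balance_to_consume: Gwei
    exit_balance_to_consume: Gwei
    earliest_exit_epoch: Epoch
    consolidation_balance_to_consume: Gwei
    earliest_consolidation_epoch: Epoch

func upgradeToFuluSketch(pre: ElectraStateSketch): FuluStateSketch =
  ## Every churn/queue field is copied unchanged from the pre-state;
  ## nothing is reset or re-derived during the fork upgrade.
  FuluStateSketch(
    deposit_balance_to_consume: pre.deposit_balance_to_consume,
    exit_balance_to_consume: pre.exit_balance_to_consume,
    earliest_exit_epoch: pre.earliest_exit_epoch,
    consolidation_balance_to_consume: pre.consolidation_balance_to_consume,
    earliest_consolidation_epoch: pre.earliest_consolidation_epoch)

when isMainModule:
  let pre = ElectraStateSketch(earliest_exit_epoch: Epoch(42))
  doAssert upgradeToFuluSketch(pre).earliest_exit_epoch == Epoch(42)
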
From 7c7bc220b4b7579ff03a073a24b92a1f0c93cdc9 Mon Sep 17 00:00:00 2001 From: agnxsh Date: Wed, 29 Jan 2025 16:22:43 +0530 Subject: [PATCH 49/60] another fix in upgrade to fulu --- beacon_chain/spec/beaconstate.nim | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index 421ebac378..f28f9408a7 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -2353,8 +2353,7 @@ func upgrade_to_fulu*( exit_balance_to_consume: pre.exit_balance_to_consume, earliest_exit_epoch: pre.earliest_exit_epoch, consolidation_balance_to_consume: pre.consolidation_balance_to_consume, - earliest_consolidation_epoch: - compute_activation_exit_epoch(get_current_epoch(pre)), + earliest_consolidation_epoch: pre.earliest_consolidation_epoch, pending_deposits: pre.pending_deposits, pending_partial_withdrawals: pre.pending_partial_withdrawals, pending_consolidations: pre.pending_consolidations From c434daf86251e8a30547c6e6a373300aa6427bc7 Mon Sep 17 00:00:00 2001 From: agnxsh Date: Wed, 29 Jan 2025 17:32:30 +0530 Subject: [PATCH 50/60] refine DA checking more --- beacon_chain/nimbus_beacon_node.nim | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index c2e0e5e655..5b60ff6713 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -486,24 +486,6 @@ proc initFullNode( return await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, Opt.none(BlobSidecars), Opt.none(DataColumnSidecars), maybeFinalized = maybeFinalized) - elif dataColumnQuarantine[].supernode and - accumulatedDataColumns.len <= dataColumnQuarantine[].custody_columns.len: - # We don't have the requisite number of data columns for this block yet, - # so we have put in columnless quarantine as a supernode - if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck): - return err(VerifierError.UnviableFork) - else: - return err(VerifierError.MissingParent) - - elif not dataColumnQuarantine[].supernode and - accumulatedDataColumns.len <= dataColumnQuarantine[].custody_columns.len: - # We don't have the requisite number of data columns for this block yet, - # so we have put in columnless quarantine as fullnode - if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck): - return err(VerifierError.UnviableFork) - else: - return err(VerifierError.MissingParent) - elif dataColumnQuarantine[].supernode and accumulatedDataColumns.len >= (dataColumnQuarantine[].custody_columns.len div 2): # We have seen 50%+ data columns, we can attempt to add this block @@ -513,8 +495,9 @@ proc initFullNode( maybeFinalized = maybeFinalized) else: + let dataColumns = dataColumnQuarantine[].popDataColumns(forkyBlck.root, forkyBlck) return await blockProcessor[].addBlock(MsgSource.gossip, signedBlock, - Opt.none(BlobSidecars), Opt.none(DataColumnSidecars), + Opt.none(BlobSidecars), Opt.some(dataColumns), maybeFinalized = maybeFinalized) elif consensusFork >= ConsensusFork.Deneb and consensusFork < ConsensusFork.Fulu: From c5461a1f232d64cce21ae8d0b79042ead1b18436 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Sat, 1 Feb 2025 14:20:19 +0530 Subject: [PATCH 51/60] log out time taken to reconstruct --- beacon_chain/spec/peerdas_helpers.nim | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/beacon_chain/spec/peerdas_helpers.nim b/beacon_chain/spec/peerdas_helpers.nim index 
434c8d9297..5694351db1 100644 --- a/beacon_chain/spec/peerdas_helpers.nim +++ b/beacon_chain/spec/peerdas_helpers.nim @@ -10,6 +10,7 @@ # Uncategorized helper functions from the spec import std/[algorithm, sequtils], + chronicles, results, eth/p2p/discoveryv5/[node], kzg4844/[kzg], @@ -163,6 +164,8 @@ proc recover_cells_and_proofs*( if not (data_columns.len != 0): return err("DataColumnSidecar: Length should not be 0") + let start = Moment.now() + let columnCount = data_columns.len blobCount = data_columns[0].column.len @@ -198,7 +201,8 @@ proc recover_cells_and_proofs*( recovered_cps[bIdx] = recovered_cells_and_proofs.get - + let finish = Moment.now() + debug "Time take to reconstruct sequentially", time = finish-start ok(recovered_cps) # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#get_data_column_sidecars From 07ce6ef706605dd43a692394fd457d9b966e73d9 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Sat, 1 Feb 2025 14:52:13 +0530 Subject: [PATCH 52/60] log out sidecar comms and proof len for testing --- beacon_chain/spec/peerdas_helpers.nim | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/beacon_chain/spec/peerdas_helpers.nim b/beacon_chain/spec/peerdas_helpers.nim index 5694351db1..d338077a1c 100644 --- a/beacon_chain/spec/peerdas_helpers.nim +++ b/beacon_chain/spec/peerdas_helpers.nim @@ -434,6 +434,15 @@ proc verify_data_column_sidecar_kzg_proofs*(sidecar: DataColumnSidecar): # Check is the sidecar column length = sidecar.kzg_commitments length # and sidecar.kzg_commitments length = sidecar.kzg_proofs length + debugEcho "sidecar column len" + debugEcho sidecar.column.len + + debugEcho "sidecar kzg commitments len" + debugEcho sidecar.kzg_commitments.len + + debugEcho "sidecar kzg proofs len" + debugEcho sidecar.kzg_proofs.len + if not (sidecar.column.len == sidecar.kzg_commitments.len): return err("Data column sidecar length is not equal to the kzg_commitments length") From a7af5513b60c76dbb280e0c01ee7120ffdcfd2ce Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Wed, 5 Feb 2025 13:39:53 +0530 Subject: [PATCH 53/60] address review comments --- beacon_chain/beacon_chain_file.nim | 2 +- beacon_chain/nimbus_beacon_node.nim | 13 +++++-------- beacon_chain/sync/sync_manager.nim | 1 + 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/beacon_chain/beacon_chain_file.nim b/beacon_chain/beacon_chain_file.nim index a07a6894dd..f8c4c2a45a 100644 --- a/beacon_chain/beacon_chain_file.nim +++ b/beacon_chain/beacon_chain_file.nim @@ -80,7 +80,7 @@ const BlobForkCodeRange = MaxForksCount .. (MaxForksCount + int(high(ConsensusFork)) - int(ConsensusFork.Deneb)) DataColumnForkCodeRange = - MaxForksCount .. (MaxForksCount + int(high(ConsensusFork)) - int(ConsensusFork.Fulu)) + MaxForksCount * 2 .. 
(MaxForksCount * 2 + int(high(ConsensusFork)) - int(ConsensusFork.Fulu)) func getBlockForkCode(fork: ConsensusFork): uint64 = uint64(fork) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 5b60ff6713..37e36c9bc1 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -429,14 +429,6 @@ proc initFullNode( NUMBER_OF_CUSTODY_GROUPS.uint64 else: CUSTODY_REQUIREMENT.uint64 - custody_columns_set = - node.network.nodeId.resolve_column_sets_from_custody_groups( - max(SAMPLES_PER_SLOT.uint64, - localCustodyGroups)) - custody_columns_list = - node.network.nodeId.resolve_column_list_from_custody_groups( - max(SAMPLES_PER_SLOT.uint64, - localCustodyGroups)) dataColumnQuarantine[].supernode = supernode dataColumnQuarantine[].custody_columns = node.network.nodeId.resolve_columns_from_custody_groups( @@ -444,6 +436,11 @@ proc initFullNode( localCustodyGroups)) let + custody_columns_set = + dataColumnQuarantine[].custody_columns.toHashSet() + custody_columns_list = + List[ColumnIndex, NUMBER_OF_COLUMNS].init( + dataColumnQuarantine[].custody_columns) consensusManager = ConsensusManager.new( dag, attestationPool, quarantine, node.elManager, ActionTracker.init(node.network.nodeId, config.subscribeAllSubnets), diff --git a/beacon_chain/sync/sync_manager.nim b/beacon_chain/sync/sync_manager.nim index 1405ebd934..865183a602 100644 --- a/beacon_chain/sync/sync_manager.nim +++ b/beacon_chain/sync/sync_manager.nim @@ -387,6 +387,7 @@ func groupDataColumns*( continue # Clients MUST include all data column sidecars of each block from which they include data column sidecars. # The following data column sidecars, where they exist, MUST be sent in consecutive (slot, index) order. + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1 let header = forkyBlck.toSignedBeaconBlockHeader() for column_idx in 0.. 
Date: Wed, 5 Feb 2025 13:46:56 +0530 Subject: [PATCH 54/60] fix typo --- beacon_chain/sync/sync_manager.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_chain/sync/sync_manager.nim b/beacon_chain/sync/sync_manager.nim index 865183a602..a86ac27257 100644 --- a/beacon_chain/sync/sync_manager.nim +++ b/beacon_chain/sync/sync_manager.nim @@ -757,7 +757,7 @@ proc syncStep[A, B]( groupedDataColumns.checkDataColumns().isOkOr: peer.updateScore(PeerScoreBadResponse) man.queue.push(req) - warn "Recieved data columns verification failed", + warn "Received data columns verification failed", data_columns_count = len(dataColumnData), data_columns_map = getShortMap(req, dataColumnData), request = req, From 4cf8a586db47edee3ab861770f42f93e9b577831 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Wed, 5 Feb 2025 13:55:45 +0530 Subject: [PATCH 55/60] use disjoint --- beacon_chain/sync/sync_manager.nim | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/beacon_chain/sync/sync_manager.nim b/beacon_chain/sync/sync_manager.nim index a86ac27257..8fab029752 100644 --- a/beacon_chain/sync/sync_manager.nim +++ b/beacon_chain/sync/sync_manager.nim @@ -256,11 +256,8 @@ proc checkPeerCustody(man: SyncManager, max(SAMPLES_PER_SLOT.uint64, remoteCustodyGroupCount)) - for local_column in man.custody_columns_set: - if local_column in remoteCustodyColumns: - return false - - return true + return disjoint(man.custody_columns_set, + remoteCustodyColumns) else: return false From 0a79a51f4abbaf954cb4958dd0fe416ef55b2501 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Sat, 22 Feb 2025 11:27:07 +0530 Subject: [PATCH 56/60] remove column support from SyncManager and LFS --- beacon_chain/beacon_chain_file.nim | 69 +------ .../blockchain_list.nim | 45 +---- beacon_chain/nimbus_beacon_node.nim | 24 +-- beacon_chain/sync/sync_manager.nim | 191 +----------------- beacon_chain/sync/sync_overseer.nim | 10 +- beacon_chain/sync/sync_queue.nim | 76 +------ tests/test_sync_manager.nim | 100 +++------ 7 files changed, 56 insertions(+), 459 deletions(-) diff --git a/beacon_chain/beacon_chain_file.nim b/beacon_chain/beacon_chain_file.nim index f8c4c2a45a..b8c8c2128a 100644 --- a/beacon_chain/beacon_chain_file.nim +++ b/beacon_chain/beacon_chain_file.nim @@ -79,8 +79,6 @@ const int(ConsensusFork.Phase0) .. int(high(ConsensusFork)) BlobForkCodeRange = MaxForksCount .. (MaxForksCount + int(high(ConsensusFork)) - int(ConsensusFork.Deneb)) - DataColumnForkCodeRange = - MaxForksCount * 2 .. (MaxForksCount * 2 + int(high(ConsensusFork)) - int(ConsensusFork.Fulu)) func getBlockForkCode(fork: ConsensusFork): uint64 = uint64(fork) @@ -96,13 +94,6 @@ func getBlobForkCode(fork: ConsensusFork): uint64 = of ConsensusFork.Phase0 .. ConsensusFork.Capella: raiseAssert "Blobs are not supported for the fork" -func getDataColumnForkCode(fork: ConsensusFork): uint64 = - case fork - of ConsensusFork.Fulu: - uint64(MaxForksCount) - of ConsensusFork.Phase0 .. 
ConsensusFork.Electra: - raiseAssert "Data columns are not supported for the fork" - proc init(t: typedesc[ChainFileError], k: ChainFileErrorType, m: string): ChainFileError = ChainFileError(kind: k, message: m) @@ -143,8 +134,7 @@ proc checkKind(kind: uint64): Result[void, string] = if res > uint64(high(int)): return err("Unsuppoted chunk kind value") int(res) - if (hkind in BlockForkCodeRange) or (hkind in BlobForkCodeRange) or - (hkind in DataColumnForkCodeRange): + if (hkind in BlockForkCodeRange) or (hkind in BlobForkCodeRange): ok() else: err("Unsuppoted chunk kind value") @@ -270,12 +260,6 @@ template getBlobChunkKind(kind: ConsensusFork, last: bool): uint64 = else: getBlobForkCode(kind) -template getDataColumnChunkKind(kind: ConsensusFork,last: bool): uint64 = - if last: - maskKind(getDataColumnForkCode(kind)) - else: - getDataColumnForkCode(kind) - proc getBlockConsensusFork(header: ChainFileHeader): ConsensusFork = let hkind = unmaskKind(header.kind) if int(hkind) in BlockForkCodeRange: @@ -291,10 +275,6 @@ template isBlob(h: ChainFileHeader | ChainFileFooter): bool = let hkind = unmaskKind(h.kind) int(hkind) in BlobForkCodeRange -template isDataColumn(h: ChainFileHeader | ChainFileFooter): bool = - let hkind = unmaskKind(h.kind) - int(hkind) in DataColumnForkCodeRange - template isLast(h: ChainFileHeader | ChainFileFooter): bool = h.kind.isLast() @@ -311,7 +291,7 @@ proc setTail*(chandle: var ChainFileHandle, bdata: BlockData) = chandle.data.tail = Opt.some(bdata) proc store*(chandle: ChainFileHandle, signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], dataColumns: Opt[DataColumnSidecars]): + blobs: Opt[BlobSidecars]): Result[void, string] = let origOffset = updateFilePos(chandle.handle, 0'i64, SeekPosition.SeekEnd).valueOr: @@ -363,36 +343,6 @@ proc store*(chandle: ChainFileHandle, signedBlock: ForkedSignedBeaconBlock, discard fsync(chandle.handle) return err(IncompleteWriteError) - if dataColumns.isSome(): - let dataColumnSidecars = - dataColumns.get - for index, dataColumn in dataColumnSidecars.pairs(): - let - kind = - getDataColumnChunkKind(signedBlock.kind, (index + 1) == - len(dataColumnSidecars)) - (data, plainSize) = - block: - let res = SSZ.encode(dataColumn[]) - (snappy.encode(res), len(res)) - slot = dataColumn[].signed_block_header.message.slot - buffer = Chunk.init(kind, uint64(slot), uint32(plainSize), data) - - setFilePos(chandle.handle, 0'i64, SeekPosition.SeekEnd).isOkOr: - discard truncate(chandle.handle, origOffset) - discard fsync(chandle.handle) - return err(ioErrorMsg(error)) - - let - wrote = writeFile(chandle.handle, buffer).valueOr: - discard truncate(chandle.handle, origOffset) - discard fsync(chandle.handle) - return err(ioErrorMsg(error)) - if wrote != uint(len(buffer)): - discard truncate(chandle.handle, origOffset) - discard fsync(chandle.handle) - return err(IncompleteWriteError) - fsync(chandle.handle).isOkOr: discard truncate(chandle.handle, origOffset) return err(ioErrorMsg(error)) @@ -601,21 +551,6 @@ proc decodeBlob( return err("Incorrect blob format") ok(blob) -proc decodeDataColumn( - header: ChainFileHeader, - data: openArray[byte], -): Result[DataColumnSidecar, string] = - if header.plainSize > uint32(MaxChunkSize): - return err("Size of data column is enormously big") - - let - decompressed = snappy.decode(data, uint32(header.plainSize)) - dataColumn = - try: - SSZ.decode(decompressed, DataColumnSidecar) - except SerializationError: - return err("Incorrect data column format") - ok(dataColumn) proc getChainFileTail*(handle: 
IoHandle): Result[Opt[BlockData], string] = var sidecars: BlobSidecars diff --git a/beacon_chain/consensus_object_pools/blockchain_list.nim b/beacon_chain/consensus_object_pools/blockchain_list.nim index e4c5e59b5b..4d909339ed 100644 --- a/beacon_chain/consensus_object_pools/blockchain_list.nim +++ b/beacon_chain/consensus_object_pools/blockchain_list.nim @@ -9,7 +9,6 @@ import std/sequtils, stew/io2, chronicles, chronos, metrics, ../spec/forks, - ../spec/peerdas_helpers, ../[beacon_chain_file, beacon_clock], ../sszdump @@ -129,7 +128,7 @@ proc setTail*(clist: ChainListRef, bdata: BlockData) = clist.handle = Opt.some(handle) proc store*(clist: ChainListRef, signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], dataColumns: Opt[DataColumnSidecars]): + blobs: Opt[BlobSidecars]): Result[void, string] = if clist.handle.isNone(): let @@ -137,9 +136,9 @@ proc store*(clist: ChainListRef, signedBlock: ForkedSignedBeaconBlock, flags = {ChainFileFlag.Repair, ChainFileFlag.OpenAlways} handle = ? ChainFileHandle.init(filename, flags) clist.handle = Opt.some(handle) - store(handle, signedBlock, blobs, dataColumns) + store(handle, signedBlock, blobs) else: - store(clist.handle.get(), signedBlock, blobs, dataColumns) + store(clist.handle.get(), signedBlock, blobs) proc checkBlobs(signedBlock: ForkedSignedBeaconBlock, blobsOpt: Opt[BlobSidecars]): Result[void, VerifierError] = @@ -169,30 +168,9 @@ proc checkBlobs(signedBlock: ForkedSignedBeaconBlock, return err(VerifierError.Invalid) ok() -proc checkDataColumns*(signedBlock: ForkedSignedBeaconBlock, - dataColumnsOpt: Opt[DataColumnSidecars]): - Result[void, VerifierError] = - withBlck(signedBlock): - when consensusFork >= ConsensusFork.Fulu: - if dataColumnsOpt.isSome: - let dataColumns = dataColumnsOpt.get() - if dataColumns.len > 0: - for i in 0..= wallEpoch - man.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS) -proc checkPeerCustody(man: SyncManager, - peer: Peer): - bool = - # Returns TRUE if the peer custodies atleast, - # ONE of the common custody columns, straight - # away return TRUE if the peer is a supernode. 
- if man.supernode: - # For a supernode, it is always best/optimistic - # to filter other supernodes, rather than filter - # too many full nodes that have a subset of the - # custody columns - if peer.lookupCgcFromPeer() == - NUMBER_OF_CUSTODY_GROUPS.uint64: - return true - - else: - if peer.lookupCgcFromPeer() == - NUMBER_OF_CUSTODY_GROUPS.uint64: - return true - - elif peer.lookupCgcFromPeer() == - CUSTODY_REQUIREMENT.uint64: - - # Fetch the remote custody count - let remoteCustodyGroupCount = - peer.lookupCgcFromPeer() - - # Extract remote peer's nodeID from peerID - # Fetch custody groups from remote peer - let - remoteNodeId = fetchNodeIdFromPeerId(peer) - remoteCustodyColumns = - remoteNodeId.resolve_column_sets_from_custody_groups( - max(SAMPLES_PER_SLOT.uint64, - remoteCustodyGroupCount)) - - return disjoint(man.custody_columns_set, - remoteCustodyColumns) - - else: - return false - proc shouldGetBlobs[A, B](man: SyncManager[A, B], r: SyncRequest[A]): bool = man.shouldGetBlobs(r.slot) or man.shouldGetBlobs(r.slot + (r.count - 1)) -proc shouldGetDataColumns[A, B](man: SyncManager[A,B], s: Slot): bool = - let - wallEpoch = man.getLocalWallSlot().epoch - epoch = s.epoch() - (epoch >= man.FULU_FORK_EPOCH) and - (wallEpoch < man.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS or - epoch >= wallEpoch - man.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS) - -proc shouldGetDataColumns[A, B](man: SyncManager[A, B], r: SyncRequest[A]): bool = - man.shouldGetDataColumns(r.slot) or man.shouldGetDataColumns(r.slot + (r.count - 1)) - proc getBlobSidecars[A, B](man: SyncManager[A, B], peer: A, req: SyncRequest[A]): Future[BlobSidecarsRes] {.async: (raises: [CancelledError], raw: true).} = @@ -292,24 +222,6 @@ proc getBlobSidecars[A, B](man: SyncManager[A, B], peer: A, debug "Requesting blobs sidecars from peer", request = req blobSidecarsByRange(peer, req.slot, req.count) -proc getDataColumnSidecars[A, B](man: SyncManager[A, B], - peer: A, - req: SyncRequest): - Future[DataColumnSidecarsRes] - {.async: (raises: [CancelledError], raw: true).} = - mixin getScore, `==` - - logScope: - peer_score = peer.getScore() - peer_speed = peer.netKbps() - sync_ident = man.direction - topics = "syncman" - - doAssert(not(req.isEmpty()), "Request must not be empty!") - debug "Requesting data column sidecars from peer", request = req - dataColumnSidecarsByRange(peer, req.slot, req.count, man.custody_columns_list) - - proc remainingSlots(man: SyncManager): uint64 = let first = man.getFirstSlot() @@ -369,43 +281,6 @@ func checkBlobs(blobs: seq[BlobSidecars]): Result[void, string] = ? blob_sidecar[].verify_blob_sidecar_inclusion_proof() ok() -func groupDataColumns*( - blocks: seq[ref ForkedSignedBeaconBlock], - data_columns: seq[ref DataColumnSidecar] -): Result[seq[DataColumnSidecars], string] = - var - grouped = newSeq[DataColumnSidecars](len(blocks)) - column_cursor = 0 - for block_idx, blck in blocks: - withBlck(blck[]): - when consensusFork >= ConsensusFork.Fulu: - template kzgs: untyped = forkyBlck.message.body.blob_kzg_commitments - if kzgs.len == 0: - continue - # Clients MUST include all data column sidecars of each block from which they include data column sidecars. - # The following data column sidecars, where they exist, MUST be sent in consecutive (slot, index) order. 
- # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1 - let header = forkyBlck.toSignedBeaconBlockHeader() - for column_idx in 0..= ConsensusFork.Fulu: - if forkyBlck.message.body.blob_kzg_commitments.len > 0: - hasDataColumns = true - break - hasDataColumns - let blobData = if shouldGetBlobs: let blobs = await man.getBlobSidecars(peer, req) @@ -716,54 +578,6 @@ proc syncStep[A, B]( else: Opt.none(seq[BlobSidecars]) - let dataColumnData = - if shouldGetDataColumns and man.checkPeerCustody(peer): - let data_columns = await man.getDataColumnSidecars(peer, req) - if data_columns.isErr(): - peer.updateScore(PeerScoreNoValues) - man.queue.push(req) - debug "Failed to receive data_columns on request", - request = req, err = data_columns.error - return - let dataColumnData = data_columns.get().asSeq() - debug "Received data columns on request", - data_columns_count = len(dataColumnData), - data_columns_map = getShortMap(req, dataColumnData), - request = req - - if len(dataColumnData) > 0: - let slots = - mapIt(dataColumnData, it[].signed_block_header.message.slot) - checkDataColumnsResponse(req, slots).isOkOr: - peer.updateScore(PeerScoreBadResponse) - man.queue.push(req) - warn "Incorrect data column sequence received", - data_columns_count = len(dataColumnData), - data_columns_map = getShortMap(req, dataColumnData), - request = req, - reason = error - return - let groupedDataColumns = groupDataColumns(blockData, dataColumnData).valueOr: - peer.updateScore(PeerScoreNoValues) - man.queue.push(req) - info "Received data columns sequence is inconsistent", - data_columns_map = getShortMap(req, dataColumnData), - request = req, msg = error - return - - groupedDataColumns.checkDataColumns().isOkOr: - peer.updateScore(PeerScoreBadResponse) - man.queue.push(req) - warn "Received data columns verification failed", - data_columns_count = len(dataColumnData), - data_columns_map = getShortMap(req, dataColumnData), - request = req, - reason = error - return - Opt.some(groupedDataColumns) - else: - Opt.none(seq[DataColumnSidecars]) - if len(blockData) == 0 and man.direction == SyncQueueKind.Backward and req.contains(man.getSafeSlot()): # The sync protocol does not distinguish between: @@ -789,8 +603,7 @@ proc syncStep[A, B]( maybeFinalized = lastSlot < peerFinalized await man.queue.push( - req, blockData, blobData, - dataColumnData, maybeFinalized, proc() = + req, blockData, blobData, maybeFinalized, proc() = man.workers[index].status = SyncWorkerStatus.Processing) proc syncWorker[A, B]( diff --git a/beacon_chain/sync/sync_overseer.nim b/beacon_chain/sync/sync_overseer.nim index 23a7f07299..cbbab92849 100644 --- a/beacon_chain/sync/sync_overseer.nim +++ b/beacon_chain/sync/sync_overseer.nim @@ -394,16 +394,13 @@ proc initUntrustedSync(overseer: SyncOverseerRef): Future[void] {. 
let blck = await overseer.getBlock(blockHeader.slot, blockHeader) blobsCount = if blck.blob.isNone(): 0 else: len(blck.blob.get()) - dataColumnsCount = - if blck.dataColumn.isNone(): 0 else: len(blck.dataColumn.get()) notice "Received beacon block", blck = shortLog(blck.blck), - blobs_count = blobsCount, - data_columns_count = dataColumnsCount + blobs_count = blobsCount overseer.statusMsg = Opt.some("storing block") - let res = overseer.clist.addBackfillBlockData(blck.blck, blck.blob, blck.dataColumn) + let res = overseer.clist.addBackfillBlockData(blck.blck, blck.blob) if res.isErr(): warn "Unable to store initial block", reason = res.error return @@ -411,8 +408,7 @@ proc initUntrustedSync(overseer: SyncOverseerRef): Future[void] {. overseer.statusMsg = Opt.none(string) notice "Initial block being stored", - blck = shortLog(blck.blck), blobs_count = blobsCount, - data_columns_count = dataColumnsCount + blck = shortLog(blck.blck), blobs_count = blobsCount proc startBackfillTask(overseer: SyncOverseerRef): Future[void] {. async: (raises: []).} = diff --git a/beacon_chain/sync/sync_queue.nim b/beacon_chain/sync/sync_queue.nim index 36be5d40a3..906c723a85 100644 --- a/beacon_chain/sync/sync_queue.nim +++ b/beacon_chain/sync/sync_queue.nim @@ -28,7 +28,6 @@ type ProcessingCallback* = proc() {.gcsafe, raises: [].} BlockVerifier* = proc(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars], - data_columns: Opt[DataColumnSidecars], maybeFinalized: bool): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} @@ -46,7 +45,6 @@ type request*: SyncRequest[T] data*: seq[ref ForkedSignedBeaconBlock] blobs*: Opt[seq[BlobSidecars]] - dataColumns*: Opt[seq[DataColumnSidecars]] GapItem*[T] = object start*: Slot @@ -135,27 +133,6 @@ proc getShortMap*[T](req: SyncRequest[T], res.add('|') res -proc getShortMap*[T](req: SyncRequest[T], - data: openArray[ref DataColumnSidecar]): string = - ## Returns all slot numbers in ``data`` as placement map. - var res = newStringOfCap(req.count * MAX_BLOBS_PER_BLOCK) - var cur : uint64 = 0 - for slot in req.slot..= lenu64(data): - res.add('|') - continue - if slot == data[cur].signed_block_header.message.slot: - for k in cur..= lenu64(data) or slot != data[k].signed_block_header.message.slot: - res.add('|') - break - else: - inc(cur) - res.add('x') - else: - res.add('|') - res - proc contains*[T](req: SyncRequest[T], slot: Slot): bool {.inline.} = slot >= req.slot and slot < req.slot + req.count @@ -224,36 +201,6 @@ proc checkBlobsResponse*[T](req: SyncRequest[T], ok() -proc checkDataColumnsResponse*[T](req: SyncRequest[T], - data: openArray[Slot]): - Result[void, cstring] = - if data.len == 0: - # Impossible to verify empty response - return ok() - - if lenu64(data) > (req.count * NUMBER_OF_COLUMNS): - # Number of data columns in response should be less or equal to - # number of requested (blocks * MAX_BLOCKS_PER_BLOCK_ELECTRA). 
- return err("Too many data columns received") - - var - pSlot = data[0] - counter = 0'u64 - for slot in data: - if (slot < req.slot) or (slot >= req.slot + req.count): - return err("Some of the data columns are not in requested range") - if slot < pSlot: - return err("incorrect order") - if slot == pSlot: - inc counter - if counter > MAX_BLOBS_PER_BLOCK_ELECTRA: - return err("Number of data columns in the block exceeds the limit") - else: - counter = 1'u64 - pSlot = slot - - ok() - proc init[T](t1: typedesc[SyncRequest], kind: SyncQueueKind, start: Slot, finish: Slot, t2: typedesc[T]): SyncRequest[T] = let count = finish - start + 1'u64 @@ -633,24 +580,15 @@ func getOpt(blobs: Opt[seq[BlobSidecars]], i: int): Opt[BlobSidecars] = else: Opt.none(BlobSidecars) -# This belongs inside the blocks iterator below, but can't be there due to -# https://github.com/nim-lang/Nim/issues/21242 -func getOpt(data_columns: Opt[seq[DataColumnSidecars]], i: int): Opt[DataColumnSidecars] = - if data_columns.isSome: - Opt.some(data_columns.get()[i]) - else: - Opt.none DataColumnSidecars - iterator blocks[T](sq: SyncQueue[T], - sr: SyncResult[T]): (ref ForkedSignedBeaconBlock, Opt[BlobSidecars], - Opt[DataColumnSidecars]) = + sr: SyncResult[T]): (ref ForkedSignedBeaconBlock, Opt[BlobSidecars]) = case sq.kind of SyncQueueKind.Forward: for i in countup(0, len(sr.data) - 1): - yield (sr.data[i], sr.blobs.getOpt(i), sr.dataColumns.getOpt(i)) + yield (sr.data[i], sr.blobs.getOpt(i)) of SyncQueueKind.Backward: for i in countdown(len(sr.data) - 1, 0): - yield (sr.data[i], sr.blobs.getOpt(i), sr.dataColumns.getOpt(i)) + yield (sr.data[i], sr.blobs.getOpt(i)) proc advanceOutput*[T](sq: SyncQueue[T], number: uint64) = case sq.kind @@ -705,7 +643,6 @@ func numAlreadyKnownSlots[T](sq: SyncQueue[T], sr: SyncRequest[T]): uint64 = proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T], data: seq[ref ForkedSignedBeaconBlock], blobs: Opt[seq[BlobSidecars]], - dataColumns: Opt[seq[DataColumnSidecars]], maybeFinalized: bool = false, processingCb: ProcessingCallback = nil) {.async: (raises: [CancelledError]).} = logScope: @@ -733,8 +670,7 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T], # SyncQueue reset happens. We are exiting to wake up sync-worker. 
return else: - let syncres = SyncResult[T](request: sr, data: data, blobs: blobs, - dataColumns: dataColumns) + let syncres = SyncResult[T](request: sr, data: data, blobs: blobs) sq.readyQueue.push(syncres) break @@ -784,8 +720,8 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T], res: Result[void, VerifierError] var i=0 - for blk, blb, cols in sq.blocks(item): - res = await sq.blockVerifier(blk[], blb, cols, maybeFinalized) + for blk, blb in sq.blocks(item): + res = await sq.blockVerifier(blk[], blb, maybeFinalized) inc(i) if res.isOk(): diff --git a/tests/test_sync_manager.nim b/tests/test_sync_manager.nim index 7a3d326a7a..3fe2d82856 100644 --- a/tests/test_sync_manager.nim +++ b/tests/test_sync_manager.nim @@ -13,8 +13,7 @@ import unittest2 import chronos import ../beacon_chain/gossip_processing/block_processor, ../beacon_chain/sync/sync_manager, - ../beacon_chain/spec/forks, - ../beacon_chain/spec/peerdas_helpers + ../beacon_chain/spec/forks type SomeTPeer = ref object @@ -51,7 +50,7 @@ func collector(queue: AsyncQueue[BlockEntry]): BlockVerifier = # testing goes, this is risky because it might introduce differences between # the BlockProcessor and this test proc verify(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars], - dataColumns: Opt[DataColumnSidecars], maybeFinalized: bool): + maybeFinalized: bool): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} = let fut = Future[Result[void, VerifierError]].Raising([CancelledError]).init() try: queue.addLastNoWait(BlockEntry(blck: signedBlock, resfut: fut)) @@ -111,35 +110,6 @@ suite "SyncManager test suite": inc sidecarIdx res - proc createDataColumns( - blocks: var seq[ref ForkedSignedBeaconBlock], slots: seq[Slot] - ): seq[ref DataColumnSidecar] = - var res = newSeq[ref DataColumnSidecar](len(slots)) - for blck in blocks: - withBlck(blck[]): - when consensusFork >= ConsensusFork.Fulu: - template kzgs: untyped = forkyBlck.message.body.blob_kzg_commitments - for i, slot in slots: - if slot == forkyBlck.message.slot: - doAssert kzgs.add default(KzgCommitment) - if kzgs.len > 0: - forkyBlck.root = hash_tree_root(forkyBlck.message) - var - kzg_proofs: KzgProofs - blobs: Blobs - for _ in kzgs: - doAssert kzg_proofs.add default(KzgProof) - doAssert blobs.add default(Blob) - let bsidecars = forkyBlck.create_blob_sidecars(kzg_proofs, blobs) - let dcsidecars = - forkyBlck.get_data_column_sidecars(bsidecars.mapIt(KzgBlob(bytes: it.blob))) - var sidecarIdx = 0 - for i, slot in slots: - if slot == forkyBlck.message.slot: - res[i] = newClone dcsidecars.get[sidecarIdx] - inc sidecarIdx - res - func getSlice(chain: openArray[ref ForkedSignedBeaconBlock], startSlot: Slot, request: SyncRequest[SomeTPeer]): seq[ref ForkedSignedBeaconBlock] = let @@ -396,8 +366,7 @@ suite "SyncManager test suite": if request.isEmpty(): break await queue.push(request, getSlice(chain, start, request), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await validatorFut.cancelAndWait() waitFor runSmokeTest() case kkind @@ -471,8 +440,7 @@ suite "SyncManager test suite": var r13 = queue.pop(finishSlot, p3) var f13 = queue.push(r13, chain.getSlice(startSlot, r13), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await sleepAsync(100.milliseconds) check: f13.finished == false @@ -481,8 +449,7 @@ suite "SyncManager test suite": of SyncQueueKind.Backward: counter == int(finishSlot) var f11 = queue.push(r11, chain.getSlice(startSlot, 
r11), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await sleepAsync(100.milliseconds) check: case kkind @@ -492,8 +459,7 @@ suite "SyncManager test suite": f13.finished == false var f12 = queue.push(r12, chain.getSlice(startSlot, r12), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await allFutures(f11, f12, f13) check: f12.finished == true and f12.failed == false @@ -596,8 +562,7 @@ suite "SyncManager test suite": check response[0][].slot >= getFowardSafeSlotCb() else: check response[^1][].slot <= getBackwardSafeSlotCb() - await queue.push(request, response, Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + await queue.push(request, response, Opt.none(seq[BlobSidecars])) await validatorFut.cancelAndWait() waitFor runTest() @@ -680,8 +645,7 @@ suite "SyncManager test suite": # Handle request 1. Should be re-enqueued as it simulates `Invalid`. let response1 = getSlice(chain, start, request1) - await queue.push(request1, response1, Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + await queue.push(request1, response1, Opt.none(seq[BlobSidecars])) check debtLen(queue) == request2.count + request1.count # Request 1 should be discarded as it is no longer relevant. @@ -693,8 +657,7 @@ suite "SyncManager test suite": # Handle request 3. Should be re-enqueued as it simulates `Invalid`. let response3 = getSlice(chain, start, request3) - await queue.push(request3, response3, Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + await queue.push(request3, response3, Opt.none(seq[BlobSidecars])) check debtLen(queue) == request3.count # Request 2 should be re-issued. @@ -708,8 +671,7 @@ suite "SyncManager test suite": # Handle request 4. Should be re-enqueued as it simulates `Invalid`. let response4 = getSlice(chain, start, request4) - await queue.push(request4, response4, Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + await queue.push(request4, response4, Opt.none(seq[BlobSidecars])) check debtLen(queue) == request4.count # Advance `safeSlot` out of band. 
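Taken together, the hunks above shrink the test-side `push` call from four data arguments to three: the `Opt[seq[DataColumnSidecars]]` argument disappears and callers pass only the block slice and optional blobs. A minimal before/after sketch, reusing the `queue`, `chain`, `startSlot` and request names from the surrounding tests:

  # before this patch: blocks, blobs and data columns
  await queue.push(r11, chain.getSlice(startSlot, r11),
                   Opt.none(seq[BlobSidecars]),
                   Opt.none(seq[DataColumnSidecars]))

  # after: blocks and optional blobs only
  await queue.push(r11, chain.getSlice(startSlot, r11),
                   Opt.none(seq[BlobSidecars]))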
@@ -826,16 +788,14 @@ suite "SyncManager test suite": var r14 = queue.pop(finishSlot, p4) var f14 = queue.push(r14, chain.getSlice(startSlot, r14), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await sleepAsync(100.milliseconds) check: f14.finished == false counter == int(startSlot) var f12 = queue.push(r12, chain.getSlice(startSlot, r12), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await sleepAsync(100.milliseconds) check: counter == int(startSlot) @@ -843,8 +803,7 @@ suite "SyncManager test suite": f14.finished == false var f11 = queue.push(r11, chain.getSlice(startSlot, r11), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await allFutures(f11, f12) check: counter == int(startSlot + chunkSize + chunkSize) @@ -856,8 +815,7 @@ suite "SyncManager test suite": withBlck(missingSlice[0][]): forkyBlck.message.proposer_index = 0xDEADBEAF'u64 var f13 = queue.push(r13, missingSlice, - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await allFutures(f13, f14) check: f11.finished == true and f11.failed == false @@ -879,20 +837,17 @@ suite "SyncManager test suite": check r18.isEmpty() == true var f17 = queue.push(r17, chain.getSlice(startSlot, r17), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await sleepAsync(100.milliseconds) check f17.finished == false var f16 = queue.push(r16, chain.getSlice(startSlot, r16), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await sleepAsync(100.milliseconds) check f16.finished == false var f15 = queue.push(r15, chain.getSlice(startSlot, r15), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await allFutures(f15, f16, f17) check: f15.finished == true and f15.failed == false @@ -939,8 +894,7 @@ suite "SyncManager test suite": # Push a single request that will fail with all blocks being unviable var f11 = queue.push(r11, chain.getSlice(startSlot, r11), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) discard await f11.withTimeout(1.seconds) check: @@ -1006,16 +960,14 @@ suite "SyncManager test suite": var r14 = queue.pop(finishSlot, p4) var f14 = queue.push(r14, chain.getSlice(startSlot, r14), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await sleepAsync(100.milliseconds) check: f14.finished == false counter == int(finishSlot) var f12 = queue.push(r12, chain.getSlice(startSlot, r12), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await sleepAsync(100.milliseconds) check: counter == int(finishSlot) @@ -1023,8 +975,7 @@ suite "SyncManager test suite": f14.finished == false var f11 = queue.push(r11, chain.getSlice(startSlot, r11), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await allFutures(f11, f12) check: counter == int(finishSlot - chunkSize - chunkSize) @@ -1035,8 +986,7 @@ suite "SyncManager test suite": var missingSlice = chain.getSlice(startSlot, r13) withBlck(missingSlice[0][]): forkyBlck.message.proposer_index = 0xDEADBEAF'u64 - var f13 = queue.push(r13, missingSlice, Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + var f13 = queue.push(r13, missingSlice, 
Opt.none(seq[BlobSidecars])) await allFutures(f13, f14) check: f11.finished == true and f11.failed == false @@ -1054,14 +1004,12 @@ suite "SyncManager test suite": check r17.isEmpty() == true var f16 = queue.push(r16, chain.getSlice(startSlot, r16), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await sleepAsync(100.milliseconds) check f16.finished == false var f15 = queue.push(r15, chain.getSlice(startSlot, r15), - Opt.none(seq[BlobSidecars]), - Opt.none(seq[DataColumnSidecars])) + Opt.none(seq[BlobSidecars])) await allFutures(f15, f16) check: f15.finished == true and f15.failed == false From 26b0ce323910056bf2664e4843334a2c09ff1622 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Tue, 4 Mar 2025 17:42:10 +0530 Subject: [PATCH 57/60] rm useless commented code --- beacon_chain/validators/message_router.nim | 24 ---------------------- 1 file changed, 24 deletions(-) diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index c1d1ddcfd1..9ebea2c656 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -115,30 +115,6 @@ proc routeSignedBeaconBlock*( signature = shortLog(blck.signature), error = res.error() return err($(res.error()[1])) - # # May not be required as we are already - # # KZG verifying the blobs once - # when typeof(blck).kind >= ConsensusFork.Fulu: - # if blobsOpt.isSome: - # let - # dataColumns = - # newClone get_data_column_sidecars(blck, - # blobsOpt.get.mapIt( - # KzgBlob(bytes: it.blob))) - # let kzgCommits = - # blck.message.body.blob_kzg_commitments.asSeq - # if dataColumns[].get().len > 0 and kzgCommits.len > 0: - # for i in 0..= ConsensusFork.Deneb: if blobsOpt.isSome: let blobs = blobsOpt.get() From 72ca0139ff3304c867f42082c5bf3df7f478dd23 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Tue, 4 Mar 2025 17:55:23 +0530 Subject: [PATCH 58/60] fix post rebase issue --- beacon_chain/validators/message_router.nim | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index 9ebea2c656..41ab12f78f 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -205,7 +205,9 @@ proc routeSignedBeaconBlock*( let blobs = blobsOpt.get() var workers = newSeq[Future[SendResult]](blobs.len) for i in 0.. 
Date: Tue, 4 Mar 2025 18:05:36 +0530 Subject: [PATCH 59/60] fix some style issues, and rm debug echoes --- beacon_chain/beacon_chain_file.nim | 4 +--- beacon_chain/consensus_object_pools/blockchain_list.nim | 6 ++---- beacon_chain/spec/peerdas_helpers.nim | 8 -------- 3 files changed, 3 insertions(+), 15 deletions(-) diff --git a/beacon_chain/beacon_chain_file.nim b/beacon_chain/beacon_chain_file.nim index 13ae49334c..46b7a4bc31 100644 --- a/beacon_chain/beacon_chain_file.nim +++ b/beacon_chain/beacon_chain_file.nim @@ -291,8 +291,7 @@ proc setTail*(chandle: var ChainFileHandle, bdata: BlockData) = chandle.data.tail = Opt.some(bdata) proc store*(chandle: ChainFileHandle, signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars]): - Result[void, string] = + blobs: Opt[BlobSidecars]): Result[void, string] = let origOffset = updateFilePos(chandle.handle, 0'i64, SeekPosition.SeekEnd).valueOr: return err(ioErrorMsg(error)) @@ -551,7 +550,6 @@ proc decodeBlob( return err("Incorrect blob format") ok(blob) - proc getChainFileTail*(handle: IoHandle): Result[Opt[BlockData], string] = var sidecars: BlobSidecars while true: diff --git a/beacon_chain/consensus_object_pools/blockchain_list.nim b/beacon_chain/consensus_object_pools/blockchain_list.nim index 4d909339ed..60c0fd699e 100644 --- a/beacon_chain/consensus_object_pools/blockchain_list.nim +++ b/beacon_chain/consensus_object_pools/blockchain_list.nim @@ -128,8 +128,7 @@ proc setTail*(clist: ChainListRef, bdata: BlockData) = clist.handle = Opt.some(handle) proc store*(clist: ChainListRef, signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars]): - Result[void, string] = + blobs: Opt[BlobSidecars]): Result[void, string] = if clist.handle.isNone(): let filename = clist.path.chainFilePath() @@ -170,8 +169,7 @@ proc checkBlobs(signedBlock: ForkedSignedBeaconBlock, proc addBackfillBlockData*( clist: ChainListRef, signedBlock: ForkedSignedBeaconBlock, - blobsOpt: Opt[BlobSidecars]): - Result[void, VerifierError] = + blobsOpt: Opt[BlobSidecars]): Result[void, VerifierError] = doAssert(not(isNil(clist))) logScope: diff --git a/beacon_chain/spec/peerdas_helpers.nim b/beacon_chain/spec/peerdas_helpers.nim index 9da3455b8a..a13eb38d9c 100644 --- a/beacon_chain/spec/peerdas_helpers.nim +++ b/beacon_chain/spec/peerdas_helpers.nim @@ -434,14 +434,6 @@ proc verify_data_column_sidecar_kzg_proofs*(sidecar: DataColumnSidecar): # Check is the sidecar column length = sidecar.kzg_commitments length # and sidecar.kzg_commitments length = sidecar.kzg_proofs length - debugEcho "sidecar column len" - debugEcho sidecar.column.len - - debugEcho "sidecar kzg commitments len" - debugEcho sidecar.kzg_commitments.len - - debugEcho "sidecar kzg proofs len" - debugEcho sidecar.kzg_proofs.len if not (sidecar.column.len == sidecar.kzg_commitments.len): return err("Data column sidecar length is not equal to the kzg_commitments length") From 0c77fb257842d53326a6807dc8b15a6a343d23e2 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Tue, 4 Mar 2025 18:25:30 +0530 Subject: [PATCH 60/60] fixed more styling related reviews --- beacon_chain/nimbus_beacon_node.nim | 3 +-- beacon_chain/sync/sync_queue.nim | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index eebb6005cd..fecd625d13 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -462,8 +462,7 @@ proc initFullNode( maybeFinalized = maybeFinalized) untrustedBlockVerifier = 
proc(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars], - maybeFinalized: bool): - Future[Result[void, VerifierError]] {. + maybeFinalized: bool): Future[Result[void, VerifierError]] {. async: (raises: [CancelledError], raw: true).} = clist.untrustedBackfillVerifier(signedBlock, blobs, maybeFinalized) rmanBlockVerifier = proc(signedBlock: ForkedSignedBeaconBlock, diff --git a/beacon_chain/sync/sync_queue.nim b/beacon_chain/sync/sync_queue.nim index bc1a6e65a0..70e3c61bab 100644 --- a/beacon_chain/sync/sync_queue.nim +++ b/beacon_chain/sync/sync_queue.nim @@ -24,8 +24,7 @@ type GetBoolCallback* = proc(): bool {.gcsafe, raises: [].} ProcessingCallback* = proc() {.gcsafe, raises: [].} BlockVerifier* = proc(signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], - maybeFinalized: bool): + blobs: Opt[BlobSidecars], maybeFinalized: bool): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} SyncRange* = object
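With the series applied, `BlockVerifier` is back to a three-parameter callback: a forked signed block, optional blob sidecars and a finality hint. Below is a minimal sketch of a conforming callback, assuming the usual beacon_chain types (`ForkedSignedBeaconBlock`, `BlobSidecars`, `VerifierError`) plus `chronos` and `results` are in scope; the name `acceptAllVerifier` and its accept-everything behaviour are illustrative only, not part of this patch:

  proc acceptAllVerifier(signedBlock: ForkedSignedBeaconBlock,
                         blobs: Opt[BlobSidecars],
                         maybeFinalized: bool):
      Future[Result[void, VerifierError]] {.
      async: (raises: [CancelledError]).} =
    # A real verifier would hand the block (and blobs, if any) to the block
    # processor and return its verdict; this stub accepts every block.
    return ok()

A callback with this shape can be supplied wherever the sync queue expects a `BlockVerifier`, e.g. as the `blockVerifier` used by `SyncQueue` in the hunk above.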