feat: split compressed proofs across 2 clusters
ratankaliani committed Feb 1, 2025
1 parent aa6efa6 commit 823056e
Showing 14 changed files with 79 additions and 116 deletions.
2 changes: 1 addition & 1 deletion .gitmodules
@@ -12,7 +12,7 @@
url = https://github.com/succinctlabs/sp1-contracts
[submodule "contracts/lib/solady"]
path = contracts/lib/solady
url = https://github.com/vectorized/solady
url = https://github.com/Vectorized/solady
[submodule "contracts/lib/openzeppelin-contracts-upgradeable"]
path = contracts/lib/openzeppelin-contracts-upgradeable
url = https://github.com/OpenZeppelin/openzeppelin-contracts-upgradeable
1 change: 1 addition & 0 deletions Cargo.lock

Generated lockfile; diff not rendered.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -36,6 +36,7 @@ bincode = "1.3.3"
base64 = "0.22.1"
tower-http = { version = "0.5.2", features = ["limit"] }
kzg-rs = { version = "0.2.2" }
rand = "0.8.5"

# program tracing
tracing = { version = "0.1.40", default-features = false }
2 changes: 1 addition & 1 deletion contracts/lib/optimism
Submodule optimism updated 1672 files
2 changes: 1 addition & 1 deletion contracts/src/fp/OPSuccinctFaultDisputeGame.sol
@@ -30,7 +30,7 @@ import "src/fp/lib/Errors.sol";
import {AggregationOutputs} from "src/lib/Types.sol";

// Interfaces
import {ISemver} from "src/universal/interfaces/ISemver.sol";
import {ISemver} from "src/universal/ISemver.sol";
import {IDisputeGameFactory} from "src/dispute/interfaces/IDisputeGameFactory.sol";
import {IDisputeGame} from "src/dispute/interfaces/IDisputeGame.sol";
import {ISP1Verifier} from "@sp1-contracts/src/ISP1Verifier.sol";
2 changes: 1 addition & 1 deletion contracts/src/validity/OPSuccinctDisputeGame.sol
@@ -4,7 +4,7 @@ pragma solidity ^0.8.15;
import {OPSuccinctL2OutputOracle} from "./OPSuccinctL2OutputOracle.sol";
import {CWIA} from "@solady-v0.0.281/utils/legacy/CWIA.sol";
import {LibBytes} from "@solady-v0.0.281/utils/LibBytes.sol";
import {ISemver} from "@optimism/src/universal/interfaces/ISemver.sol";
import {ISemver} from "@optimism/src/universal/ISemver.sol";
import {IDisputeGame} from "@optimism/src/dispute/interfaces/IDisputeGame.sol";
import {Claim, GameStatus, GameType, GameTypes, Hash, Timestamp} from "@optimism/src/dispute/lib/Types.sol";
import {GameNotInProgress, OutOfOrderResolution} from "@optimism/src/dispute/lib/Errors.sol";
2 changes: 1 addition & 1 deletion contracts/src/validity/OPSuccinctDisputeGameFactory.sol
@@ -3,7 +3,7 @@ pragma solidity ^0.8.15;

import {IDisputeGame} from "@optimism/src/dispute/interfaces/IDisputeGame.sol";
import {LibCWIA} from "@solady-v0.0.281/utils/legacy/LibCWIA.sol";
import {ISemver} from "@optimism/src/universal/interfaces/ISemver.sol";
import {ISemver} from "@optimism/src/universal/ISemver.sol";

contract OPSuccinctDisputeGameFactory is ISemver {
using LibCWIA for address;
2 changes: 1 addition & 1 deletion contracts/src/validity/OPSuccinctL2OutputOracle.sol
@@ -2,7 +2,7 @@
pragma solidity ^0.8.15;

import {Initializable} from "@openzeppelin/contracts/proxy/utils/Initializable.sol";
import {ISemver} from "@optimism/src/universal/interfaces/ISemver.sol";
import {ISemver} from "@optimism/src/universal/ISemver.sol";
import {Types} from "@optimism/src/libraries/Types.sol";
import {AggregationOutputs} from "src/lib/Types.sol";
import {Constants} from "@optimism/src/libraries/Constants.sol";
2 changes: 1 addition & 1 deletion proposer/op/proposer/db/db.go
@@ -460,4 +460,4 @@ func (db *ProofDB) GetProofRequestsWithBlockRangeAndStatus(proofType proofreques
return nil, fmt.Errorf("failed to query proofs with block range and status: %w", err)
}
return proofs, nil
}
}
81 changes: 0 additions & 81 deletions proposer/op/proposer/driver.go
@@ -17,7 +17,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/slack-go/slack"

// Original Optimism Bindings

@@ -302,60 +301,6 @@ func (l *L2OutputSubmitter) GetProposerMetrics(ctx context.Context) (opsuccinctm
return metrics, nil
}

// SendSlackNotification sends a Slack notification with the proposer metrics.
func (l *L2OutputSubmitter) SendSlackNotification(proposerMetrics opsuccinctmetrics.ProposerMetrics) error {
if l.Cfg.SlackToken == "" {
l.Log.Info("Slack notifications disabled, token not set")
return nil // Slack notifications disabled if token not set
}

api := slack.New(l.Cfg.SlackToken)
channelID := "op-succinct-tests"

ctx, cancel := context.WithTimeout(l.ctx, l.Cfg.NetworkTimeout)
defer cancel()

rollupClient, err := l.RollupProvider.RollupClient(ctx)
if err != nil {
return fmt.Errorf("getting rollup client: %w", err)
}
cfg, err := rollupClient.RollupConfig(ctx)
if err != nil {
return fmt.Errorf("getting rollup config: %w", err)
}
l2BlockTime := cfg.BlockTime

// Get the number of minutes behind the L2 finalized block the contract is.
minutesBehind := (proposerMetrics.L2FinalizedBlock - proposerMetrics.LatestContractL2Block) * l2BlockTime / 60

message := fmt.Sprintf("*Chain %d Proposer Metrics*:\n"+
"Contract is %d minutes behind L2 Finalized\n"+
"| L2 Unsafe | L2 Finalized | Contract L2 | Proven L2 | Min to Agg |\n"+
"| %-9d | %-12d | %-11d | %-9d | %-9d |\n"+
"| Proving | Witness Gen | Unrequested |\n"+
"| %-9d | %-11d | %-11d |",
l.Cfg.L2ChainID,
minutesBehind,
proposerMetrics.L2UnsafeHeadBlock,
proposerMetrics.L2FinalizedBlock,
proposerMetrics.LatestContractL2Block,
proposerMetrics.HighestProvenContiguousL2Block,
proposerMetrics.MinBlockToProveToAgg,
proposerMetrics.NumProving,
proposerMetrics.NumWitnessgen,
proposerMetrics.NumUnrequested)

_, _, err = api.PostMessage(
channelID,
slack.MsgOptionText(message, false),
)
if err != nil {
return fmt.Errorf("error sending Slack notification: %w", err)
}

return nil
}

func (l *L2OutputSubmitter) SubmitAggProofs(ctx context.Context) error {
// Get the latest output index from the L2OutputOracle contract
latestBlockNumber, err := l.l2ooContract.LatestBlockNumber(&bind.CallOpts{Context: ctx})
@@ -690,16 +635,6 @@ func (l *L2OutputSubmitter) loopL2OO(ctx context.Context) {
if err != nil {
l.Log.Error("failed to submit agg proofs", "err", err)
}
case <-slackMetricsTicker.C:
metrics, err := l.GetProposerMetrics(ctx)
if err != nil {
l.Log.Error("failed to get metrics for Slack notification", "err", err)
continue
}
err = l.SendSlackNotification(metrics)
if err != nil {
l.Log.Error("failed to send Slack notification", "err", err)
}
case <-l.done:
return
}
@@ -749,22 +684,6 @@ func (l *L2OutputSubmitter) checkpointBlockHash(ctx context.Context) (uint64, co
blockHash := header.Hash()
blockNumber := header.Number

// Check if the block hash has ALREADY been checkpointed on the L2OO contract.
// If it has, we can skip the checkpointing step.
contract, err := opsuccinctbindings.NewOPSuccinctL2OutputOracleCaller(*l.Cfg.L2OutputOracleAddr, l.L1Client)
if err != nil {
return 0, common.Hash{}, err
}
maybeBlockHash, err := contract.HistoricBlockHashes(&bind.CallOpts{Context: cCtx}, blockNumber)
if err != nil {
return 0, common.Hash{}, err
}
if maybeBlockHash != (common.Hash{}) {
l.Log.Info("Block hash already checkpointed on L2OO contract", "block_number", blockNumber, "block_hash", blockHash)
return blockNumber.Uint64(), blockHash, nil
}

// If not, send a transaction to checkpoint the blockhash on the L2OO contract.
var receipt *types.Receipt
data, err := l.CheckpointBlockHashTxData(blockNumber)
if err != nil {
34 changes: 26 additions & 8 deletions proposer/op/proposer/prove.go
@@ -93,7 +93,6 @@ func (l *L2OutputSubmitter) RetryRequest(req *ent.ProofRequest, status ProofStat
unexecutable := status.ExecutionStatus == SP1ExecutionStatusUnexecutable
spanProof := req.Type == proofrequest.TypeSPAN
multiBlockRange := req.EndBlock-req.StartBlock > 1

// Get the number of failed requests with the same block range and status.
prevFailedReq, err := l.db.GetProofRequestsWithBlockRangeAndStatus(req.Type, req.StartBlock, req.EndBlock, proofrequest.StatusFAILED)
if err != nil {
@@ -149,18 +148,37 @@ func (l *L2OutputSubmitter) RequestQueuedProofs(ctx context.Context) error {

if nextProofToRequest.Type == proofrequest.TypeAGG {
if nextProofToRequest.L1BlockHash == "" {
blockNumber, blockHash, err := l.checkpointBlockHash(ctx)
// Check if there's an existing agg proof with the same block range and a checkpointed L1BlockHash
existingProofs, err := l.db.GetProofRequestsWithBlockRangeAndStatus(proofrequest.TypeAGG, nextProofToRequest.StartBlock, nextProofToRequest.EndBlock, proofrequest.StatusFAILED)
if err != nil {
l.Log.Error("failed to checkpoint block hash", "err", err)
l.Log.Error("failed to check for existing agg proof", "err", err)
return err
}
nextProofToRequest, err = l.db.AddL1BlockInfoToAggRequest(nextProofToRequest.StartBlock, nextProofToRequest.EndBlock, blockNumber, blockHash.Hex())
if err != nil {
l.Log.Error("failed to add L1 block info to AGG request", "err", err)
// Loop over existing proofs and if any of them have a checkpointed L1BlockHash, add it to the next proof to request.
for _, proof := range existingProofs {
if proof.L1BlockHash != "" {
nextProofToRequest, err = l.db.AddL1BlockInfoToAggRequest(nextProofToRequest.StartBlock, nextProofToRequest.EndBlock, proof.L1BlockNumber, proof.L1BlockHash)
if err != nil {
l.Log.Error("failed to add L1 block info from existing checkpointed proof to AGG request", "err", err)
return err
}
break
}
}

// wait for the next loop so that we have the version with the block info added
return nil
// If the proof still doesn't have a L1BlockHash, checkpoint the block hash and add it to the request.
if nextProofToRequest.L1BlockHash == "" {
blockNumber, blockHash, err := l.checkpointBlockHash(ctx)
if err != nil {
l.Log.Error("failed to checkpoint block hash", "err", err)
return err
}
nextProofToRequest, err = l.db.AddL1BlockInfoToAggRequest(nextProofToRequest.StartBlock, nextProofToRequest.EndBlock, blockNumber, blockHash.Hex())
if err != nil {
l.Log.Error("failed to add L1 block info to AGG request", "err", err)
return err
}
}
} else {
l.Log.Info("found agg proof with already checkpointed l1 block info")
}
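The new branch above first reuses an L1 block checkpoint from a previously FAILED agg request covering the same block range, and only falls back to checkpointBlockHash when none exists, avoiding an extra on-chain checkpoint transaction on retries. A minimal sketch of that reuse-or-checkpoint decision, written in Rust for brevity (the struct and helper names here are illustrative, not part of the commit):

/// A previously failed aggregation proof request for the same block range
/// (simplified; an empty hash means no L1 block was checkpointed for it).
struct FailedAggRequest {
    l1_block_number: u64,
    l1_block_hash: String,
}

/// Reuse the first available checkpointed L1 block from prior failed
/// requests; None means a new checkpoint transaction is still required.
fn reuse_checkpoint(failed: &[FailedAggRequest]) -> Option<(u64, &str)> {
    failed
        .iter()
        .find(|req| !req.l1_block_hash.is_empty())
        .map(|req| (req.l1_block_number, req.l1_block_hash.as_str()))
}

fn main() {
    let failed = vec![
        FailedAggRequest { l1_block_number: 0, l1_block_hash: String::new() },
        FailedAggRequest { l1_block_number: 123, l1_block_hash: "0xabc".into() },
    ];
    match reuse_checkpoint(&failed) {
        Some((num, hash)) => println!("reusing checkpointed block {num}: {hash}"),
        None => println!("no prior checkpoint; send a new checkpointBlockHash tx"),
    }
}
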
2 changes: 2 additions & 0 deletions proposer/succinct/Cargo.toml
@@ -37,5 +37,7 @@ base64.workspace = true
tower-http.workspace = true
serde_repr = "0.1.19"

rand.workspace = true

[build-dependencies]
op-succinct-build-utils.workspace = true
16 changes: 15 additions & 1 deletion proposer/succinct/bin/server.rs
@@ -22,6 +22,7 @@ use op_succinct_proposer::{
AggProofRequest, ProofResponse, ProofStatus, SpanProofRequest, SuccinctProposerConfig,
ValidateConfigRequest, ValidateConfigResponse,
};
use rand::Rng;
use sp1_sdk::{
network::{
proto::network::{ExecutionStatus, FulfillmentStatus},
@@ -182,7 +183,20 @@ async fn request_span_proof(
}
};

let client = ProverClient::builder().network().build();
let cluster_1_weight = env::var("CLUSTER_1_WEIGHT")
.unwrap_or_else(|_| "50".to_string())
.parse::<u8>()
.unwrap_or(50);

let private_key = if rand::thread_rng().gen_range(0..100) < cluster_1_weight {
env::var("NETWORK_PRIVATE_KEY_1").unwrap()
} else {
env::var("NETWORK_PRIVATE_KEY_2").unwrap()
};
let client = ProverClient::builder()
.network()
.private_key(&private_key)
.build();
let proof_id = client
.prove(&state.range_pk, &sp1_stdin)
.compressed()
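The cluster split in request_span_proof reduces to a weighted coin flip per proof request, deciding which of NETWORK_PRIVATE_KEY_1 or NETWORK_PRIVATE_KEY_2 is handed to the ProverClient. A minimal standalone sketch of that selection, using the same rand 0.8 API and the 50/50 default from the diff (the pick_cluster helper and the simulation are illustrative, not part of the commit):

use rand::Rng;

/// Route a proof request to one of two proving clusters.
/// `cluster_1_weight` is a percentage in [0, 100]: 100 always picks
/// cluster 1, 0 always picks cluster 2.
fn pick_cluster(cluster_1_weight: u8) -> usize {
    if rand::thread_rng().gen_range(0..100) < cluster_1_weight {
        1
    } else {
        2
    }
}

fn main() {
    // Mirrors the server default: a 50/50 split when CLUSTER_1_WEIGHT is unset.
    let weight: u8 = std::env::var("CLUSTER_1_WEIGHT")
        .unwrap_or_else(|_| "50".to_string())
        .parse()
        .unwrap_or(50);

    // Rough sanity check of the split over many simulated requests.
    let trials = 10_000;
    let cluster_1_hits = (0..trials).filter(|_| pick_cluster(weight) == 1).count();
    println!("cluster 1 took {cluster_1_hits}/{trials} requests (weight = {weight})");
}

Setting CLUSTER_1_WEIGHT=100 routes every compressed proof to the first cluster, 0 routes everything to the second, and intermediate values split load per request rather than per batch.
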
46 changes: 27 additions & 19 deletions utils/host/src/fetcher.rs
@@ -441,7 +441,7 @@ impl OPSuccinctDataFetcher {
N: Network,
{
let latest_block = provider
.get_block(BlockId::latest(), BlockTransactionsKind::Hashes)
.get_block(BlockId::finalized(), BlockTransactionsKind::Hashes)
.await?;
let mut low = 0;
let mut high = if let Some(block) = latest_block {
@@ -749,10 +749,14 @@ impl OPSuccinctDataFetcher {

// FIXME: Investigate requirement for L1 head offset beyond batch posting block with safe head > L2 end block.
let l1_head_number = l1_head_number + 20;
// The new L1 header requested should not be greater than the latest L1 header.
let latest_l1_header = self.get_l1_header(BlockId::latest()).await?;
let l1_head_hash = match l1_head_number > latest_l1_header.number {
true => latest_l1_header.hash_slow(),
// The new L1 header requested should not be greater than the finalized L1 header minus 10 blocks.
let finalized_l1_header = self.get_l1_header(BlockId::finalized()).await?;

let l1_head_hash = match l1_head_number > finalized_l1_header.number {
true => self
.get_l1_header(finalized_l1_header.number.into())
.await?
.hash_slow(),
false => self.get_l1_header(l1_head_number.into()).await?.hash_slow(),
};

@@ -816,25 +820,29 @@
/// Get the L1 block time in seconds.
#[allow(dead_code)]
async fn get_l1_block_time(&self) -> Result<u64> {
let l1_head = self.get_l1_header(BlockId::latest()).await?;
let finalized_l1_header = self.get_l1_header(BlockId::finalized()).await?;

let l1_head_minus_1 = l1_head.number - 1;
let l1_block_minus_1 = self.get_l1_header(l1_head_minus_1.into()).await?;
Ok(l1_head.timestamp - l1_block_minus_1.timestamp)
let finalized_l1_header_minus_1 = finalized_l1_header.number - 1;
let l1_block_minus_1 = self
.get_l1_header(finalized_l1_header_minus_1.into())
.await?;
Ok(finalized_l1_header.timestamp - l1_block_minus_1.timestamp)
}

/// Get the L2 block time in seconds.
pub async fn get_l2_block_time(&self) -> Result<u64> {
let l2_head = self.get_l2_header(BlockId::latest()).await?;
let finalized_l2_header = self.get_l2_header(BlockId::finalized()).await?;

let l2_head_minus_1 = l2_head.number - 1;
let l2_block_minus_1 = self.get_l2_header(l2_head_minus_1.into()).await?;
Ok(l2_head.timestamp - l2_block_minus_1.timestamp)
let finalized_l2_header_minus_1 = finalized_l2_header.number - 1;
let l2_block_minus_1 = self
.get_l2_header(finalized_l2_header_minus_1.into())
.await?;
Ok(finalized_l2_header.timestamp - l2_block_minus_1.timestamp)
}

/// Get the L1 block from which the `l2_end_block` can be derived.
pub async fn get_l1_head_with_safe_head(&self, l2_end_block: u64) -> Result<(B256, u64)> {
let latest_l1_header = self.get_l1_header(BlockId::latest()).await?;
let latest_l1_header = self.get_l1_header(BlockId::finalized()).await?;

// Get the l1 origin of the l2 end block.
let l2_end_block_hex = format!("0x{:x}", l2_end_block);
@@ -904,12 +912,12 @@

// Get L1 head.
let l2_block_timestamp = self.get_l2_header(l2_end_block.into()).await?.timestamp;
let latest_l1_timestamp = self.get_l1_header(BlockId::latest()).await?.timestamp;
let finalized_l1_timestamp = self.get_l1_header(BlockId::finalized()).await?.timestamp;

// Ensure that the target timestamp is not greater than the latest L1 timestamp.
// Ensure that the target timestamp is not greater than the finalized L1 timestamp.
let target_timestamp = min(
l2_block_timestamp + (max_batch_post_delay_minutes * 60),
latest_l1_timestamp,
finalized_l1_timestamp,
);
Ok(self.find_l1_block_by_timestamp(target_timestamp).await?)
}
@@ -950,8 +958,8 @@ impl OPSuccinctDataFetcher {

/// Check if the safeDB is activated on the L2 node.
pub async fn is_safe_db_activated(&self) -> Result<bool> {
let l1_block = self.get_l1_header(BlockId::latest()).await?;
let l1_block_number_hex = format!("0x{:x}", l1_block.number);
let finalized_l1_header = self.get_l1_header(BlockId::finalized()).await?;
let l1_block_number_hex = format!("0x{:x}", finalized_l1_header.number);
let result: Result<SafeHeadResponse, _> = self
.fetch_rpc_data_with_mode(
RPCMode::L2Node,
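Throughout the fetcher, BlockId::latest() is replaced with BlockId::finalized(), and the derived L1 head is clamped to the finalized tip instead of the latest one (finalized headers, unlike latest ones, cannot be reorged away). A small sketch of that clamping behavior (the +20 offset comes from the diff above; the choose_l1_head helper and the numbers are illustrative):

use std::cmp::min;

/// Pick the L1 head to prove against: offset the candidate head for safety,
/// but never go past the finalized L1 block number.
fn choose_l1_head(candidate_head: u64, finalized: u64) -> u64 {
    let requested = candidate_head + 20;
    min(requested, finalized)
}

fn main() {
    // If the offset head would pass the finalized tip, fall back to finalized.
    assert_eq!(choose_l1_head(100, 110), 110);
    // Otherwise use the offset head directly.
    assert_eq!(choose_l1_head(100, 200), 120);
    println!("ok");
}
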
