Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

2.61 fork upgrade changes #1615

Merged
merged 2 commits into from
Jan 9, 2025
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 0 additions & 4 deletions zk/stages/stage_l1_sequencer_sync.go
Original file line number Diff line number Diff line change
@@ -58,10 +58,6 @@ func SpawnL1SequencerSyncStage(
if err != nil {
return err
}
if progress > 0 {
// if we have progress then we can assume that we have the single injected batch already so can just return here
return nil
}
if progress == 0 {
progress = cfg.zkCfg.L1FirstBlock - 1
}
15 changes: 14 additions & 1 deletion zk/stages/stage_witness.go
Original file line number Diff line number Diff line change
@@ -161,6 +161,11 @@ func SpawnStageWitness(
log.Info(fmt.Sprintf("[%s] Executing blocks and collecting witnesses", logPrefix), "from", startBlock, "to", stageInterhashesProgressBlockNo)

now := time.Now()

// used to ensure that any info tree updates for this batch are included in the witness - re-use of an index for example
// won't write to storage so will be missing from the witness but the prover needs it
forcedInfoTreeUpdates := make([]common.Hash, 0)

for _, block := range blocks {
reader.SetBlockNr(block.NumberU64())
tds := state.NewTrieDbState(prevHeader.Root, tx, startBlock-1, nil)
@@ -182,9 +187,17 @@ func SpawnStageWitness(
return fmt.Errorf("ExecuteBlockEphemerallyZk: %w", err)
}

forcedInfoTreeUpdate, err := witness.CheckForForcedInfoTreeUpdate(memHermezDb, block.NumberU64())
if err != nil {
return fmt.Errorf("CheckForForcedInfoTreeUpdate: %w", err)
}
if forcedInfoTreeUpdate != nil {
forcedInfoTreeUpdates = append(forcedInfoTreeUpdates, *forcedInfoTreeUpdate)
}

prevStateRoot = block.Root()

w, err := witness.BuildWitnessFromTrieDbState(ctx, memTx, tds, reader, cfg.forcedContracs, false)
w, err := witness.BuildWitnessFromTrieDbState(ctx, memTx, tds, reader, cfg.forcedContracs, forcedInfoTreeUpdates, false)
if err != nil {
return fmt.Errorf("BuildWitnessFromTrieDbState: %w", err)
}
48 changes: 42 additions & 6 deletions zk/witness/witness.go
Original file line number Diff line number Diff line change
@@ -7,12 +7,13 @@ import (
"math/big"
"time"

"github.com/ledgerwatch/erigon-lib/common"

"github.com/iden3/go-iden3-crypto/keccak256"
"github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/kv"
libstate "github.com/ledgerwatch/erigon-lib/state"
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/rawdb"
@@ -48,7 +49,7 @@ type Generator struct {
chainCfg *chain.Config
zkConfig *ethconfig.Zk
engine consensus.EngineReader
forcedContracts []common.Address
forcedContracts []libcommon.Address
}

func NewGenerator(
@@ -59,7 +60,7 @@ func NewGenerator(
chainCfg *chain.Config,
zkConfig *ethconfig.Zk,
engine consensus.EngineReader,
forcedContracs []common.Address,
forcedContracs []libcommon.Address,
) *Generator {
return &Generator{
dirs: dirs,
@@ -217,7 +218,7 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint
tds.StartNewBuffer()
trieStateWriter := tds.NewTrieStateWriter()

getHeader := func(hash common.Hash, number uint64) *eritypes.Header {
getHeader := func(hash libcommon.Hash, number uint64) *eritypes.Header {
h, e := g.blockReader.Header(ctx, tx, hash, number)
if e != nil {
log.Error("getHeader error", "number", number, "hash", hash, "err", e)
@@ -230,6 +231,10 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint
reader := state.NewPlainState(tx, blocks[0].NumberU64(), systemcontracts.SystemContractCodeLookup[g.chainCfg.ChainName])
defer reader.Close()

// used to ensure that any info tree updates for this batch are included in the witness - re-use of an index for example
// won't write to storage so will be missing from the witness but the prover needs it
forcedInfoTreeUpdates := make([]libcommon.Hash, 0)

for _, block := range blocks {
blockNum := block.NumberU64()
reader.SetBlockNr(blockNum)
@@ -257,10 +262,18 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint
return nil, fmt.Errorf("ExecuteBlockEphemerallyZk: %w", err)
}

forcedInfoTreeUpdate, err := CheckForForcedInfoTreeUpdate(hermezDb, blockNum)
if err != nil {
return nil, fmt.Errorf("CheckForForcedInfoTreeUpdate: %w", err)
}
if forcedInfoTreeUpdate != nil {
forcedInfoTreeUpdates = append(forcedInfoTreeUpdates, *forcedInfoTreeUpdate)
}

prevStateRoot = block.Root()
}

witness, err := BuildWitnessFromTrieDbState(ctx, rwtx, tds, reader, g.forcedContracts, witnessFull)
witness, err := BuildWitnessFromTrieDbState(ctx, rwtx, tds, reader, g.forcedContracts, forcedInfoTreeUpdates, witnessFull)
if err != nil {
return nil, fmt.Errorf("BuildWitnessFromTrieDbState: %w", err)
}
@@ -285,3 +298,26 @@ func (g *Generator) generateMockWitness(batchNum uint64, blocks []*eritypes.Bloc

return mockWitness, nil
}

// CheckForForcedInfoTreeUpdate returns the GER-manager storage slot that must be
// force-included in the witness when the given block carries an L1 info tree index.
// A re-used index performs no storage write, so the slot would otherwise be absent
// from the witness even though the prover requires it. It returns nil when the
// block has no info tree index (index 0 is treated as "none").
func CheckForForcedInfoTreeUpdate(reader *hermez_db.HermezDbReader, blockNum uint64) (*libcommon.Hash, error) {
	// check if there were any info tree index updates for this block number
	index, err := reader.GetBlockL1InfoTreeIndex(blockNum)
	if err != nil {
		return nil, fmt.Errorf("failed to check for block info tree index: %w", err)
	}
	if index == 0 {
		return nil, nil
	}

	// we need to load this info tree index to get the storage slot address to force witness inclusion
	update, err := reader.GetL1InfoTreeUpdate(index)
	if err != nil {
		return nil, fmt.Errorf("failed to get info tree index: %w", err)
	}
	// NOTE(review): guard against a missing record — lookup getters of this shape
	// often return (nil, nil) when the index is absent, which would panic below.
	if update == nil {
		return nil, fmt.Errorf("no info tree update found for index %d", index)
	}

	// Solidity mapping slot: keccak256(leftPad32(key) . leftPad32(slot)) where the
	// key is the GER and the slot is the global exit root storage position.
	d1 := common.LeftPadBytes(update.GER.Bytes(), 32)
	d2 := common.LeftPadBytes(state.GLOBAL_EXIT_ROOT_STORAGE_POS.Bytes(), 32)
	mapKey := keccak256.Hash(d1, d2)
	mkh := libcommon.BytesToHash(mapKey)

	return &mkh, nil
}
23 changes: 22 additions & 1 deletion zk/witness/witness_utils.go
Original file line number Diff line number Diff line change
@@ -119,7 +119,7 @@ type trieDbState interface {
ResolveSMTRetainList(inclusion map[common.Address][]common.Hash) (*trie.RetainList, error)
}

func BuildWitnessFromTrieDbState(ctx context.Context, tx kv.Tx, tds trieDbState, reader *corestate.PlainState, forcedContracts []common.Address, witnessFull bool) (witness *trie.Witness, err error) {
func BuildWitnessFromTrieDbState(ctx context.Context, tx kv.Tx, tds trieDbState, reader *corestate.PlainState, forcedContracts []common.Address, forcedInfoTreeUpdates []common.Hash, witnessFull bool) (witness *trie.Witness, err error) {
var rl trie.RetainDecider
// if full is true, we will send all the nodes to the witness
rl = &trie.AlwaysTrueRetainDecider{}
@@ -136,6 +136,27 @@ func BuildWitnessFromTrieDbState(ctx context.Context, tx kv.Tx, tds trieDbState,
}
}

// ensure that the ger manager is in the inclusion list if there are forced info tree updates
if len(forcedInfoTreeUpdates) > 0 {
if _, ok := inclusion[coreState.GER_MANAGER_ADDRESS]; !ok {
inclusion[coreState.GER_MANAGER_ADDRESS] = []common.Hash{}
}
}

// add any forced info tree updates to the inclusion list that aren't already there
for _, forced := range forcedInfoTreeUpdates {
skip := false
for _, hash := range inclusion[coreState.GER_MANAGER_ADDRESS] {
if hash == forced {
skip = true
break
}
}
if !skip {
inclusion[coreState.GER_MANAGER_ADDRESS] = append(inclusion[coreState.GER_MANAGER_ADDRESS], forced)
}
}

rl, err = tds.ResolveSMTRetainList(inclusion)
if err != nil {
return nil, err