diff --git a/cicd/devnet/start.sh b/cicd/devnet/start.sh index 2673e6f1c0c6..bbf77552a8b5 100755 --- a/cicd/devnet/start.sh +++ b/cicd/devnet/start.sh @@ -95,6 +95,23 @@ else gc_mode=$GC_MODE fi +fastsync_args="" +if test -n "$FASTSYNC_PIVOT_NUMBER" +then + echo "FASTSYNC_PIVOT_NUMBER found, set to $FASTSYNC_PIVOT_NUMBER" + fastsync_args="${fastsync_args} --fastsyncpivotnumber ${FASTSYNC_PIVOT_NUMBER}" +fi +if test -n "$FASTSYNC_PIVOT_HASH" +then + echo "FASTSYNC_PIVOT_HASH found, set to $FASTSYNC_PIVOT_HASH" + fastsync_args="${fastsync_args} --fastsyncpivothash ${FASTSYNC_PIVOT_HASH}" +fi +if test -n "$FASTSYNC_PIVOT_ROOT" +then + echo "FASTSYNC_PIVOT_ROOT found, set to $FASTSYNC_PIVOT_ROOT" + fastsync_args="${fastsync_args} --fastsyncpivotroot ${FASTSYNC_PIVOT_ROOT}" +fi + miner_gaslimit=50000000 if test -z "$MINER_GASLIMIT" then @@ -133,5 +150,6 @@ XDC --ethstats ${netstats} \ --miner-gasprice "1" --miner-gaslimit "${miner_gaslimit}" --verbosity ${log_level} \ --debugdatadir /work/xdcchain \ --store-reward \ +${fastsync_args} \ --ws --ws-addr=0.0.0.0 --ws-port $ws_port \ --ws-origins "*" 2>&1 >>/work/xdcchain/xdc.log | tee -a /work/xdcchain/xdc.log diff --git a/cicd/local/start.sh b/cicd/local/start.sh index d7b5a576efb7..d220ae95c621 100755 --- a/cicd/local/start.sh +++ b/cicd/local/start.sh @@ -128,6 +128,22 @@ else fi netstats="${NODE_NAME}-${wallet}:$ethstats_secret@$ethstats_address" +fastsync_args="" +if test -n "$FASTSYNC_PIVOT_NUMBER" +then + echo "FASTSYNC_PIVOT_NUMBER found, set to $FASTSYNC_PIVOT_NUMBER" + fastsync_args="${fastsync_args} --fastsyncpivotnumber ${FASTSYNC_PIVOT_NUMBER}" +fi +if test -n "$FASTSYNC_PIVOT_HASH" +then + echo "FASTSYNC_PIVOT_HASH found, set to $FASTSYNC_PIVOT_HASH" + fastsync_args="${fastsync_args} --fastsyncpivothash ${FASTSYNC_PIVOT_HASH}" +fi +if test -n "$FASTSYNC_PIVOT_ROOT" +then + echo "FASTSYNC_PIVOT_ROOT found, set to $FASTSYNC_PIVOT_ROOT" + fastsync_args="${fastsync_args} --fastsyncpivotroot ${FASTSYNC_PIVOT_ROOT}" 
+fi echo "Running a node with wallet: ${wallet} at IP: ${instance_ip}" echo "Starting nodes with $bootnodes ..." @@ -148,5 +164,6 @@ XDC \ --miner-gasprice "1" --miner-gaslimit "${miner_gaslimit}" --verbosity ${log_level} \ --debugdatadir /work/xdcchain \ --store-reward \ +${fastsync_args} \ --ws --ws-addr=0.0.0.0 --ws-port $ws_port \ --ws-origins "*" 2>&1 >>/work/xdcchain/xdc.log | tee -a /work/xdcchain/xdc.log diff --git a/cicd/mainnet/start.sh b/cicd/mainnet/start.sh index 3be3350a2f6f..b57be607ae95 100755 --- a/cicd/mainnet/start.sh +++ b/cicd/mainnet/start.sh @@ -100,6 +100,23 @@ fi netstats="${NODE_NAME}-${wallet}:$ethstats_secret@$ethstats_address" +fastsync_args="" +if test -n "$FASTSYNC_PIVOT_NUMBER" +then + echo "FASTSYNC_PIVOT_NUMBER found, set to $FASTSYNC_PIVOT_NUMBER" + fastsync_args="${fastsync_args} --fastsyncpivotnumber ${FASTSYNC_PIVOT_NUMBER}" +fi +if test -n "$FASTSYNC_PIVOT_HASH" +then + echo "FASTSYNC_PIVOT_HASH found, set to $FASTSYNC_PIVOT_HASH" + fastsync_args="${fastsync_args} --fastsyncpivothash ${FASTSYNC_PIVOT_HASH}" +fi +if test -n "$FASTSYNC_PIVOT_ROOT" +then + echo "FASTSYNC_PIVOT_ROOT found, set to $FASTSYNC_PIVOT_ROOT" + fastsync_args="${fastsync_args} --fastsyncpivotroot ${FASTSYNC_PIVOT_ROOT}" +fi + INSTANCE_IP=$(curl https://checkip.amazonaws.com) echo "Running a node with wallet: ${wallet} at IP: ${INSTANCE_IP}" @@ -120,5 +137,6 @@ XDC --ethstats ${netstats} \ --miner-gasprice "1" --miner-gaslimit "420000000" --verbosity ${log_level} \ --debugdatadir /work/xdcchain \ --store-reward \ + ${fastsync_args} \ --ws --ws-addr=0.0.0.0 --ws-port $ws_port \ --ws-origins "*" 2>&1 >>/work/xdcchain/xdc.log | tee -a /work/xdcchain/xdc.log diff --git a/cicd/testnet/start.sh b/cicd/testnet/start.sh index d216f1ef87bb..594bb0263fb9 100755 --- a/cicd/testnet/start.sh +++ b/cicd/testnet/start.sh @@ -113,6 +113,23 @@ fi netstats="${NODE_NAME}-${wallet}:$ethstats_secret@$ethstats_address" +fastsync_args="" +if test -n "$FASTSYNC_PIVOT_NUMBER" +then 
+ echo "FASTSYNC_PIVOT_NUMBER found, set to $FASTSYNC_PIVOT_NUMBER" + fastsync_args="${fastsync_args} --fastsyncpivotnumber ${FASTSYNC_PIVOT_NUMBER}" +fi +if test -n "$FASTSYNC_PIVOT_HASH" +then + echo "FASTSYNC_PIVOT_HASH found, set to $FASTSYNC_PIVOT_HASH" + fastsync_args="${fastsync_args} --fastsyncpivothash ${FASTSYNC_PIVOT_HASH}" +fi +if test -n "$FASTSYNC_PIVOT_ROOT" +then + echo "FASTSYNC_PIVOT_ROOT found, set to $FASTSYNC_PIVOT_ROOT" + fastsync_args="${fastsync_args} --fastsyncpivotroot ${FASTSYNC_PIVOT_ROOT}" +fi + INSTANCE_IP=$(curl https://checkip.amazonaws.com) @@ -134,5 +151,6 @@ XDC --ethstats ${netstats} \ --miner-gasprice "1" --miner-gaslimit "420000000" --verbosity ${log_level} \ --debugdatadir /work/xdcchain \ --store-reward \ +${fastsync_args} \ --ws --ws-addr=0.0.0.0 --ws-port $ws_port \ --ws-origins "*" 2>&1 >>/work/xdcchain/xdc.log | tee -a /work/xdcchain/xdc.log diff --git a/cmd/XDC/main.go b/cmd/XDC/main.go index 43166fc1a1a8..b6b918def413 100644 --- a/cmd/XDC/main.go +++ b/cmd/XDC/main.go @@ -82,6 +82,9 @@ var ( utils.TxPoolGlobalQueueFlag, utils.TxPoolLifetimeFlag, utils.SyncModeFlag, + utils.FastSyncPivotNumberFlag, + utils.FastSyncPivotHashFlag, + utils.FastSyncPivotRootFlag, utils.GCModeFlag, // utils.LightServFlag, // deprecated // utils.LightPeersFlag, // deprecated diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 2b39e18512ca..543c33b96567 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -179,6 +179,24 @@ var ( Value: ethconfig.Defaults.SyncMode.String(), Category: flags.EthCategory, } + FastSyncPivotNumberFlag = &cli.Uint64Flag{ + Name: "fastsyncpivotnumber", + Usage: "Pivot block number for fast sync (0 = use default calculation)", + Value: 0, + Category: flags.EthCategory, + } + FastSyncPivotHashFlag = &cli.StringFlag{ + Name: "fastsyncpivothash", + Usage: "Pivot block hash for fast sync verification (hex string, must be set if fastsyncpivotnumber is set)", + Value: "", + Category: flags.EthCategory, + } + 
FastSyncPivotRootFlag = &cli.StringFlag{ + Name: "fastsyncpivotroot", + Usage: "State root of pivot block for fast sync state download (hex string, zero = use latest.Root)", + Value: "", + Category: flags.EthCategory, + } GCModeFlag = &cli.StringFlag{ Name: "gcmode", Usage: `Blockchain garbage collection mode ("full", "archive")`, @@ -1521,6 +1539,30 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { Fatalf("invalid --syncmode flag: %v", err) } } + pivotNumberSet := ctx.IsSet(FastSyncPivotNumberFlag.Name) + pivotHashSet := ctx.IsSet(FastSyncPivotHashFlag.Name) + pivotRootSet := ctx.IsSet(FastSyncPivotRootFlag.Name) + pivotHash := ctx.String(FastSyncPivotHashFlag.Name) + pivotRoot := ctx.String(FastSyncPivotRootFlag.Name) + + if pivotNumberSet { + if !pivotHashSet || pivotHash == "" { + Fatalf("--%s must be set if --%s is set", FastSyncPivotHashFlag.Name, FastSyncPivotNumberFlag.Name) + } + if !pivotRootSet || pivotRoot == "" { + Fatalf("--%s must be set if --%s is set", FastSyncPivotRootFlag.Name, FastSyncPivotNumberFlag.Name) + } + cfg.FastSyncPivotNumber = ctx.Uint64(FastSyncPivotNumberFlag.Name) + cfg.FastSyncPivotHash = common.HexToHash(pivotHash) + cfg.FastSyncPivotRoot = common.HexToHash(pivotRoot) + } else { + if pivotHashSet { + Fatalf("--%s must not be set without --%s", FastSyncPivotHashFlag.Name, FastSyncPivotNumberFlag.Name) + } + if pivotRootSet { + Fatalf("--%s must not be set without --%s", FastSyncPivotRootFlag.Name, FastSyncPivotNumberFlag.Name) + } + } if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheDatabaseFlag.Name) { cfg.DatabaseCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheDatabaseFlag.Name) / 100 diff --git a/consensus/XDPoS/engines/engine_v1/engine.go b/consensus/XDPoS/engines/engine_v1/engine.go index cfa11cdf4d54..43cf42fa8316 100644 --- a/consensus/XDPoS/engines/engine_v1/engine.go +++ b/consensus/XDPoS/engines/engine_v1/engine.go @@ -243,32 +243,34 @@ func (x *XDPoS_v1) verifyCascadingFields(chain 
consensus.ChainReader, header *ty return x.verifySeal(chain, header, parents, fullVerify) } - /* - BUG: snapshot returns wrong signers sometimes - when it happens we get the signers list by requesting smart contract - */ - // Retrieve the snapshot needed to verify this header and cache it - snap, err := x.snapshot(chain, number-1, header.ParentHash, parents, nil) - if err != nil { - return err - } + if fullVerify { + /* + BUG: snapshot returns wrong signers sometimes + when it happens we get the signers list by requesting smart contract + */ + // Retrieve the snapshot needed to verify this header and cache it + snap, err := x.snapshot(chain, number-1, header.ParentHash, parents, nil) + if err != nil { + return err + } - signers := snap.GetSigners() - err = x.checkSignersOnCheckpoint(chain, header, signers) - if err == nil { - return x.verifySeal(chain, header, parents, fullVerify) - } + signers := snap.GetSigners() + err = x.checkSignersOnCheckpoint(chain, header, signers) + if err == nil { + return x.verifySeal(chain, header, parents, fullVerify) + } - signers, err = x.getSignersFromContract(chain, header) - if err != nil { - return err - } - err = x.checkSignersOnCheckpoint(chain, header, signers) - if err == nil { - return x.verifySeal(chain, header, parents, fullVerify) + signers, err = x.getSignersFromContract(chain, header) + if err != nil { + return err + } + err = x.checkSignersOnCheckpoint(chain, header, signers) + if err == nil { + return x.verifySeal(chain, header, parents, fullVerify) + } } - return err + return x.verifySeal(chain, header, parents, fullVerify) } func (x *XDPoS_v1) checkSignersOnCheckpoint(chain consensus.ChainReader, header *types.Header, signers []common.Address) error { diff --git a/consensus/XDPoS/engines/engine_v2/engine.go b/consensus/XDPoS/engines/engine_v2/engine.go index a2da3d8616fa..c12af8ef5224 100644 --- a/consensus/XDPoS/engines/engine_v2/engine.go +++ b/consensus/XDPoS/engines/engine_v2/engine.go @@ -254,9 +254,9 @@ func 
(x *XDPoS_v2) initial(chain consensus.ChainReader, header *types.Header) er return fmt.Errorf("masternodes are empty v2 switch number: %d", x.config.V2.SwitchBlock.Uint64()) } - snap := newSnapshot(lastGapNum, lastGapHeader.Hash(), masternodes) + snap := NewSnapshot(lastGapNum, lastGapHeader.Hash(), masternodes) x.snapshots.Add(snap.Hash, snap) - err = storeSnapshot(snap, x.db) + err = StoreSnapshot(snap, x.db) if err != nil { log.Error("[initial] Error while store snapshot", "error", err) return err @@ -594,11 +594,11 @@ func (x *XDPoS_v2) UpdateMasternodes(chain consensus.ChainReader, header *types. } x.lock.RLock() - snap := newSnapshot(number, header.Hash(), masterNodes) + snap := NewSnapshot(number, header.Hash(), masterNodes) log.Info("[UpdateMasternodes] take snapshot", "number", number, "hash", header.Hash()) x.lock.RUnlock() - err := storeSnapshot(snap, x.db) + err := StoreSnapshot(snap, x.db) if err != nil { log.Error("[UpdateMasternodes] Error while store snapshot", "hash", header.Hash(), "currentRound", x.currentRound, "error", err) return err @@ -740,13 +740,18 @@ func (x *XDPoS_v2) VerifyBlockInfo(blockChainReader consensus.ChainReader, block return nil } -func (x *XDPoS_v2) verifyQC(blockChainReader consensus.ChainReader, quorumCert *types.QuorumCert, parentHeader *types.Header) error { +func (x *XDPoS_v2) verifyQC(blockChainReader consensus.ChainReader, quorumCert *types.QuorumCert, parents []*types.Header) error { if quorumCert == nil { log.Warn("[verifyQC] QC is Nil") return utils.ErrInvalidQC } - epochInfo, err := x.getEpochSwitchInfo(blockChainReader, parentHeader, quorumCert.ProposedBlockInfo.Hash) + // Find the parent header from the parents slice if available + var parentHeader *types.Header + if len(parents) != 0 { + parentHeader = parents[len(parents)-1] + } + epochInfo, err := x.getEpochSwitchInfo(blockChainReader, parents, quorumCert.ProposedBlockInfo.Hash) if err != nil { log.Error("[verifyQC] Error when getting epoch switch Info to 
verify QC", "Error", err) return errors.New("fail to verify QC due to failure in getting epoch switch info") @@ -934,7 +939,7 @@ func (x *XDPoS_v2) GetMasternodesFromEpochSwitchHeader(epochSwitchHeader *types. // Given header, get master node from the epoch switch block of that epoch func (x *XDPoS_v2) GetMasternodes(chain consensus.ChainReader, header *types.Header) []common.Address { - epochSwitchInfo, err := x.getEpochSwitchInfo(chain, header, header.Hash()) + epochSwitchInfo, err := x.getEpochSwitchInfo(chain, []*types.Header{header}, header.Hash()) if err != nil { log.Error("[GetMasternodes] Adaptor v2 getEpochSwitchInfo has error", "err", err) return []common.Address{} @@ -944,7 +949,7 @@ func (x *XDPoS_v2) GetMasternodes(chain consensus.ChainReader, header *types.Hea // Given header, get master node from the epoch switch block of that epoch func (x *XDPoS_v2) GetPenalties(chain consensus.ChainReader, header *types.Header) []common.Address { - epochSwitchInfo, err := x.getEpochSwitchInfo(chain, header, header.Hash()) + epochSwitchInfo, err := x.getEpochSwitchInfo(chain, []*types.Header{header}, header.Hash()) if err != nil { log.Error("[GetPenalties] Adaptor v2 getEpochSwitchInfo has error", "err", err) return []common.Address{} @@ -953,7 +958,7 @@ func (x *XDPoS_v2) GetPenalties(chain consensus.ChainReader, header *types.Heade } func (x *XDPoS_v2) GetStandbynodes(chain consensus.ChainReader, header *types.Header) []common.Address { - epochSwitchInfo, err := x.getEpochSwitchInfo(chain, header, header.Hash()) + epochSwitchInfo, err := x.getEpochSwitchInfo(chain, []*types.Header{header}, header.Hash()) if err != nil { log.Error("[GetStandbynodes] Adaptor v2 getEpochSwitchInfo has error", "err", err) return []common.Address{} diff --git a/consensus/XDPoS/engines/engine_v2/epochSwitch.go b/consensus/XDPoS/engines/engine_v2/epochSwitch.go index e4b222fce1ff..8b3f262ba484 100644 --- a/consensus/XDPoS/engines/engine_v2/epochSwitch.go +++ 
b/consensus/XDPoS/engines/engine_v2/epochSwitch.go @@ -11,14 +11,18 @@ import ( ) // Given header and its hash, get epoch switch info from the epoch switch block of that epoch, -// header is allow to be nil. -func (x *XDPoS_v2) getEpochSwitchInfo(chain consensus.ChainReader, header *types.Header, hash common.Hash) (*types.EpochSwitchInfo, error) { +// headers is allowed to be nil. headers contains the header (as last item) and its parents if any, which can be used to avoid +// fetching from the database during recursive lookups (useful during VerifyHeaders). +func (x *XDPoS_v2) getEpochSwitchInfo(chain consensus.ChainReader, headers []*types.Header, hash common.Hash) (*types.EpochSwitchInfo, error) { epochSwitchInfo, ok := x.epochSwitches.Get(hash) if ok && epochSwitchInfo != nil { log.Debug("[getEpochSwitchInfo] cache hit", "number", epochSwitchInfo.EpochSwitchBlockInfo.Number, "hash", hash.Hex()) return epochSwitchInfo, nil } - h := header + var h *types.Header + if len(headers) > 0 { + h = headers[len(headers)-1] + } if h == nil { log.Debug("[getEpochSwitchInfo] header doesn't provide, get header by hash", "hash", hash.Hex()) h = chain.GetHeaderByHash(hash) @@ -65,18 +69,18 @@ func (x *XDPoS_v2) getEpochSwitchInfo(chain consensus.ChainReader, header *types return nil, fmt.Errorf("masternodes list is empty at epoch switch block %v", h.Number.Uint64()) } - snap, err := x.getSnapshot(chain, h.Number.Uint64(), false) - if err != nil { - log.Error("[getEpochSwitchInfo] Adaptor v2 getSnapshot has error", "err", err) - return nil, err - } penalties := common.ExtractAddressFromBytes(h.Penalties) - candidates := snap.NextEpochCandidates standbynodes := []common.Address{} - if len(masternodes) != len(candidates) { - standbynodes = candidates - standbynodes = common.RemoveItemFromArray(standbynodes, masternodes) - standbynodes = common.RemoveItemFromArray(standbynodes, penalties) + snap, err := x.getSnapshot(chain, h.Number.Uint64(), false) + if err != nil { + 
log.Warn("[getEpochSwitchInfo] Adaptor v2 getSnapshot has error, cannot get standbynodes", "err", err) + } else { + candidates := snap.NextEpochCandidates + if len(masternodes) != len(candidates) { + standbynodes = candidates + standbynodes = common.RemoveItemFromArray(standbynodes, masternodes) + standbynodes = common.RemoveItemFromArray(standbynodes, penalties) + } } epochSwitchInfo := &types.EpochSwitchInfo{ @@ -98,7 +102,11 @@ func (x *XDPoS_v2) getEpochSwitchInfo(chain consensus.ChainReader, header *types return epochSwitchInfo, nil } - epochSwitchInfo, err = x.getEpochSwitchInfo(chain, nil, h.ParentHash) + var potentialParentHeaders []*types.Header + if len(headers) > 0 { + potentialParentHeaders = headers[:len(headers)-1] + } + epochSwitchInfo, err = x.getEpochSwitchInfo(chain, potentialParentHeaders, h.ParentHash) if err != nil { log.Error("[getEpochSwitchInfo] recursive error", "err", err, "hash", hash.Hex(), "number", h.Number.Uint64()) return nil, err @@ -133,7 +141,7 @@ func (x *XDPoS_v2) isEpochSwitchAtRound(round types.Round, parentHeader *types.H func (x *XDPoS_v2) GetCurrentEpochSwitchBlock(chain consensus.ChainReader, blockNum *big.Int) (uint64, uint64, error) { header := chain.GetHeaderByNumber(blockNum.Uint64()) - epochSwitchInfo, err := x.getEpochSwitchInfo(chain, header, header.Hash()) + epochSwitchInfo, err := x.getEpochSwitchInfo(chain, []*types.Header{header}, header.Hash()) if err != nil { log.Error("[GetCurrentEpochSwitchBlock] Fail to get epoch switch info", "Num", header.Number, "Hash", header.Hash()) return 0, 0, err @@ -187,7 +195,7 @@ func (x *XDPoS_v2) GetEpochSwitchInfoBetween(chain consensus.ChainReader, begin, iteratorNum := end.Number // when iterator is strictly > begin number, do the search for iteratorNum.Cmp(begin.Number) > 0 { - epochSwitchInfo, err := x.getEpochSwitchInfo(chain, iteratorHeader, iteratorHash) + epochSwitchInfo, err := x.getEpochSwitchInfo(chain, []*types.Header{iteratorHeader}, iteratorHash) if err != nil { 
log.Error("[GetEpochSwitchInfoBetween] Adaptor v2 getEpochSwitchInfo has error, potentially bug", "err", err) return nil, err diff --git a/consensus/XDPoS/engines/engine_v2/snapshot.go b/consensus/XDPoS/engines/engine_v2/snapshot.go index 7c45a6a107ef..f0439bfaa8cf 100644 --- a/consensus/XDPoS/engines/engine_v2/snapshot.go +++ b/consensus/XDPoS/engines/engine_v2/snapshot.go @@ -23,8 +23,8 @@ type SnapshotV2 struct { NextEpochCandidates []common.Address `json:"masterNodes"` // Set of authorized candidates nodes at this moment for next epoch } -// create new snapshot for next epoch to use -func newSnapshot(number uint64, hash common.Hash, candidates []common.Address) *SnapshotV2 { +// NewSnapshot creates a new snapshot for next epoch to use +func NewSnapshot(number uint64, hash common.Hash, candidates []common.Address) *SnapshotV2 { snap := &SnapshotV2{ Number: number, Hash: hash, @@ -47,8 +47,8 @@ func loadSnapshot(db ethdb.Database, hash common.Hash) (*SnapshotV2, error) { return snap, nil } -// store inserts the SnapshotV2 into the database. -func storeSnapshot(s *SnapshotV2, db ethdb.Database) error { +// StoreSnapshot inserts the SnapshotV2 into the database. 
+func StoreSnapshot(s *SnapshotV2, db ethdb.Database) error { blob, err := json.Marshal(s) if err != nil { return err diff --git a/consensus/XDPoS/engines/engine_v2/snapshot_test.go b/consensus/XDPoS/engines/engine_v2/snapshot_test.go index fac04f089773..66289eb9f660 100644 --- a/consensus/XDPoS/engines/engine_v2/snapshot_test.go +++ b/consensus/XDPoS/engines/engine_v2/snapshot_test.go @@ -11,7 +11,7 @@ import ( func TestGetMasterNodes(t *testing.T) { masterNodes := []common.Address{{0x4}, {0x3}, {0x2}, {0x1}} - snap := newSnapshot(1, common.Hash{}, masterNodes) + snap := NewSnapshot(1, common.Hash{}, masterNodes) for _, address := range masterNodes { if _, ok := snap.GetMappedCandidates()[address]; !ok { @@ -22,7 +22,7 @@ func TestGetMasterNodes(t *testing.T) { } func TestStoreLoadSnapshot(t *testing.T) { - snap := newSnapshot(1, common.Hash{0x1}, nil) + snap := NewSnapshot(1, common.Hash{0x1}, nil) dir := t.TempDir() db, err := leveldb.New(dir, 256, 0, "", false) if err != nil { @@ -30,7 +30,7 @@ func TestStoreLoadSnapshot(t *testing.T) { } lddb := rawdb.NewDatabase(db) - err = storeSnapshot(snap, lddb) + err = StoreSnapshot(snap, lddb) if err != nil { t.Error("store snapshot failed", err) } diff --git a/consensus/XDPoS/engines/engine_v2/timeout.go b/consensus/XDPoS/engines/engine_v2/timeout.go index 474831fb3fd7..af99ff903033 100644 --- a/consensus/XDPoS/engines/engine_v2/timeout.go +++ b/consensus/XDPoS/engines/engine_v2/timeout.go @@ -61,7 +61,7 @@ func (x *XDPoS_v2) timeoutHandler(blockChainReader consensus.ChainReader, timeou numberOfTimeoutsInPool, pooledTimeouts := x.timeoutPool.Add(timeout) log.Debug("[timeoutHandler] collect timeout", "number", numberOfTimeoutsInPool) - epochInfo, err := x.getEpochSwitchInfo(blockChainReader, blockChainReader.CurrentHeader(), blockChainReader.CurrentHeader().Hash()) + epochInfo, err := x.getEpochSwitchInfo(blockChainReader, []*types.Header{blockChainReader.CurrentHeader()}, blockChainReader.CurrentHeader().Hash()) if err 
!= nil { log.Error("[timeoutHandler] Error when getting epoch switch Info", "error", err) return fmt.Errorf("fail on timeoutHandler due to failure in getting epoch switch info, %s", err) @@ -110,7 +110,7 @@ func (x *XDPoS_v2) onTimeoutPoolThresholdReached(blockChainReader consensus.Chai } func (x *XDPoS_v2) getTCEpochInfo(chain consensus.ChainReader, timeoutRound types.Round) (*types.EpochSwitchInfo, error) { - epochSwitchInfo, err := x.getEpochSwitchInfo(chain, (chain.CurrentHeader()), (chain.CurrentHeader()).Hash()) + epochSwitchInfo, err := x.getEpochSwitchInfo(chain, []*types.Header{chain.CurrentHeader()}, chain.CurrentHeader().Hash()) if err != nil { log.Error("[getTCEpochInfo] Error when getting epoch switch info", "error", err) return nil, fmt.Errorf("fail on getTCEpochInfo due to failure in getting epoch switch info, %s", err) @@ -217,7 +217,7 @@ func (x *XDPoS_v2) sendTimeout(chain consensus.ChainReader) error { } log.Debug("[sendTimeout] is epoch switch when sending out timeout message", "currentNumber", currentNumber, "gapNumber", gapNumber) } else { - epochSwitchInfo, err := x.getEpochSwitchInfo(chain, currentBlockHeader, currentBlockHeader.Hash()) + epochSwitchInfo, err := x.getEpochSwitchInfo(chain, []*types.Header{currentBlockHeader}, currentBlockHeader.Hash()) if err != nil { log.Error("[sendTimeout] Error when trying to get current epoch switch info for a non-epoch block", "currentRound", x.currentRound, "currentBlockNum", currentBlockHeader.Number, "currentBlockHash", currentBlockHeader.Hash(), "epochNum", epochNum) return err diff --git a/consensus/XDPoS/engines/engine_v2/utils.go b/consensus/XDPoS/engines/engine_v2/utils.go index 352e180f8dd5..ce6864bb2376 100644 --- a/consensus/XDPoS/engines/engine_v2/utils.go +++ b/consensus/XDPoS/engines/engine_v2/utils.go @@ -208,7 +208,7 @@ func (x *XDPoS_v2) GetSignersFromSnapshot(chain consensus.ChainReader, header *t func (x *XDPoS_v2) CalculateMissingRounds(chain consensus.ChainReader, header 
*types.Header) (*utils.PublicApiMissedRoundsMetadata, error) { var missedRounds []utils.MissedRoundInfo - switchInfo, err := x.getEpochSwitchInfo(chain, header, header.Hash()) + switchInfo, err := x.getEpochSwitchInfo(chain, []*types.Header{header}, header.Hash()) if err != nil { return nil, err } @@ -344,7 +344,7 @@ func (x *XDPoS_v2) GetBlockByEpochNumber(chain consensus.ChainReader, targetEpoc if currentHeader == nil { return nil, errors.New("current header is nil") } - epochSwitchInfo, err := x.getEpochSwitchInfo(chain, currentHeader, currentHeader.Hash()) + epochSwitchInfo, err := x.getEpochSwitchInfo(chain, []*types.Header{currentHeader}, currentHeader.Hash()) if err != nil { return nil, err } diff --git a/consensus/XDPoS/engines/engine_v2/verifyHeader.go b/consensus/XDPoS/engines/engine_v2/verifyHeader.go index 1185cfafe876..546493a0464f 100644 --- a/consensus/XDPoS/engines/engine_v2/verifyHeader.go +++ b/consensus/XDPoS/engines/engine_v2/verifyHeader.go @@ -92,7 +92,7 @@ func (x *XDPoS_v2) verifyHeader(chain consensus.ChainReader, header *types.Heade return utils.ErrRoundInvalid } - err = x.verifyQC(chain, quorumCert, parent) + err = x.verifyQC(chain, quorumCert, parents) if err != nil { log.Warn("[verifyHeader] fail to verify QC", "QCNumber", quorumCert.ProposedBlockInfo.Number, "QCsigLength", len(quorumCert.Signatures)) return err @@ -134,33 +134,38 @@ func (x *XDPoS_v2) verifyHeader(chain consensus.ChainReader, header *types.Heade return utils.ErrInvalidCheckpointSigners } - localMasterNodes, localPenalties, err := x.calcMasternodes(chain, header.Number, header.ParentHash, round) - masterNodes = localMasterNodes - if err != nil { - log.Error("[verifyHeader] Fail to calculate master nodes list with penalty", "Number", header.Number, "Hash", header.Hash()) - return err - } - - validatorsAddress := common.ExtractAddressFromBytes(header.Validators) - if !utils.CompareSignersLists(localMasterNodes, validatorsAddress) { - for i, addr := range localMasterNodes 
{ - log.Warn("[verifyHeader] localMasterNodes", "i", i, "addr", addr.Hex()) + // if fullVerify, verify masternodes and penalties; else use them inside header + if fullVerify { + localMasterNodes, localPenalties, err := x.calcMasternodes(chain, header.Number, header.ParentHash, round) + masterNodes = localMasterNodes + if err != nil { + log.Error("[verifyHeader] Fail to calculate master nodes list with penalty", "Number", header.Number, "Hash", header.Hash()) + return err } - for i, addr := range validatorsAddress { - log.Warn("[verifyHeader] validatorsAddress", "i", i, "addr", addr.Hex()) - } - return utils.ErrValidatorsNotLegit - } - penaltiesAddress := common.ExtractAddressFromBytes(header.Penalties) - if !utils.CompareSignersLists(localPenalties, penaltiesAddress) { - for i, addr := range localPenalties { - log.Warn("[verifyHeader] localPenalties", "i", i, "addr", addr.Hex()) + validatorsAddress := common.ExtractAddressFromBytes(header.Validators) + if !utils.CompareSignersLists(localMasterNodes, validatorsAddress) { + for i, addr := range localMasterNodes { + log.Warn("[verifyHeader] localMasterNodes", "i", i, "addr", addr.Hex()) + } + for i, addr := range validatorsAddress { + log.Warn("[verifyHeader] validatorsAddress", "i", i, "addr", addr.Hex()) + } + return utils.ErrValidatorsNotLegit } - for i, addr := range penaltiesAddress { - log.Warn("[verifyHeader] penaltiesAddress", "i", i, "addr", addr.Hex()) + + penaltiesAddress := common.ExtractAddressFromBytes(header.Penalties) + if !utils.CompareSignersLists(localPenalties, penaltiesAddress) { + for i, addr := range localPenalties { + log.Warn("[verifyHeader] localPenalties", "i", i, "addr", addr.Hex()) + } + for i, addr := range penaltiesAddress { + log.Warn("[verifyHeader] penaltiesAddress", "i", i, "addr", addr.Hex()) + } + return utils.ErrPenaltiesNotLegit } - return utils.ErrPenaltiesNotLegit + } else { + masterNodes = common.ExtractAddressFromBytes(header.Validators) } } else { if len(header.Validators) 
!= 0 { diff --git a/eth/backend.go b/eth/backend.go index 5ce5da1c3ec4..eb473fbd7560 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -292,6 +292,10 @@ func New(stack *node.Node, config *ethconfig.Config, XDCXServ *XDCx.XDCX, lendin if eth.protocolManager, err = NewProtocolManagerEx(eth.blockchain.Config(), config.SyncMode, networkID, eth.eventMux, eth.txPool, eth.orderPool, eth.lendingPool, eth.engine, eth.blockchain, chainDb); err != nil { return nil, err } + // Set fast sync pivot block if configured + if config.FastSyncPivotNumber != 0 { + eth.protocolManager.downloader.SetPivotBlock(config.FastSyncPivotNumber, config.FastSyncPivotHash, config.FastSyncPivotRoot) + } eth.miner = miner.New(eth, &config.Miner, eth.blockchain.Config(), eth.EventMux(), eth.engine, stack.Config().AnnounceTxs) eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData)) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index deb29e132ab0..669c2fb1c546 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -27,7 +27,11 @@ import ( "github.com/XinFinOrg/XDPoSChain" "github.com/XinFinOrg/XDPoSChain/common" + xdc_sort "github.com/XinFinOrg/XDPoSChain/common/sort" + "github.com/XinFinOrg/XDPoSChain/consensus/XDPoS/engines/engine_v2" + "github.com/XinFinOrg/XDPoSChain/consensus/XDPoS/utils" "github.com/XinFinOrg/XDPoSChain/core/rawdb" + "github.com/XinFinOrg/XDPoSChain/core/state" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/ethdb" "github.com/XinFinOrg/XDPoSChain/event" @@ -67,9 +71,9 @@ var ( reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs - fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync + fsHeaderCheckFrequency = 0 // Verification frequency of the downloaded headers during fast sync fsHeaderSafetyNet = 2048 // Number of headers 
to discard in case a chain violation is detected - fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it + fsHeaderForceVerify = 0 // Number of headers to verify before and after the pivot to accept it fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync ) @@ -125,6 +129,15 @@ type Downloader struct { notified int32 committed int32 + // Pivot block configuration (set before sync starts) + pivotNumber uint64 // Fixed pivot block number (0 = use default calculation) + pivotHash common.Hash // Expected pivot block hash for verification + pivotRoot common.Hash // State root of pivot block for state sync + + // Gap pivots (calculated from primary pivot at Epoch-block intervals) + pivotGapNumbers []uint64 // List of gap pivot numbers to sync before primary + pivotGapLock sync.RWMutex // Protects pivotGapNumbers + // Channels headerCh chan dataPack // [eth/62] Channel receiving inbound block headers bodyCh chan dataPack // [eth/62] Channel receiving inbound block bodies @@ -248,6 +261,44 @@ func New(stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchai return dl } +// SetPivotBlock sets the fixed pivot block number, hash and state root for fast sync. +// If set, the downloader will use this pivot instead of calculating one, +// and will verify the pivot block's hash after state sync completes. +// It also calculates gap pivots at some intervals that need state sync. +func (d *Downloader) SetPivotBlock(number uint64, hash common.Hash, root common.Hash) { + // Gap pivots are an XDPoS concept; skip the calculation when XDPoS is not configured. 
+ if d.blockchain.Config().XDPoS == nil { + return + } + d.pivotNumber = number + d.pivotHash = hash + d.pivotRoot = root + + // Calculate all gap pivot numbers: N - N%Epoch - Gap where x < N + epoch := d.blockchain.Config().XDPoS.Epoch + gap := d.blockchain.Config().XDPoS.Gap + epochBase := number - number%epoch + var baseGap uint64 + if epochBase < gap { + baseGap = epoch - gap + } else { + baseGap = epochBase - gap + } + d.pivotGapLock.Lock() + d.pivotGapNumbers = nil + for i := uint64(0); ; i++ { + gapNumber := baseGap + epoch*i + if gapNumber >= number { + break + } + d.pivotGapNumbers = append(d.pivotGapNumbers, gapNumber) + } + if len(d.pivotGapNumbers) > 0 { + log.Info("SetPivotBlock calculated gap pivots", "primary", number, "gapCount", len(d.pivotGapNumbers), "gaps", d.pivotGapNumbers) + } + d.pivotGapLock.Unlock() +} + // Progress retrieves the synchronisation boundaries, specifically the origin // block where synchronisation started at (may have failed/suspended); the block // or header sync is currently at; and the latest known block which the sync targets. @@ -458,7 +509,14 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I // Ensure our origin point is below any fast sync pivot point pivot := uint64(0) if mode == FastSync { - if height <= uint64(fsMinFullBlocks) { + if d.pivotNumber != 0 { + // Use configured pivot block + log.Info("Using configured pivot block", "number", d.pivotNumber) + pivot = d.pivotNumber + if pivot <= origin { + origin = pivot - 1 + } + } else if height <= uint64(fsMinFullBlocks) { origin = 0 } else { pivot = height - uint64(fsMinFullBlocks) @@ -1410,11 +1468,9 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er unknown = append(unknown, header) } } - // If we're importing pure headers, verify based on their recentness + // If we're importing pure headers, verify with frequency=0. 
+ // It's okay since in InsertChain, headers are verified again (full verify) frequency := fsHeaderCheckFrequency - if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { - frequency = 1 - } if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil { rollbackErr = err // If some headers were inserted, add them too to the rollback list @@ -1565,9 +1621,26 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error { // processFastSyncContent takes fetch results from the queue and writes them to the // database. It also controls the synchronisation of state nodes of the pivot block. func (d *Downloader) processFastSyncContent(latest *types.Header) error { + // Gap pivot tracking - only used when gap pivots are configured + var ( + syncedGaps = make(map[uint64]bool) // Track which gap pivots are synced + pendingGapRoots = make(map[uint64]common.Hash) // Gap pivot roots found but not yet synced + pendingGapHashes = make(map[uint64]common.Hash) // Gap pivot block hashes found but not yet synced + ) + d.pivotGapLock.RLock() + if len(d.pivotGapNumbers) > 0 { + log.Info("Configured gap pivot state syncs", "count", len(d.pivotGapNumbers), "gaps", d.pivotGapNumbers) + } + d.pivotGapLock.RUnlock() + // Start syncing state of the reported head block. This should get us most of // the state of the pivot block. - sync := d.syncState(latest.Root) + root := latest.Root + if (d.pivotRoot != common.Hash{}) { + root = d.pivotRoot + } + log.Info("syncState", "number", d.pivotNumber, "root", root) + sync := d.syncState(root) defer func() { // The `sync` object is replaced every time the pivot moves. We need to // defer close the very last active one, hence the lazy evaluation vs. @@ -1583,10 +1656,13 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error { go closeOnErr(sync) // Figure out the ideal pivot block. 
Note, that this goalpost may move if the // sync takes long enough for the chain head to move significantly. - pivot := uint64(0) + var pivot uint64 if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) { pivot = height - uint64(fsMinFullBlocks) } + if d.pivotNumber != 0 { + pivot = d.pivotNumber + } // To cater for moving pivot points, track the pivot block and subsequently // accumulated download results separatey. var ( @@ -1613,15 +1689,35 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error { if d.chainInsertHook != nil { d.chainInsertHook(results) } + + // Collect gap pivot roots as blocks arrive + d.pivotGapLock.RLock() + if len(d.pivotGapNumbers) > 0 { + for _, result := range results { + num := result.Header.Number.Uint64() + for _, gapNum := range d.pivotGapNumbers { + if num == gapNum && !syncedGaps[gapNum] { + pendingGapRoots[gapNum] = result.Header.Root + pendingGapHashes[gapNum] = result.Header.Hash() + break + } + } + } + } + d.pivotGapLock.RUnlock() + if oldPivot != nil { results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) 
} // Split around the pivot block and process the two sides via fast/full sync if atomic.LoadInt32(&d.committed) == 0 { latest = results[len(results)-1].Header - if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) { - log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks)) - pivot = height - uint64(fsMinFullBlocks) + // Only allow pivot movement if not configured with fixed pivot + if d.pivotNumber == 0 { + if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) { + log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks)) + pivot = height - uint64(fsMinFullBlocks) + } } } P, beforeP, afterP := splitAroundPivot(pivot, results) @@ -1632,6 +1728,7 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error { // If new pivot block found, cancel old state retrieval and restart if oldPivot != P { sync.Cancel() + log.Info("restart syncState", "number", P.Header.Number, "root", P.Header.Root) sync = d.syncState(P.Header.Root) go closeOnErr(sync) @@ -1643,6 +1740,58 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error { if sync.err != nil { return sync.err } + // If pivot hash is configured, verify the downloaded pivot block + if d.pivotHash != (common.Hash{}) { + if P.Header.Hash() != d.pivotHash { + return fmt.Errorf("pivot block hash mismatch: have %x, want %x", P.Header.Hash(), d.pivotHash) + } + log.Info("Pivot block hash verified", "number", P.Header.Number, "hash", P.Header.Hash()) + } + // Log state root for configured pivot + if d.pivotNumber != 0 { + log.Info("Pivot block state sync complete", "number", P.Header.Number, "hash", P.Header.Hash(), "root", P.Header.Root) + // Sync gap pivots after primary pivot state sync completes + d.pivotGapLock.RLock() + gapNumbers := make([]uint64, len(d.pivotGapNumbers)) + copy(gapNumbers, d.pivotGapNumbers) + d.pivotGapLock.RUnlock() + if len(gapNumbers) > 0 { + for _, 
gapNum := range gapNumbers { + root, ok := pendingGapRoots[gapNum] + if !ok { + return fmt.Errorf("gap pivot block %d not found in downloaded results", gapNum) + } + if syncedGaps[gapNum] { + continue + } + log.Info("syncState for gap pivot", "number", gapNum, "root", root) + gapSync := d.syncState(root) + if err := gapSync.Wait(); err != nil { + return err + } + log.Info("Gap pivot state sync complete", "number", gapNum, "root", root) + // Generate snapshot for this gap pivot + gapHash, ok := pendingGapHashes[gapNum] + if !ok { + return fmt.Errorf("gap pivot block hash %d not found", gapNum) + } + statedb, err := state.New(root, state.NewDatabase(d.stateDB)) + if err != nil { + log.Error("Failed to create state for gap pivot snapshot", "number", gapNum, "root", root, "err", err) + return err + } + if err := d.generateSnapshot(statedb, gapNum, gapHash); err != nil { + log.Error("Failed to generate snapshot for gap pivot", "number", gapNum, "hash", gapHash, "err", err) + return err + } + syncedGaps[gapNum] = true + } + log.Info("All gap pivot state syncs complete", "count", len(gapNumbers)) + d.pivotGapLock.Lock() + d.pivotGapNumbers = nil // Clear to avoid reprocessing + d.pivotGapLock.Unlock() + } + } if err := d.commitPivotBlock(P); err != nil { return err } @@ -1848,3 +1997,37 @@ func (d *Downloader) requestTTL() time.Duration { } return ttl } + +// generateSnapshot creates and stores a snapshot from the given state and block hash. +// It retrieves candidates from state, sorts them by stake in descending order, +// and stores the snapshot for future use. 
+func (d *Downloader) generateSnapshot(statedb *state.StateDB, number uint64, hash common.Hash) error { + candidates := statedb.GetCandidates() + var ms []utils.Masternode + for _, candidate := range candidates { + v := statedb.GetCandidateCap(candidate) + // Skip zero address candidates + if !candidate.IsZero() { + ms = append(ms, utils.Masternode{Address: candidate, Stake: v}) + } + } + xdc_sort.Slice(ms, func(i, j int) bool { + return ms[i].Stake.Cmp(ms[j].Stake) >= 0 + }) + + masterNodes := []common.Address{} + for _, m := range ms { + masterNodes = append(masterNodes, m.Address) + } + + snap := engine_v2.NewSnapshot(number, hash, masterNodes) + log.Info("[generateSnapshot] created snapshot", "number", number, "hash", hash.Hex(), "candidates", len(masterNodes)) + + err := engine_v2.StoreSnapshot(snap, d.stateDB) + if err != nil { + log.Error("[generateSnapshot] error while storing snapshot", "hash", hash, "error", err) + return err + } + + return nil +} diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 9fe9bdaf02e6..73fe54eedc2b 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -17,6 +17,7 @@ package downloader import ( + "encoding/json" "errors" "fmt" "math/big" @@ -28,6 +29,7 @@ import ( ethereum "github.com/XinFinOrg/XDPoSChain" "github.com/XinFinOrg/XDPoSChain/common" + engine_v2 "github.com/XinFinOrg/XDPoSChain/consensus/XDPoS/engines/engine_v2" "github.com/XinFinOrg/XDPoSChain/core/rawdb" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/ethdb" @@ -62,6 +64,10 @@ type downloadTester struct { insertHeaderChainHook func([]*types.Header) error + // configOverride, when non-nil, is returned by Config() instead of the + // default TestChainConfig. Used by tests that require XDPoS to be active. 
+ configOverride *params.ChainConfig + lock sync.RWMutex } @@ -344,6 +350,9 @@ func (dl *downloadTester) handleProposedBlock(header *types.Header) error { // Config retrieves the blockchain's chain configuration. func (dl *downloadTester) Config() *params.ChainConfig { + if dl.configOverride != nil { + return dl.configOverride + } config := *params.TestChainConfig config.Eip1559Block = big.NewInt(0) return &config @@ -1803,3 +1812,236 @@ func testReorgProtectionDoesNotStallSync(t *testing.T, protocol int, mode SyncMo }) } } + +// TestSetPivotBlockStoresFields verifies that SetPivotBlock persists the pivot +// number, hash, and state root onto the downloader for later use during sync. +func TestSetPivotBlockStoresFields(t *testing.T) { + t.Parallel() + + tester := newTester() + // Provide an XDPoS config so SetPivotBlock does not short-circuit. + tester.configOverride = params.TestXDPoSMockChainConfig + defer tester.terminate() + d := tester.downloader + + wantNumber := uint64(1000) + wantHash := common.HexToHash("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + wantRoot := common.HexToHash("0xcafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe") + + d.SetPivotBlock(wantNumber, wantHash, wantRoot) + + if d.pivotNumber != wantNumber { + t.Errorf("pivotNumber mismatch: have %v, want %v", d.pivotNumber, wantNumber) + } + if d.pivotHash != wantHash { + t.Errorf("pivotHash mismatch: have %v, want %v", d.pivotHash, wantHash) + } + if d.pivotRoot != wantRoot { + t.Errorf("pivotRoot mismatch: have %v, want %v", d.pivotRoot, wantRoot) + } +} + +// TestSetPivotBlockGapCalculation verifies the gap pivot number derivation +// produced by SetPivotBlock for a range of primary pivot numbers. 
+// +// With TestXDPoSMockChainConfig (Epoch=900, Gap=450): +// +// epochBase = pivot - pivot%900 +// baseGap = epochBase-450 (or 900-450=450 when epochBase < 450) +// gaps = { baseGap + 900*i | i=0,1,… while value < pivot } +func TestSetPivotBlockGapCalculation(t *testing.T) { + t.Parallel() + + tester := newTester() + // TestXDPoSMockChainConfig has Epoch=900, Gap=450. + tester.configOverride = params.TestXDPoSMockChainConfig + defer tester.terminate() + d := tester.downloader + + tests := []struct { + pivot uint64 + wantGaps []uint64 + }{ + // pivot ≤ baseGap(450): no gap numbers are strictly less than pivot + {pivot: 200, wantGaps: nil}, + {pivot: 450, wantGaps: nil}, + // first gap (450) is below pivot for the first time + {pivot: 451, wantGaps: []uint64{450}}, + // pivot at exact epoch boundary (900): only gap at 450 + // epochBase=900, baseGap=900-450=450; 450<900→add, 1350≥900→stop + {pivot: 900, wantGaps: []uint64{450}}, + // pivot at baseGap+epoch (1350): 450<1350→add, 1350≥1350→stop + {pivot: 1350, wantGaps: []uint64{450}}, + // pivot just above 1350: both 450 and 1350 qualify + // epochBase=900, baseGap=450; 450<1351→add, 1350<1351→add, 2250≥1351→stop + {pivot: 1351, wantGaps: []uint64{450, 1350}}, + // pivot at 2*epoch (1800): epochBase=1800, baseGap=1800-450=1350; + // 1350<1800→add, 2250≥1800→stop → only [1350] + {pivot: 1800, wantGaps: []uint64{1350}}, + // pivot just above 2250 to get two gaps in a different epoch window: + // epochBase=1800, baseGap=1350; 1350<2251→add, 2250<2251→add, 3150≥2251→stop + {pivot: 2251, wantGaps: []uint64{1350, 2250}}, + } + + for _, tc := range tests { + d.SetPivotBlock(tc.pivot, common.Hash{}, common.Hash{}) + + d.pivotGapLock.RLock() + got := make([]uint64, len(d.pivotGapNumbers)) + copy(got, d.pivotGapNumbers) + d.pivotGapLock.RUnlock() + + if len(got) != len(tc.wantGaps) { + t.Errorf("pivot %d: gap count mismatch: have %v, want %v", tc.pivot, got, tc.wantGaps) + continue + } + for i, g := range got { + if g != 
tc.wantGaps[i] { + t.Errorf("pivot %d: gap[%d] = %v, want %v", tc.pivot, i, g, tc.wantGaps[i]) + } + } + } +} + +// TestFastSyncPivotHashMismatch checks that processFastSyncContent returns a +// descriptive "pivot block hash mismatch" error when the configured pivot hash +// does not match the actual downloaded pivot block. +func TestFastSyncPivotHashMismatch(t *testing.T) { + t.Parallel() + + tester := newTester() + // XDPoS config is required so SetPivotBlock can compute gap numbers. + // TestXDPoSMockChainConfig has Epoch=900, Gap=450. + tester.configOverride = params.TestXDPoSMockChainConfig + defer tester.terminate() + + // Use a chain short enough to be fast but long enough to trigger state sync. + chainLen := 300 + chain := testChainBase.shorten(chainLen) + tester.newPeer("peer", 63, chain) + + // Identify the natural pivot block so we can supply the correct state root + // (so state sync succeeds) while feeding a wrong hash (so the check fires). + // Natural pivot = headBlock().Number - fsMinFullBlocks = (chainLen-1) - fsMinFullBlocks. + pivotNum := uint64(chainLen - 1 - fsMinFullBlocks) // = 235 + pivotRoot := chain.headerm[chain.chain[pivotNum]].Root + + wrongHash := common.HexToHash("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + tester.downloader.SetPivotBlock(pivotNum, wrongHash, pivotRoot) + + err := tester.sync("peer", nil, FastSync) + if err == nil { + t.Fatal("expected pivot hash mismatch error, got nil") + } + if !strings.Contains(err.Error(), "pivot block hash mismatch") { + t.Fatalf("unexpected error: %q (want substring %q)", err.Error(), "pivot block hash mismatch") + } +} + +// TestFastSyncConfiguredPivotHashMatch verifies that setting the correct pivot +// hash and state root allows fast sync to complete successfully. +func TestFastSyncConfiguredPivotHashMatch(t *testing.T) { + t.Parallel() + + tester := newTester() + // XDPoS config is required so SetPivotBlock can compute gap numbers. 
+	// TestXDPoSMockChainConfig has Epoch=900, Gap=450.
+	tester.configOverride = params.TestXDPoSMockChainConfig
+	defer tester.terminate()
+
+	chainLen := 300
+	chain := testChainBase.shorten(chainLen)
+	tester.newPeer("peer", 63, chain)
+
+	// Natural pivot = headBlock().Number - fsMinFullBlocks = (chainLen-1) - fsMinFullBlocks.
+	pivotNum := uint64(chainLen - 1 - fsMinFullBlocks) // = 235
+	pivotHash := chain.headerm[chain.chain[pivotNum]].Hash()
+	pivotRoot := chain.headerm[chain.chain[pivotNum]].Root
+
+	// With Epoch=900 and pivot=235 the calculated baseGap=450 > pivot, so
+	// pivotGapNumbers will be empty – this test focuses purely on hash verification.
+	tester.downloader.SetPivotBlock(pivotNum, pivotHash, pivotRoot)
+
+	if err := tester.sync("peer", nil, FastSync); err != nil {
+		t.Fatalf("fast sync with correct pivot hash failed: %v", err)
+	}
+	assertOwnChain(t, tester, chainLen)
+}
+
+// TestFastSyncGapPivotSync exercises the gap-pivot state-sync path: when the
+// configured pivot is high enough that SetPivotBlock calculates one or more gap
+// pivot numbers, processFastSyncContent must state-sync each gap block and
+// generate a snapshot for it before committing the primary pivot.
+//
+// Chain layout (Epoch=900, Gap=450, pivot=535, gap pivot=[450]):
+//
+//	blocks 1-534   → fast-sync (receipts)
+//	block 450      → gap pivot: state synced + snapshot generated
+//	block 535      → primary pivot: state synced + committed
+//	blocks 536-599 → full-sync
+func TestFastSyncGapPivotSync(t *testing.T) {
+	t.Parallel()
+
+	tester := newTester()
+	// XDPoS config is required so SetPivotBlock can compute gap numbers.
+	// TestXDPoSMockChainConfig has Epoch=900, Gap=450.
+	tester.configOverride = params.TestXDPoSMockChainConfig
+	defer tester.terminate()
+
+	// 600 blocks: natural pivot = (600-1)-64 = 535, gap pivot = 450.
+ chainLen := 600 + chain := testChainBase.shorten(chainLen) + tester.newPeer("peer", 63, chain) + + // Natural pivot = headBlock().Number - fsMinFullBlocks = (chainLen-1) - fsMinFullBlocks. + pivotNum := uint64(chainLen - 1 - fsMinFullBlocks) // = 535 + pivotHash := chain.headerm[chain.chain[pivotNum]].Hash() + pivotRoot := chain.headerm[chain.chain[pivotNum]].Root + + tester.downloader.SetPivotBlock(pivotNum, pivotHash, pivotRoot) + + // After SetPivotBlock the gap list should contain exactly block 450: + // epochBase=0 (535<900), baseGap=450, first gap=450 < 535. + tester.downloader.pivotGapLock.RLock() + gaps := make([]uint64, len(tester.downloader.pivotGapNumbers)) + copy(gaps, tester.downloader.pivotGapNumbers) + tester.downloader.pivotGapLock.RUnlock() + + if len(gaps) != 1 || gaps[0] != 450 { + t.Fatalf("expected gap pivots [450], got %v", gaps) + } + + if err := tester.sync("peer", nil, FastSync); err != nil { + t.Fatalf("fast sync with gap pivot failed: %v", err) + } + assertOwnChain(t, tester, chainLen) + + // After a successful sync the gap list should have been cleared. + tester.downloader.pivotGapLock.RLock() + remaining := len(tester.downloader.pivotGapNumbers) + tester.downloader.pivotGapLock.RUnlock() + if remaining != 0 { + t.Errorf("pivotGapNumbers not cleared after sync: %d entries remain", remaining) + } + + // Verify that the snapshot for the gap pivot block (450) was stored and can + // be loaded back from the downloader's state database. 
+ gapBlockHash := chain.headerm[chain.chain[450]].Hash() + blob, err := rawdb.ReadXdposV2Snapshot(tester.downloader.stateDB, gapBlockHash) + if err != nil { + t.Fatalf("snapshot for gap pivot block 450 not found in stateDB: %v", err) + } + if len(blob) == 0 { + t.Fatal("snapshot blob for gap pivot block 450 is empty") + } + var snap engine_v2.SnapshotV2 + if err := json.Unmarshal(blob, &snap); err != nil { + t.Fatalf("failed to unmarshal gap pivot snapshot: %v", err) + } + if snap.Number != 450 { + t.Errorf("snapshot number mismatch: have %d, want 450", snap.Number) + } + if snap.Hash != gapBlockHash { + t.Errorf("snapshot hash mismatch: have %v, want %v", snap.Hash, gapBlockHash) + } +} diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go index 7b68f739852f..52ea7d46110b 100644 --- a/eth/downloader/statesync.go +++ b/eth/downloader/statesync.go @@ -290,7 +290,6 @@ type codeTask struct { // newStateSync creates a new state trie download scheduler. This method does not // yet start the sync. The user needs to call run to initiate. 
-// only use fast sync but XDC only run full sync // TODO(daniel): remove field sched func newStateSync(d *Downloader, root common.Hash) *stateSync { return &stateSync{ diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 499be7501d65..068bb6402c0d 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -20,6 +20,7 @@ package ethconfig import ( "time" + "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core" "github.com/XinFinOrg/XDPoSChain/core/txpool/legacypool" "github.com/XinFinOrg/XDPoSChain/eth/downloader" @@ -70,6 +71,11 @@ type Config struct { NetworkId uint64 SyncMode downloader.SyncMode + // Fast sync pivot configuration + FastSyncPivotNumber uint64 // Pivot block number for fast sync (0 = use default calculation) + FastSyncPivotHash common.Hash // Pivot block hash for fast sync verification (zero = skip verification) + FastSyncPivotRoot common.Hash // State root of pivot block for state sync (zero = use latest.Root) + NoPruning bool // Whether to disable pruning and flush everything to disk Prefetch bool // Whether to enable prefetching and only load state on demand diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 4de9eb65db6f..0e8fcf4014b6 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -5,6 +5,7 @@ package ethconfig import ( "time" + "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core" "github.com/XinFinOrg/XDPoSChain/core/txpool/legacypool" "github.com/XinFinOrg/XDPoSChain/eth/downloader" @@ -18,6 +19,9 @@ func (c Config) MarshalTOML() (interface{}, error) { Genesis *core.Genesis `toml:",omitempty"` NetworkId uint64 SyncMode downloader.SyncMode + FastSyncPivotNumber uint64 + FastSyncPivotHash common.Hash + FastSyncPivotRoot common.Hash NoPruning bool Prefetch bool LightServ int `toml:",omitempty"` @@ -47,6 +51,9 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.Genesis = c.Genesis enc.NetworkId = 
c.NetworkId enc.SyncMode = c.SyncMode + enc.FastSyncPivotNumber = c.FastSyncPivotNumber + enc.FastSyncPivotHash = c.FastSyncPivotHash + enc.FastSyncPivotRoot = c.FastSyncPivotRoot enc.NoPruning = c.NoPruning enc.Prefetch = c.Prefetch enc.LightServ = c.LightServ @@ -80,6 +87,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { Genesis *core.Genesis `toml:",omitempty"` NetworkId *uint64 SyncMode *downloader.SyncMode + FastSyncPivotNumber *uint64 + FastSyncPivotHash *common.Hash + FastSyncPivotRoot *common.Hash NoPruning *bool Prefetch *bool LightServ *int `toml:",omitempty"` @@ -118,6 +128,15 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.SyncMode != nil { c.SyncMode = *dec.SyncMode } + if dec.FastSyncPivotNumber != nil { + c.FastSyncPivotNumber = *dec.FastSyncPivotNumber + } + if dec.FastSyncPivotHash != nil { + c.FastSyncPivotHash = *dec.FastSyncPivotHash + } + if dec.FastSyncPivotRoot != nil { + c.FastSyncPivotRoot = *dec.FastSyncPivotRoot + } if dec.NoPruning != nil { c.NoPruning = *dec.NoPruning }