diff --git a/app/app.go b/app/app.go
index 79bab3b229..d82a86fe45 100644
--- a/app/app.go
+++ b/app/app.go
@@ -1671,7 +1671,7 @@ func (app *App) RegisterTxService(clientCtx client.Context) {
 func (app *App) RPCContextProvider(i int64) sdk.Context {
 	if i == evmrpc.LatestCtxHeight {
-		return app.GetCheckCtx().WithIsEVM(true).WithIsTracing(true).WithIsCheckTx(false).WithClosestUpgradeName(LatestUpgrade)
+		return app.GetCheckCtx().WithIsEVM(true).WithTraceMode(true).WithIsCheckTx(false).WithClosestUpgradeName(LatestUpgrade)
 	}
 	ctx, err := app.CreateQueryContext(i, false)
 	if err != nil {
@@ -1682,7 +1682,7 @@ func (app *App) RPCContextProvider(i int64) sdk.Context {
 		closestUpgrade = LatestUpgrade
 	}
 	ctx = ctx.WithClosestUpgradeName(closestUpgrade)
-	return ctx.WithIsEVM(true).WithIsTracing(true).WithIsCheckTx(false)
+	return ctx.WithIsEVM(true).WithTraceMode(true).WithIsCheckTx(false)
 }
 
 // RegisterTendermintService implements the Application.RegisterTendermintService method.
diff --git a/evmrpc/block_trace_profiled.go b/evmrpc/block_trace_profiled.go
new file mode 100644
index 0000000000..4aadaa871e
--- /dev/null
+++ b/evmrpc/block_trace_profiled.go
@@ -0,0 +1,472 @@
+package evmrpc
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	gethcommon "github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/core"
+	gethstate "github.com/ethereum/go-ethereum/core/state"
+	gethtracing "github.com/ethereum/go-ethereum/core/tracing"
+	gethtypes "github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/eth/tracers"
+	traceLogger "github.com/ethereum/go-ethereum/eth/tracers/logger"
+	"github.com/ethereum/go-ethereum/eth/tracers/tracersutils"
+	"github.com/ethereum/go-ethereum/rpc"
+)
+
+const profiledDefaultTraceTimeout = 5 * time.Second
+const profiledDefaultTraceReexec = uint64(128)
+const maxProfiledTraceWorkers = 16
+
+func shouldUseProfiledBlockTrace(config *tracers.TraceConfig) bool {
+	return config == nil || config.Tracer == nil || *config.Tracer == ""
+}
+
+func (api *DebugAPI) profiledTraceBlockByNumber(ctx context.Context, number rpc.BlockNumber, config *tracers.TraceConfig) (interface{}, error) {
+	block, metadata, err := api.backend.BlockByNumber(ctx, number)
+	if err != nil {
+		return nil, err
+	}
+	if block == nil {
+		return nil, fmt.Errorf("block #%d not found", number)
+	}
+	return api.profiledTraceBlock(ctx, block, metadata, config)
+}
+
+func (api *DebugAPI) profiledTraceBlockByHash(ctx context.Context, hash gethcommon.Hash, config *tracers.TraceConfig) (interface{}, error) {
+	block, metadata, err := api.backend.BlockByHash(ctx, hash)
+	if err != nil {
+		return nil, err
+	}
+	if block == nil {
+		return nil, fmt.Errorf("block %s not found", hash.Hex())
+	}
+	return api.profiledTraceBlock(ctx, block, metadata, config)
+}
+
+func (api *DebugAPI) profiledTraceBlock(
+	ctx context.Context,
+	block *gethtypes.Block,
+	metadata []tracersutils.TraceBlockMetadata,
+	config *tracers.TraceConfig,
+) ([]*tracers.TxTraceResult, error) {
+	if block.NumberU64() == 0 {
+		return nil, errors.New("genesis is not traceable")
+	}
+	if !block.Number().IsInt64() {
+		return nil, fmt.Errorf("block number exceeds int64: %s", block.Number())
+	}
+
+	parent, _, err := api.backend.BlockByNumber(ctx, rpc.BlockNumber(block.Number().Int64()-1))
+	if err != nil {
+		return nil, err
+	}
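+	// Fall back to a lookup by hash if the parent fetched by number does not
+	// match the block's recorded parent hash.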
+	if parent == nil || parent.Hash() != block.ParentHash() {
+		parent, _, err = api.backend.BlockByHash(ctx, block.ParentHash())
+		if err != nil {
+			return nil, err
+		}
+		if parent == nil {
+			return nil, fmt.Errorf("parent block %s not found", block.ParentHash().Hex())
+		}
+	}
+
+	reexec := profiledDefaultTraceReexec
+	if config != nil && config.Reexec != nil {
+		reexec = *config.Reexec
+	}
+	statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
+	if err != nil {
+		return nil, err
+	}
+	defer release()
+
+	blockCtx, err := api.backend.GetBlockContext(ctx, block, statedb, api.backend)
+	if err != nil {
+		return nil, fmt.Errorf("cannot get block context: %w", err)
+	}
+	txs := block.Transactions()
+	blockHash := block.Hash()
+	signer := gethtypes.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time())
+	results := make([]*tracers.TxTraceResult, len(txs))
+
+	tracedCount := len(txs)
+	if len(metadata) > 0 {
+		tracedCount = 0
+		for _, md := range metadata {
+			if md.ShouldIncludeInTraceResult {
+				tracedCount++
+			}
+		}
+	}
+	threads := min(runtime.NumCPU(), tracedCount)
+	threads = min(threads, maxProfiledTraceWorkers)
+	if threads <= 1 {
+		return api.profiledTraceBlockSequential(ctx, block, metadata, config, statedb, blockCtx, signer, blockHash, results)
+	}
+	return api.profiledTraceBlockParallel(ctx, block, metadata, config, statedb, signer, blockHash, results, threads)
+}
+
+func (api *DebugAPI) profiledTraceBlockSequential(
+	ctx context.Context,
+	block *gethtypes.Block,
+	metadata []tracersutils.TraceBlockMetadata,
+	config *tracers.TraceConfig,
+	statedb vm.StateDB,
+	blockCtx vm.BlockContext,
+	signer gethtypes.Signer,
+	blockHash gethcommon.Hash,
+	results []*tracers.TxTraceResult,
+) ([]*tracers.TxTraceResult, error) {
+	txs := block.Transactions()
+	traceOne := func(i int, tx *gethtypes.Transaction) {
+		msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
+		txctx := &tracers.Context{
+			BlockHash:   blockHash,
+			BlockNumber: block.Number(),
+			TxIndex:     i,
+			TxHash:      tx.Hash(),
+		}
+		res, err := api.profiledTraceTx(ctx, tx, msg, txctx, blockCtx, statedb, config, nil, false)
+		if err != nil {
+			results[i] = &tracers.TxTraceResult{TxHash: tx.Hash(), Error: err.Error()}
+		} else {
+			results[i] = &tracers.TxTraceResult{TxHash: tx.Hash(), Result: res}
+		}
+	}
+
+	if len(metadata) == 0 {
+		for i, tx := range txs {
+			traceOne(i, tx)
+		}
+		return results, nil
+	}
+	for _, md := range metadata {
+		if md.ShouldIncludeInTraceResult {
+			i := md.IdxInEthBlock
+			traceOne(i, txs[i])
+			if results[i] != nil && results[i].Error != "" {
+				statedb.RevertToSnapshot(0)
+			}
+			continue
+		}
+		md.TraceRunnable(statedb)
+	}
+	return results, nil
+}
+
+type profiledTxTraceTask struct {
+	index   int
+	statedb vm.StateDB
+}
+
+func (api *DebugAPI) profiledTraceBlockParallel(
+	ctx context.Context,
+	block *gethtypes.Block,
+	metadata []tracersutils.TraceBlockMetadata,
+	config *tracers.TraceConfig,
+	statedb vm.StateDB,
+	signer gethtypes.Signer,
+	blockHash gethcommon.Hash,
+	results []*tracers.TxTraceResult,
+	threads int,
+) ([]*tracers.TxTraceResult, error) {
+	txs := block.Transactions()
+	jobs := make(chan *profiledTxTraceTask, threads)
+	var pend sync.WaitGroup
+
+	for th := 0; th < threads; th++ {
+		pend.Add(1)
+		go func() {
+			defer pend.Done()
+			for task := range jobs {
+				tx := txs[task.index]
+				msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
+				txctx := &tracers.Context{
+					BlockHash:   blockHash,
+					BlockNumber: block.Number(),
+					TxIndex:     task.index,
+					TxHash:      tx.Hash(),
+				}
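+				// Each worker derives its own BlockContext from its private
+				// statedb copy, so no mutable block-env state is shared
+				// across goroutines.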
+				blockCtx, err := api.backend.GetBlockContext(ctx, block, task.statedb, api.backend)
+				if err != nil {
+					results[task.index] = &tracers.TxTraceResult{TxHash: tx.Hash(), Error: err.Error()}
+					continue
+				}
+				res, err := api.profiledTraceTx(ctx, tx, msg, txctx, blockCtx, task.statedb, config, nil, true)
+				if err != nil {
+					results[task.index] = &tracers.TxTraceResult{TxHash: tx.Hash(), Error: err.Error()}
+				} else {
+					results[task.index] = &tracers.TxTraceResult{TxHash: tx.Hash(), Result: res}
+				}
+			}
+		}()
+	}
+
+	mainBlockCtx, err := api.backend.GetBlockContext(ctx, block, statedb, api.backend)
+	if err != nil {
+		close(jobs)
+		pend.Wait()
+		return nil, err
+	}
+	evm := vm.NewEVM(mainBlockCtx, statedb, api.backend.ChainConfigAtHeight(block.Number().Int64()), vm.Config{}, api.backend.GetCustomPrecompiles(block.Number().Int64()))
+	var failed error
+
+	advanceState := func(i int, tx *gethtypes.Transaction) error {
+		msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
+		statedb.SetTxContext(tx.Hash(), i)
+		if err := api.backend.PrepareTxNoFlush(statedb, tx); err != nil {
+			return err
+		}
+		if _, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil {
+			return err
+		}
+		statedb.Finalise(evm.ChainConfig().IsEIP158(block.Number()))
+		return nil
+	}
+
+	feedTraceTask := func(i int) error {
+		task := &profiledTxTraceTask{statedb: statedb.Copy(), index: i}
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case jobs <- task:
+			return nil
+		}
+	}
+
+	if len(metadata) == 0 {
+		for i, tx := range txs {
+			if err := feedTraceTask(i); err != nil {
+				failed = err
+				break
+			}
+			if err := advanceState(i, tx); err != nil {
+				failed = err
+				break
+			}
+		}
+	} else {
+		for _, md := range metadata {
+			if md.ShouldIncludeInTraceResult {
+				i := md.IdxInEthBlock
+				if err := feedTraceTask(i); err != nil {
+					failed = err
+					break
+				}
+				if err := advanceState(i, txs[i]); err != nil {
+					failed = err
+					break
+				}
+				continue
+			}
+			md.TraceRunnable(statedb)
+		}
+	}
+
+	close(jobs)
+	pend.Wait()
+
+	if failed != nil {
+		// Fill error entries for txs that were never dispatched to workers,
+		// matching the sequential path's per-tx error semantics.
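+		// Entries already produced by workers (results or per-tx errors)
+		// are left as-is; only slots that are still nil are filled below.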
+		if len(metadata) == 0 {
+			for i := range results {
+				if results[i] == nil {
+					results[i] = &tracers.TxTraceResult{
+						TxHash: txs[i].Hash(),
+						Error:  fmt.Sprintf("state advancement failed at prior tx: %v", failed),
+					}
+				}
+			}
+		} else {
+			for _, md := range metadata {
+				if md.ShouldIncludeInTraceResult {
+					i := md.IdxInEthBlock
+					if results[i] == nil {
+						results[i] = &tracers.TxTraceResult{
+							TxHash: txs[i].Hash(),
+							Error:  fmt.Sprintf("state advancement failed at prior tx: %v", failed),
+						}
+					}
+				}
+			}
+		}
+	}
+	return results, nil
+}
+
+func (api *DebugAPI) profiledTraceTx(
+	ctx context.Context,
+	tx *gethtypes.Transaction,
+	message *core.Message,
+	txctx *tracers.Context,
+	vmctx vm.BlockContext,
+	statedb vm.StateDB,
+	config *tracers.TraceConfig,
+	precompiles vm.PrecompiledContracts,
+	noFlush bool,
+) (value interface{}, returnErr error) {
+	var (
+		tracer    *tracers.Tracer
+		tracerMtx *sync.Mutex
+		err       error
+		timeout   = profiledDefaultTraceTimeout
+		usedGas   uint64
+	)
+
+	startingNonce := statedb.GetNonce(message.From)
+	defer func() {
+		if r := recover(); r != nil {
+			value, returnErr = profiledErrorTrace(fmt.Errorf("%s", r), tx, message, txctx, vmctx, config)
+		}
+		nonce := statedb.GetNonce(message.From)
+		if nonce == startingNonce {
+			statedb.SetNonce(message.From, nonce+1, gethtracing.NonceChangeUnspecified)
+		}
+	}()
+
+	if config == nil {
+		config = &tracers.TraceConfig{}
+	}
+	// Treat an empty tracer name like a nil tracer: shouldUseProfiledBlockTrace
+	// admits both, and DefaultDirectory has no entry for "".
+	if config.Tracer == nil || *config.Tracer == "" {
+		logger := traceLogger.NewStructLogger(config.Config)
+		tracer = &tracers.Tracer{
+			Hooks:     logger.Hooks(),
+			GetResult: logger.GetResult,
+			Stop:      logger.Stop,
+		}
+	} else {
+		tracer, err = tracers.DefaultDirectory.New(*config.Tracer, txctx, config.TracerConfig, api.backend.ChainConfigAtHeight(vmctx.BlockNumber.Int64()))
+		if err != nil {
+			return nil, err
+		}
+	}
+	tracingStateDB := gethstate.NewHookedState(statedb, tracer.Hooks)
+	tracerMtx = &sync.Mutex{}
+	txContext := core.NewEVMTxContext(message)
+	evm := vm.NewEVM(vmctx, tracingStateDB, api.backend.ChainConfigAtHeight(vmctx.BlockNumber.Int64()), vm.Config{Tracer: tracer.Hooks, NoBaseFee: true}, api.backend.GetCustomPrecompiles(vmctx.BlockNumber.Int64()))
+	if precompiles != nil {
+		evm.SetPrecompiles(precompiles)
+	}
+	evm.SetTxContext(txContext)
+
+	if config.Timeout != nil {
+		if timeout, err = time.ParseDuration(*config.Timeout); err != nil {
+			return nil, err
+		}
+	}
+	deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
+	go func() {
+		<-deadlineCtx.Done()
+		if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) {
+			tracerMtx.Lock()
+			tracer.Stop(errors.New("execution timeout"))
+			tracerMtx.Unlock()
+			evm.Cancel()
+		}
+	}()
+	defer cancel()
+
+	statedb.SetTxContext(txctx.TxHash, txctx.TxIndex)
+	var prepareTxErr error
+	if noFlush {
+		prepareTxErr = api.backend.PrepareTxNoFlush(statedb, tx)
+	} else {
+		prepareTxErr = api.backend.PrepareTx(statedb, tx)
+	}
+	if prepareTxErr != nil {
+		return profiledErrorTrace(prepareTxErr, tx, message, txctx, vmctx, config)
+	}
+	_, err = core.ApplyTransactionWithEVM(message, new(core.GasPool).AddGas(message.GasLimit), statedb, vmctx.BlockNumber, txctx.BlockHash, tx, &usedGas, evm)
+	if err != nil {
+		return profiledErrorTrace(err, tx, message, txctx, vmctx, config)
+	}
+	tracerMtx.Lock()
+	res, err := tracer.GetResult()
+	tracerMtx.Unlock()
+	if err == nil && errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) {
+		err = errors.New("execution timeout")
+	}
+	return res, err
+}
+
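+// profiledErrorTrace synthesizes a minimal, well-formed trace result for a
+// transaction that could not be traced (for example, an ante handler
+// failure), so that block traces keep per-tx entries instead of aborting
+// with a top-level error.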
+func profiledErrorTrace(err error, tx *gethtypes.Transaction, message *core.Message,
+	txctx *tracers.Context, vmctx vm.BlockContext, config *tracers.TraceConfig) (value interface{}, returnErr error) {
+	if config != nil && config.Tracer != nil {
+		switch *config.Tracer {
+		case "callTracer":
+			errTrace := map[string]interface{}{
+				"from":    message.From.Hex(),
+				"gas":     hexutil.Uint64(message.GasLimit),
+				"gasUsed": "0x0",
+				"input":   "0x",
+				"error":   err.Error(),
+				"type":    "CALL",
+			}
+			if message.Value != nil {
+				errTrace["value"] = hexutil.Big(*message.Value)
+			}
+			if message.To != nil {
+				errTrace["to"] = message.To.Hex()
+			} else {
+				errTrace["type"] = "CREATE"
+			}
+			if message.Data != nil {
+				errTrace["input"] = hexutil.Encode(message.Data)
+			}
+			bz, marshalErr := json.Marshal(errTrace)
+			if marshalErr != nil {
+				return nil, fmt.Errorf("tracing failed: %w", marshalErr)
+			}
+			return json.RawMessage(bz), nil
+		case "flatCallTracer":
+			action := map[string]interface{}{
+				"callType": "call",
+				"from":     message.From.Hex(),
+				"gas":      hexutil.Uint64(message.GasLimit),
+				"input":    "0x",
+			}
+			if message.Value != nil {
+				action["value"] = hexutil.Big(*message.Value)
+			}
+			if message.To != nil {
+				action["to"] = message.To.Hex()
+			}
+			if message.Data != nil {
+				action["input"] = hexutil.Encode(message.Data)
+			}
+			errTrace := map[string]interface{}{
+				"action":      action,
+				"blockHash":   txctx.BlockHash,
+				"blockNumber": txctx.BlockNumber,
+				"result": map[string]interface{}{
+					"gasUsed": "0x0",
+					"output":  "0x",
+				},
+				"subtraces":           0,
+				"traceAddress":        []string{},
+				"transactionHash":     tx.Hash(),
+				"transactionPosition": txctx.TxIndex,
+				"error":               err.Error(),
+			}
+			bz, marshalErr := json.Marshal([]map[string]interface{}{errTrace})
+			if marshalErr != nil {
+				return nil, fmt.Errorf("tracing failed: %w", marshalErr)
+			}
+			return json.RawMessage(bz), nil
+		}
+	}
+	if strings.Contains(err.Error(), core.ErrInsufficientFunds.Error()) {
+		return json.RawMessage(`{}`), nil
+	}
+	return nil, fmt.Errorf("tracing failed: %w", err)
+}
diff --git a/evmrpc/simulate.go b/evmrpc/simulate.go
index 3de089ebf8..ce69edec89 100644
--- a/evmrpc/simulate.go
+++ b/evmrpc/simulate.go
@@ -668,6 +668,37 @@ func (b *Backend) PrepareTx(statedb vm.StateDB, tx *ethtypes.Transaction) error
 	return nil
 }
 
+// PrepareTxNoFlush is like PrepareTx but uses ResetForTracer instead of
+// CleanupForTracer, avoiding CacheMultiStore flushes. This is required in the
+// parallel block trace path where copies of the statedb are concurrently read
+// by worker goroutines; flushing would write to shared CacheMultiStore layers
+// and cause data races.
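+//
+// Illustrative call pattern (a hedged sketch; "workerStateDB" stands for a
+// hypothetical per-goroutine copy obtained from statedb.Copy()):
+//
+//	if err := b.PrepareTxNoFlush(workerStateDB, tx); err != nil {
+//		return nil, fmt.Errorf("prepare tx: %w", err)
+//	}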
+func (b *Backend) PrepareTxNoFlush(statedb vm.StateDB, tx *ethtypes.Transaction) error {
+	typedStateDB := state.GetDBImpl(statedb)
+	typedStateDB.ResetForTracer()
+	ctx, _ := b.keeper.PrepareCtxForEVMTransaction(typedStateDB.Ctx(), tx)
+	ctx = ctx.WithIsEVM(true)
+	if noSignatureSet(tx) {
+		return nil
+	}
+	txData, err := ethtx.NewTxDataFromTx(tx)
+	if err != nil {
+		return fmt.Errorf("transaction cannot be converted to TxData due to %s", err)
+	}
+	msg, err := types.NewMsgEVMTransaction(txData)
+	if err != nil {
+		return fmt.Errorf("transaction cannot be converted to MsgEVMTransaction due to %s", err)
+	}
+	tb := b.txConfigProvider(ctx.BlockHeight()).NewTxBuilder()
+	_ = tb.SetMsgs(msg)
+	newCtx, err := b.antehandler(ctx, tb.GetTx(), false)
+	if err != nil {
+		return fmt.Errorf("transaction failed ante handler due to %s", err)
+	}
+	typedStateDB.WithCtx(newCtx)
+	return nil
+}
+
 func (b *Backend) GetBlockContext(ctx context.Context, block *ethtypes.Block, statedb vm.StateDB, backend export.ChainContextBackend) (vm.BlockContext, error) {
 	blockCtx, err := b.keeper.GetVMBlockContext(statedb.(*state.DBImpl).Ctx(), b.keeper.GetGasPool())
 	if err != nil {
diff --git a/evmrpc/tests/tracers_test.go b/evmrpc/tests/tracers_test.go
index 26ed0e479f..3249e7680d 100644
--- a/evmrpc/tests/tracers_test.go
+++ b/evmrpc/tests/tracers_test.go
@@ -165,3 +165,67 @@ func TestTraceBlockWithFailureThenSuccess(t *testing.T) {
 		},
 	)
 }
+
+func TestTraceBlockByNumberDefaultTracerDoesNotAbortOnFailedTx(t *testing.T) {
+	maxUseiInWei := sdk.NewInt(math.MaxInt64).Mul(state.SdkUseiToSweiMultiplier).BigInt()
+	insufficientFundsTx := signAndEncodeTx(sendAmount(0, maxUseiInWei), mnemonic1)
+	successTx := signAndEncodeTx(send(1), mnemonic1)
+
+	SetupTestServer(t, [][][]byte{{insufficientFundsTx, successTx}}, mnemonicInitializer(mnemonic1)).Run(
+		func(port int) {
+			res := sendRequestWithNamespace("debug", port, "traceBlockByNumber", "0x2", map[string]interface{}{
+				"timeout": "60s",
+			})
+
+			require.NotContains(t, res, "error")
+			txs := res["result"].([]interface{})
+			require.Len(t, txs, 2)
+			// Both txs should have per-tx entries (result or error);
+			// the key assertion is that the block trace did NOT abort
+			// with a top-level error.
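+			// (With the default struct logger, an ante handler failure such
+			// as insufficient funds may surface as an empty "{}" result
+			// rather than an "error" field, hence the result-or-error check.)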
+			tx0 := txs[0].(map[string]interface{})
+			require.True(t, tx0["result"] != nil || tx0["error"] != nil,
+				"tx0 should have a result or error entry")
+			tx1 := txs[1].(map[string]interface{})
+			require.True(t, tx1["result"] != nil || tx1["error"] != nil,
+				"tx1 should have a result or error entry")
+		},
+	)
+}
+
+func TestTraceBlockByNumberDefaultTracerMatchesTraceTransaction(t *testing.T) {
+	cwIter := "sei18cszlvm6pze0x9sz32qnjq4vtd45xehqs8dq7cwy8yhq35wfnn3quh5sau" // hardcoded
+
+	tx1Data := callWasmIter(0, cwIter)
+	signedTx1 := signTxWithMnemonic(tx1Data, mnemonic1)
+	tx1Bz := encodeEvmTx(tx1Data, signedTx1)
+
+	tx2Data := callWasmIter(1, cwIter)
+	signedTx2 := signTxWithMnemonic(tx2Data, mnemonic1)
+	tx2Bz := encodeEvmTx(tx2Data, signedTx2)
+
+	SetupTestServer(t, [][][]byte{{tx1Bz, tx2Bz}}, mnemonicInitializer(mnemonic1), cwIterInitializer(mnemonic1)).Run(
+		func(port int) {
+			blockRes := sendRequestWithNamespace("debug", port, "traceBlockByNumber", "0x2", map[string]interface{}{
+				"timeout": "60s",
+			})
+			txRes := sendRequestWithNamespace("debug", port, "traceTransaction", signedTx2.Hash().Hex(), map[string]interface{}{
+				"timeout": "60s",
+			})
+
+			require.NotContains(t, blockRes, "error")
+			require.NotContains(t, txRes, "error")
+
+			blockTxs := blockRes["result"].([]interface{})
+			require.Len(t, blockTxs, 2)
+
+			blockTrace := blockTxs[1].(map[string]interface{})["result"]
+			txTrace := txRes["result"]
+			txTraceJSON, err := json.Marshal(txTrace)
+			require.NoError(t, err)
+			blockTraceJSON, err := json.Marshal(blockTrace)
+			require.NoError(t, err)
+			require.JSONEq(t, string(txTraceJSON), string(blockTraceJSON))
+		},
+	)
+}
diff --git a/evmrpc/tracers.go b/evmrpc/tracers.go
index 910380f3cb..b3c46eb948 100644
--- a/evmrpc/tracers.go
+++ b/evmrpc/tracers.go
@@ -218,8 +218,11 @@ func (api *SeiDebugAPI) TraceBlockByNumberExcludeTraceFail(ctx context.Context,
 		return nil, fmt.Errorf("block number %d is beyond max lookback of %d", number.Int64(), api.maxBlockLookback)
 	}
 
-	// Accessing tracersAPI from the embedded DebugAPI
-	result, returnErr = api.tracersAPI.TraceBlockByNumber(ctx, number, config)
+	if shouldUseProfiledBlockTrace(config) {
+		result, returnErr = api.profiledTraceBlockByNumber(ctx, number, config)
+	} else {
+		result, returnErr = api.tracersAPI.TraceBlockByNumber(ctx, number, config)
+	}
 	if returnErr != nil {
 		return
 	}
@@ -247,8 +250,11 @@ func (api *SeiDebugAPI) TraceBlockByHashExcludeTraceFail(ctx context.Context, ha
 	}
 	defer done()
 
-	// Accessing tracersAPI from the embedded DebugAPI
-	result, returnErr = api.tracersAPI.TraceBlockByHash(ctx, hash, config)
+	if shouldUseProfiledBlockTrace(config) {
+		result, returnErr = api.profiledTraceBlockByHash(ctx, hash, config)
+	} else {
+		result, returnErr = api.tracersAPI.TraceBlockByHash(ctx, hash, config)
+	}
 	if returnErr != nil {
 		return
 	}
@@ -334,7 +340,11 @@ func (api *DebugAPI) TraceBlockByNumber(ctx context.Context, number rpc.BlockNum
 		return nil, fmt.Errorf("block number %d is beyond max lookback of %d", number.Int64(), api.maxBlockLookback)
 	}
 
-	result, returnErr = api.tracersAPI.TraceBlockByNumber(ctx, number, config)
+	if shouldUseProfiledBlockTrace(config) {
+		result, returnErr = api.profiledTraceBlockByNumber(ctx, number, config)
+	} else {
+		result, returnErr = api.tracersAPI.TraceBlockByNumber(ctx, number, config)
+	}
 	return
 }
 
@@ -348,7 +358,11 @@ func (api *DebugAPI) TraceBlockByHash(ctx context.Context, hash common.Hash, con
 	}
 	defer done()
 
-	result, returnErr = api.tracersAPI.TraceBlockByHash(ctx, hash, config)
+	if shouldUseProfiledBlockTrace(config) {
+		result, returnErr = api.profiledTraceBlockByHash(ctx, hash, config)
+	} else {
+		result, returnErr = api.tracersAPI.TraceBlockByHash(ctx, hash, config)
+	}
 	return
 }
 
@@ -383,8 +397,12 @@ func (api *DebugAPI) TraceStateAccess(ctx context.Context, hash common.Hash) (re
 	tendermintTraces := &TendermintTraces{Traces: []TendermintTrace{}}
 	ctx = WithTendermintTraces(ctx, tendermintTraces)
 	receiptTraces := &ReceiptTraces{Traces: []RawResponseReceipt{}}
+	tracingBackend := *api.backend
+	tracingBackend.ctxProvider = func(height int64) sdk.Context {
+		return api.ctxProvider(height).WithIsTracing(true)
+	}
 	ctx = WithReceiptTraces(ctx, receiptTraces)
-	_, tx, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash)
+	_, tx, blockHash, blockNumber, index, err := tracingBackend.GetTransaction(ctx, hash)
 	if err != nil {
 		return nil, err
 	}
@@ -396,11 +414,11 @@ func (api *DebugAPI) TraceStateAccess(ctx context.Context, hash common.Hash) (re
 	if blockNumber == 0 {
 		return nil, errors.New("genesis is not traceable")
 	}
-	block, _, err := api.backend.BlockByHash(ctx, blockHash)
+	block, _, err := tracingBackend.BlockByHash(ctx, blockHash)
 	if err != nil {
 		return nil, err
 	}
-	stateDB, _, err := api.backend.ReplayTransactionTillIndex(ctx, block, int(index)) //nolint:gosec
+	stateDB, _, err := tracingBackend.ReplayTransactionTillIndex(ctx, block, int(index)) //nolint:gosec
 	if err != nil {
 		return nil, err
 	}
diff --git a/sei-cosmos/types/context.go b/sei-cosmos/types/context.go
index d4fd47b8d8..149cfb0b66 100644
--- a/sei-cosmos/types/context.go
+++ b/sei-cosmos/types/context.go
@@ -475,6 +475,19 @@ func (c Context) WithIsTracing(it bool) Context {
 	c.isTracing = it
 	if it {
 		c.storeTracer = NewStoreTracer()
+	} else {
+		c.storeTracer = nil
 	}
 	return c
 }
+
+// WithTraceMode enables historical tracing behavior without allocating a KV
+// store tracer. This keeps upgrade-aware tracing semantics for ordinary
+// debug_trace* RPCs without paying the per-access StoreTracer overhead.
+func (c Context) WithTraceMode(it bool) Context {
+	c.isTracing = it
+	if !it {
+		c.storeTracer = nil
+	}
+	return c
+}
diff --git a/x/evm/state/code.go b/x/evm/state/code.go
index ef4a0c2d23..b8ceeefacc 100644
--- a/x/evm/state/code.go
+++ b/x/evm/state/code.go
@@ -23,7 +23,7 @@ func (s *DBImpl) SetCode(addr common.Address, code []byte) []byte {
 		// The SetCode method could be modified to return the old code/hash directly.
 		oldHash := s.GetCodeHash(addr)
-		s.logger.OnCodeChange(addr, oldHash, oldCode, common.Hash(crypto.Keccak256(code)), code)
+		s.logger.OnCodeChange(addr, oldHash, oldCode, crypto.Keccak256Hash(code), code)
 	}
 
 	s.k.SetCode(s.ctx, addr, code)
diff --git a/x/evm/state/statedb.go b/x/evm/state/statedb.go
index bf5a9b454e..fb8f41c4fc 100644
--- a/x/evm/state/statedb.go
+++ b/x/evm/state/statedb.go
@@ -98,6 +98,18 @@ func (s *DBImpl) CleanupForTracer() {
 	s.Snapshot()
 }
 
+// ResetForTracer resets in-memory state for a new transaction without flushing
+// the CacheMultiStore hierarchy. This is safe for concurrent use when copies of
+// this statedb are being read from other goroutines, since it never calls
+// CacheMultiStore.Write() on any shared store layer.
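+//
+// Rough call pattern (an illustrative sketch mirroring the evmrpc
+// PrepareTxNoFlush caller, not a prescribed API):
+//
+//	db := GetDBImpl(statedb) // statedb is a hypothetical per-worker copy
+//	db.ResetForTracer()      // fresh tempState and journal; no Write() upward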
+func (s *DBImpl) ResetForTracer() {
+	feeCollector, _ := s.k.GetFeeCollectorAddress(s.Ctx())
+	s.coinbaseEvmAddress = feeCollector
+	s.tempState = NewTemporaryState()
+	s.journal = []journalEntry{}
+	s.Snapshot()
+}
+
 func (s *DBImpl) Finalize() (surplus sdk.Int, err error) {
 	if s.simulation {
 		panic("should never call finalize on a simulation DB")
@@ -156,9 +168,12 @@ func (s *DBImpl) Copy() vm.StateDB {
 	newCtx := s.ctx.WithMultiStore(s.ctx.MultiStore().CacheMultiStore()).WithEventManager(sdk.NewEventManager())
 	journal := make([]journalEntry, len(s.journal))
 	copy(journal, s.journal)
+	snapshots := make([]sdk.Context, len(s.snapshottedCtxs)+1)
+	copy(snapshots, s.snapshottedCtxs)
+	snapshots[len(s.snapshottedCtxs)] = s.ctx
 	return &DBImpl{
 		ctx:             newCtx,
-		snapshottedCtxs: append(s.snapshottedCtxs, s.ctx),
+		snapshottedCtxs: snapshots,
 		tempState:       s.tempState.DeepCopy(),
 		journal:         journal,
 		k:               s.k,