From 23ac8df15302bbde098cab6d711abdd24843d66a Mon Sep 17 00:00:00 2001
From: Guillaume Ballet <3272758+gballet@users.noreply.github.com>
Date: Thu, 18 Aug 2022 13:28:06 +0200
Subject: [PATCH 01/26] cmd, core: save preimages on genesis creation (#25538)

force preimage dump for genesis
---
 core/genesis.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/genesis.go b/core/genesis.go
index 71214e84f50..c3b5d0b5709 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -104,7 +104,7 @@ func (ga *GenesisAlloc) deriveHash() (common.Hash, error) {
 // all the generated states will be persisted into the given database.
 // Also, the genesis state specification will be flushed as well.
 func (ga *GenesisAlloc) flush(db ethdb.Database) error {
-	statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
+	statedb, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil)
 	if err != nil {
 		return err
 	}

From cce7f084388adf9097f730ae48631a55de2e3be1 Mon Sep 17 00:00:00 2001
From: Justin Traglia <95511699+jtraglia@users.noreply.github.com>
Date: Thu, 18 Aug 2022 17:34:57 -0500
Subject: [PATCH 02/26] rlp/rlpgen: fix error handling when target type not found (#25547)

typ will be nil when lookupStructType returns an error. cfg.Type should
be used instead.
---
 rlp/rlpgen/main.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rlp/rlpgen/main.go b/rlp/rlpgen/main.go
index 17d7e64e084..25d4393cc65 100644
--- a/rlp/rlpgen/main.go
+++ b/rlp/rlpgen/main.go
@@ -106,7 +106,7 @@ func (cfg *Config) process() (code []byte, err error) {
 	// Find the type and generate.
 	typ, err := lookupStructType(pkg.Scope(), cfg.Type)
 	if err != nil {
-		return nil, fmt.Errorf("can't find %s in %s: %v", typ, pkg, err)
+		return nil, fmt.Errorf("can't find %s in %s: %v", cfg.Type, pkg, err)
 	}
 	code, err = bctx.generate(typ, cfg.GenerateEncoder, cfg.GenerateDecoder)
 	if err != nil {

From a1b8892384eed99beb3f3364cfd13b049a1c0167 Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Fri, 19 Aug 2022 06:39:47 +0800
Subject: [PATCH 03/26] trie: improve node rlp decoding performance (#25357)

This avoids copying the input []byte while decoding trie nodes. In most
cases, particularly when the input slice is provided by the underlying
database, this optimization is safe to use. For cases where the origin
of the input slice is unclear, the copying version is retained. The new
code performs better even when the input must be copied, because it is
now only copied once in decodeNode.
---
 trie/database.go  |  14 ++++--
 trie/node.go      |  30 ++++++++++--
 trie/node_test.go | 121 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 158 insertions(+), 7 deletions(-)
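The trade-off this message describes is a general Go pattern: the safe entry
point pays for one defensive copy up front and then delegates to a zero-copy
decoder, while callers that own the buffer outright can call the zero-copy
variant directly. Below is a minimal standalone sketch of the pattern;
copyBytes, decode and decodeUnsafe are illustrative stand-ins for
common.CopyBytes, decodeNode and decodeNodeUnsafe, not the trie package's
actual code.

package main

import "fmt"

// copyBytes returns a fresh copy of b (the role common.CopyBytes plays
// in the real patch).
func copyBytes(b []byte) []byte {
	out := make([]byte, len(b))
	copy(out, b)
	return out
}

// decodeUnsafe retains references into buf, so the caller must not
// mutate buf afterwards. The "decoded node" here is just the slice.
func decodeUnsafe(buf []byte) ([]byte, error) {
	if len(buf) == 0 {
		return nil, fmt.Errorf("empty input")
	}
	return buf, nil // keeps pointing at the caller's buffer
}

// decode is the safe wrapper: one copy up front, after which the caller
// may reuse or mutate the original buffer freely.
func decode(buf []byte) ([]byte, error) {
	return decodeUnsafe(copyBytes(buf))
}

func main() {
	buf := []byte{0x01, 0x02}
	safe, _ := decode(buf)
	aliased, _ := decodeUnsafe(buf)
	buf[0] = 0xff // caller reuses the buffer, e.g. a database scratch slice
	fmt.Println(safe[0], aliased[0]) // prints "1 255": only the unsafe result sees the write
}

This is why the patch keeps the copying decodeNode for call sites where the
slice's origin is unclear, and reserves mustDecodeNodeUnsafe for freshly
allocated blobs handed back by the clean cache or the database, as the hunks
below show.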
diff --git a/trie/database.go b/trie/database.go
index 8c9f4717684..8c154ba96df 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -163,7 +163,10 @@ func (n *cachedNode) rlp() []byte {
 // or by regenerating it from the rlp encoded blob.
 func (n *cachedNode) obj(hash common.Hash) node {
 	if node, ok := n.node.(rawNode); ok {
-		return mustDecodeNode(hash[:], node)
+		// The raw-blob format nodes are loaded either from the
+		// clean cache or the database, they are all in their own
+		// copy and safe to use unsafe decoder.
+		return mustDecodeNodeUnsafe(hash[:], node)
 	}
 	return expandNode(hash[:], n.node)
 }
@@ -346,7 +349,10 @@ func (db *Database) node(hash common.Hash) node {
 		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
 			memcacheCleanHitMeter.Mark(1)
 			memcacheCleanReadMeter.Mark(int64(len(enc)))
-			return mustDecodeNode(hash[:], enc)
+
+			// The returned value from cache is in its own copy,
+			// safe to use mustDecodeNodeUnsafe for decoding.
+			return mustDecodeNodeUnsafe(hash[:], enc)
 		}
 	}
 	// Retrieve the node from the dirty cache if available
@@ -371,7 +377,9 @@ func (db *Database) node(hash common.Hash) node {
 		memcacheCleanMissMeter.Mark(1)
 		memcacheCleanWriteMeter.Mark(int64(len(enc)))
 	}
-	return mustDecodeNode(hash[:], enc)
+	// The returned value from database is in its own copy,
+	// safe to use mustDecodeNodeUnsafe for decoding.
+	return mustDecodeNodeUnsafe(hash[:], enc)
 }

 // Node retrieves an encoded cached trie node from memory. If it cannot be found
diff --git a/trie/node.go b/trie/node.go
index bf3f024bb8a..6ce6551ded8 100644
--- a/trie/node.go
+++ b/trie/node.go
@@ -99,6 +99,7 @@ func (n valueNode) fstring(ind string) string {
 	return fmt.Sprintf("%x ", []byte(n))
 }

+// mustDecodeNode is a wrapper of decodeNode and panics if any error is encountered.
 func mustDecodeNode(hash, buf []byte) node {
 	n, err := decodeNode(hash, buf)
 	if err != nil {
@@ -107,8 +108,29 @@ func mustDecodeNode(hash, buf []byte) node {
 	return n
 }

-// decodeNode parses the RLP encoding of a trie node.
+// mustDecodeNodeUnsafe is a wrapper of decodeNodeUnsafe and panics if any error is
+// encountered.
+func mustDecodeNodeUnsafe(hash, buf []byte) node {
+	n, err := decodeNodeUnsafe(hash, buf)
+	if err != nil {
+		panic(fmt.Sprintf("node %x: %v", hash, err))
+	}
+	return n
+}
+
+// decodeNode parses the RLP encoding of a trie node. It will deep-copy the passed
+// byte slice for decoding, so it's safe to modify the byte slice afterwards. The
+// decode performance of this function is not optimal, but it is suitable for most
+// scenarios with low performance requirements, where it is hard to determine
+// whether the byte slice will be modified or not.
 func decodeNode(hash, buf []byte) (node, error) {
+	return decodeNodeUnsafe(hash, common.CopyBytes(buf))
+}
+
+// decodeNodeUnsafe parses the RLP encoding of a trie node. The passed byte slice
+// will be directly referenced by node without bytes deep copy, so the input MUST
+// not be changed after.
+func decodeNodeUnsafe(hash, buf []byte) (node, error) {
 	if len(buf) == 0 {
 		return nil, io.ErrUnexpectedEOF
 	}
@@ -141,7 +163,7 @@ func decodeShort(hash, elems []byte) (node, error) {
 		if err != nil {
 			return nil, fmt.Errorf("invalid value node: %v", err)
 		}
-		return &shortNode{key, append(valueNode{}, val...), flag}, nil
+		return &shortNode{key, valueNode(val), flag}, nil
 	}
 	r, _, err := decodeRef(rest)
 	if err != nil {
@@ -164,7 +186,7 @@ func decodeFull(hash, elems []byte) (*fullNode, error) {
 			return n, err
 		}
 		if len(val) > 0 {
-			n.Children[16] = append(valueNode{}, val...)
+ n.Children[16] = valueNode(val) } return n, nil } @@ -190,7 +212,7 @@ func decodeRef(buf []byte) (node, []byte, error) { // empty node return nil, rest, nil case kind == rlp.String && len(val) == 32: - return append(hashNode{}, val...), rest, nil + return hashNode(val), rest, nil default: return nil, nil, fmt.Errorf("invalid RLP string size %d (want 0 or 32)", len(val)) } diff --git a/trie/node_test.go b/trie/node_test.go index ac1d8fbef3e..9b8b33748fa 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -20,6 +20,7 @@ import ( "bytes" "testing" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" ) @@ -92,3 +93,123 @@ func TestDecodeFullNode(t *testing.T) { t.Fatalf("decode full node err: %v", err) } } + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkEncodeShortNode +// BenchmarkEncodeShortNode-8 16878850 70.81 ns/op 48 B/op 1 allocs/op +func BenchmarkEncodeShortNode(b *testing.B) { + node := &shortNode{ + Key: []byte{0x1, 0x2}, + Val: hashNode(randBytes(32)), + } + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + nodeToBytes(node) + } +} + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkEncodeFullNode +// BenchmarkEncodeFullNode-8 4323273 284.4 ns/op 576 B/op 1 allocs/op +func BenchmarkEncodeFullNode(b *testing.B) { + node := &fullNode{} + for i := 0; i < 16; i++ { + node.Children[i] = hashNode(randBytes(32)) + } + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + nodeToBytes(node) + } +} + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkDecodeShortNode +// BenchmarkDecodeShortNode-8 7925638 151.0 ns/op 157 B/op 4 allocs/op +func BenchmarkDecodeShortNode(b *testing.B) { + node := &shortNode{ + Key: []byte{0x1, 0x2}, + Val: hashNode(randBytes(32)), + } + blob := nodeToBytes(node) + hash := crypto.Keccak256(blob) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + mustDecodeNode(hash, blob) + } +} + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkDecodeShortNodeUnsafe +// BenchmarkDecodeShortNodeUnsafe-8 9027476 128.6 ns/op 109 B/op 3 allocs/op +func BenchmarkDecodeShortNodeUnsafe(b *testing.B) { + node := &shortNode{ + Key: []byte{0x1, 0x2}, + Val: hashNode(randBytes(32)), + } + blob := nodeToBytes(node) + hash := crypto.Keccak256(blob) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + mustDecodeNodeUnsafe(hash, blob) + } +} + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkDecodeFullNode +// BenchmarkDecodeFullNode-8 1597462 761.9 ns/op 1280 B/op 18 allocs/op +func BenchmarkDecodeFullNode(b *testing.B) { + node := &fullNode{} + for i := 0; i < 16; i++ { + node.Children[i] = hashNode(randBytes(32)) + } + blob := nodeToBytes(node) + hash := crypto.Keccak256(blob) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + mustDecodeNode(hash, blob) + } +} + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkDecodeFullNodeUnsafe +// BenchmarkDecodeFullNodeUnsafe-8 1789070 687.1 ns/op 704 B/op 17 allocs/op +func BenchmarkDecodeFullNodeUnsafe(b *testing.B) { + node := &fullNode{} + for i := 0; i < 16; i++ { + node.Children[i] = hashNode(randBytes(32)) + } + blob := nodeToBytes(node) + hash := crypto.Keccak256(blob) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + 
mustDecodeNodeUnsafe(hash, blob) + } +} From 2c5648d8915dc6f4cd822a379819d774beba0dc5 Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Fri, 19 Aug 2022 01:00:21 -0500 Subject: [PATCH 04/26] all: fix some typos (#25551) * Fix some typos * Fix some mistakes * Revert 4byte.json * Fix an incorrect fix * Change files to fails --- accounts/abi/reflect_test.go | 2 +- accounts/keystore/account_cache_test.go | 2 +- accounts/keystore/file_cache.go | 4 +-- accounts/keystore/keystore_test.go | 2 +- accounts/usbwallet/trezor.go | 4 +-- accounts/usbwallet/wallet.go | 2 +- build/ci.go | 2 +- cmd/faucet/faucet.go | 2 +- cmd/geth/consolecmd_test.go | 2 +- cmd/puppeth/ssh.go | 2 +- common/prque/prque.go | 4 +-- consensus/clique/snapshot_test.go | 4 +-- console/console.go | 6 ++-- core/blockchain.go | 4 +-- core/blockchain_repair_test.go | 32 +++++++++---------- core/rawdb/accessors_chain_test.go | 2 +- core/rawdb/database.go | 2 +- core/rawdb/freezer_table.go | 2 +- core/state/snapshot/iterator_fast.go | 4 +-- core/state/snapshot/snapshot_test.go | 2 +- core/state/statedb.go | 6 ++-- core/state/statedb_test.go | 2 +- core/state/sync_test.go | 12 +++---- core/state/trie_prefetcher.go | 2 +- core/types/block.go | 2 +- core/types/block_test.go | 2 +- core/vm/runtime/runtime_test.go | 4 +-- crypto/bls12381/isogeny.go | 4 +-- eth/catalyst/api.go | 4 +-- eth/downloader/api.go | 2 +- eth/downloader/beaconsync.go | 2 +- eth/downloader/downloader.go | 4 +-- eth/downloader/downloader_test.go | 8 ++--- eth/downloader/fetchers_concurrent.go | 6 ++-- eth/downloader/fetchers_concurrent_bodies.go | 4 +-- eth/downloader/fetchers_concurrent_headers.go | 4 +-- .../fetchers_concurrent_receipts.go | 6 ++-- eth/downloader/queue.go | 2 +- eth/downloader/skeleton.go | 12 +++---- eth/downloader/skeleton_test.go | 14 ++++---- eth/ethconfig/config.go | 2 +- eth/fetcher/tx_fetcher.go | 10 +++--- eth/fetcher/tx_fetcher_test.go | 6 ++-- eth/filters/api.go | 2 +- eth/peerset.go | 2 +- eth/protocols/eth/broadcast.go | 2 +- eth/protocols/eth/dispatcher.go | 2 +- eth/protocols/eth/handler_test.go | 2 +- eth/protocols/eth/peer.go | 2 +- eth/protocols/snap/handler.go | 2 +- eth/protocols/snap/peer.go | 8 ++--- eth/protocols/snap/sync.go | 26 +++++++-------- eth/state_accessor.go | 2 +- eth/tracers/api.go | 10 +++--- .../internal/tracetest/calltrace_test.go | 2 +- ethclient/ethclient_test.go | 2 +- ethdb/memorydb/memorydb.go | 2 +- interfaces.go | 2 +- internal/ethapi/api.go | 2 +- les/downloader/api.go | 2 +- les/downloader/downloader.go | 4 +-- les/downloader/downloader_test.go | 4 +-- les/downloader/queue.go | 2 +- les/fetcher.go | 2 +- les/flowcontrol/manager.go | 2 +- les/odr.go | 2 +- les/vflux/client/fillset_test.go | 2 +- les/vflux/client/serverpool_test.go | 2 +- les/vflux/server/balance.go | 2 +- les/vflux/server/balance_test.go | 4 +-- les/vflux/server/status.go | 2 +- light/lightchain.go | 2 +- light/odr_util.go | 4 +-- light/postprocess.go | 4 +-- light/trie.go | 2 +- light/txpool.go | 2 +- metrics/gauge_float64_test.go | 2 +- metrics/gauge_test.go | 2 +- metrics/prometheus/prometheus.go | 2 +- miner/unconfirmed_test.go | 2 +- miner/worker_test.go | 2 +- mobile/accounts.go | 6 ++-- mobile/init.go | 2 +- node/rpcstack_test.go | 2 +- p2p/discover/v4_udp.go | 2 +- p2p/discover/v5_udp.go | 2 +- p2p/msgrate/msgrate.go | 6 ++-- p2p/tracker/tracker.go | 2 +- rpc/server.go | 2 +- signer/rules/rules_test.go | 8 ++--- signer/storage/aes_gcm_storage.go | 2 +- trie/hasher.go | 2 +- 
trie/proof_test.go | 32 +++++++++---------- trie/secure_trie_test.go | 2 +- 94 files changed, 200 insertions(+), 200 deletions(-) diff --git a/accounts/abi/reflect_test.go b/accounts/abi/reflect_test.go index cf13a79da84..76ef1ad2aa3 100644 --- a/accounts/abi/reflect_test.go +++ b/accounts/abi/reflect_test.go @@ -32,7 +32,7 @@ type reflectTest struct { var reflectTests = []reflectTest{ { - name: "OneToOneCorrespondance", + name: "OneToOneCorrespondence", args: []string{"fieldA"}, struc: struct { FieldA int `abi:"fieldA"` diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go index bdcd8118251..daea497d1ae 100644 --- a/accounts/keystore/account_cache_test.go +++ b/accounts/keystore/account_cache_test.go @@ -318,7 +318,7 @@ func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error { func TestUpdatedKeyfileContents(t *testing.T) { t.Parallel() - // Create a temporary kesytore to test with + // Create a temporary keystore to test with rand.Seed(time.Now().UnixNano()) dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int())) ks := NewKeyStore(dir, LightScryptN, LightScryptP) diff --git a/accounts/keystore/file_cache.go b/accounts/keystore/file_cache.go index b3ecf8946b5..79f9a296374 100644 --- a/accounts/keystore/file_cache.go +++ b/accounts/keystore/file_cache.go @@ -39,7 +39,7 @@ type fileCache struct { func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) { t0 := time.Now() - // List all the failes from the keystore folder + // List all the files from the keystore folder files, err := os.ReadDir(keyDir) if err != nil { return nil, nil, nil, err @@ -61,7 +61,7 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er log.Trace("Ignoring file on account scan", "path", path) continue } - // Gather the set of all and fresly modified files + // Gather the set of all and freshly modified files all.Add(path) info, err := fi.Info() diff --git a/accounts/keystore/keystore_test.go b/accounts/keystore/keystore_test.go index 6a2e32f6d9c..4cdf0b1ed6c 100644 --- a/accounts/keystore/keystore_test.go +++ b/accounts/keystore/keystore_test.go @@ -214,7 +214,7 @@ func TestSignRace(t *testing.T) { // Tests that the wallet notifier loop starts and stops correctly based on the // addition and removal of wallet event subscriptions. 
func TestWalletNotifierLifecycle(t *testing.T) { - // Create a temporary kesytore to test with + // Create a temporary keystore to test with _, ks := tmpKeyStore(t, false) // Ensure that the notification updater is not running yet diff --git a/accounts/usbwallet/trezor.go b/accounts/usbwallet/trezor.go index c2182b88d03..e385682a583 100644 --- a/accounts/usbwallet/trezor.go +++ b/accounts/usbwallet/trezor.go @@ -196,10 +196,10 @@ func (w *trezorDriver) trezorDerive(derivationPath []uint32) (common.Address, er if _, err := w.trezorExchange(&trezor.EthereumGetAddress{AddressN: derivationPath}, address); err != nil { return common.Address{}, err } - if addr := address.GetAddressBin(); len(addr) > 0 { // Older firmwares use binary fomats + if addr := address.GetAddressBin(); len(addr) > 0 { // Older firmwares use binary formats return common.BytesToAddress(addr), nil } - if addr := address.GetAddressHex(); len(addr) > 0 { // Newer firmwares use hexadecimal fomats + if addr := address.GetAddressHex(); len(addr) > 0 { // Newer firmwares use hexadecimal formats return common.HexToAddress(addr), nil } return common.Address{}, errors.New("missing derived address") diff --git a/accounts/usbwallet/wallet.go b/accounts/usbwallet/wallet.go index 06ff0636ae2..0e399a6d09a 100644 --- a/accounts/usbwallet/wallet.go +++ b/accounts/usbwallet/wallet.go @@ -380,7 +380,7 @@ func (w *wallet) selfDerive() { // of legacy-ledger, the first account on the legacy-path will // be shown to the user, even if we don't actively track it if i < len(nextAddrs)-1 { - w.log.Info("Skipping trakcking first account on legacy path, use personal.deriveAccount(,, false) to track", + w.log.Info("Skipping tracking first account on legacy path, use personal.deriveAccount(,, false) to track", "path", path, "address", nextAddrs[i]) break } diff --git a/build/ci.go b/build/ci.go index f91de328beb..80b04004593 100644 --- a/build/ci.go +++ b/build/ci.go @@ -608,7 +608,7 @@ func doDocker(cmdline []string) { } if mismatch { // Build numbers mismatching, retry in a short time to - // avoid concurrent failes in both publisher images. If + // avoid concurrent fails in both publisher images. If // however the retry failed too, it means the concurrent // builder is still crunching, let that do the publish. if i == 0 { diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go index d49b9ed29d9..dfb7d326dc4 100644 --- a/cmd/faucet/faucet.go +++ b/cmd/faucet/faucet.go @@ -709,7 +709,7 @@ func authTwitter(url string, tokenV1, tokenV2 string) (string, string, string, c case tokenV2 != "": return authTwitterWithTokenV2(tweetID, tokenV2) } - // Twiter API token isn't provided so we just load the public posts + // Twitter API token isn't provided so we just load the public posts // and scrape it for the Ethereum address and profile URL. We need to load // the mobile page though since the main page loads tweet contents via JS. url = strings.Replace(url, "https://twitter.com/", "https://mobile.twitter.com/", 1) diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go index d5ebd74aedf..442b82df0b3 100644 --- a/cmd/geth/consolecmd_test.go +++ b/cmd/geth/consolecmd_test.go @@ -155,7 +155,7 @@ To exit, press ctrl-d or type exit } // trulyRandInt generates a crypto random integer used by the console tests to -// not clash network ports with other tests running cocurrently. +// not clash network ports with other tests running concurrently. 
func trulyRandInt(lo, hi int) int { num, _ := rand.Int(rand.Reader, big.NewInt(int64(hi-lo))) return int(num.Int64()) + lo diff --git a/cmd/puppeth/ssh.go b/cmd/puppeth/ssh.go index 0c23ab55622..a20b3bfda20 100644 --- a/cmd/puppeth/ssh.go +++ b/cmd/puppeth/ssh.go @@ -163,7 +163,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) { return nil } // We have a mismatch, forbid connecting - return errors.New("ssh key mismatch, readd the machine to update") + return errors.New("ssh key mismatch, re-add the machine to update") } client, err := ssh.Dial("tcp", hostport, &ssh.ClientConfig{User: username, Auth: auths, HostKeyCallback: keycheck}) if err != nil { diff --git a/common/prque/prque.go b/common/prque/prque.go index 54c78b5fc2b..fb02e3418c2 100755 --- a/common/prque/prque.go +++ b/common/prque/prque.go @@ -41,13 +41,13 @@ func (p *Prque) Push(data interface{}, priority int64) { heap.Push(p.cont, &item{data, priority}) } -// Peek returns the value with the greates priority but does not pop it off. +// Peek returns the value with the greatest priority but does not pop it off. func (p *Prque) Peek() (interface{}, int64) { item := p.cont.blocks[0][0] return item.value, item.priority } -// Pops the value with the greates priority off the stack and returns it. +// Pops the value with the greatest priority off the stack and returns it. // Currently no shrinking is done. func (p *Prque) Pop() (interface{}, int64) { item := heap.Pop(p.cont).(*item) diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go index b87ad8c23a7..4a067c62554 100644 --- a/consensus/clique/snapshot_test.go +++ b/consensus/clique/snapshot_test.go @@ -305,7 +305,7 @@ func TestClique(t *testing.T) { }, { // Ensure that pending votes don't survive authorization status changes. This // corner case can only appear if a signer is quickly added, removed and then - // readded (or the inverse), while one of the original voters dropped. If a + // re-added (or the inverse), while one of the original voters dropped. If a // past vote is left cached in the system somewhere, this will interfere with // the final signer outcome. signers: []string{"A", "B", "C", "D", "E"}, @@ -344,7 +344,7 @@ func TestClique(t *testing.T) { }, failure: errUnauthorizedSigner, }, { - // An authorized signer that signed recenty should not be able to sign again + // An authorized signer that signed recently should not be able to sign again signers: []string{"A", "B"}, votes: []testerVote{ {signer: "A"}, diff --git a/console/console.go b/console/console.go index c8f6c9cfeec..7b9ed27e15e 100644 --- a/console/console.go +++ b/console/console.go @@ -290,7 +290,7 @@ func (c *Console) AutoCompleteInput(line string, pos int) (string, []string, str if len(line) == 0 || pos == 0 { return "", nil, "" } - // Chunck data to relevant part for autocompletion + // Chunk data to relevant part for autocompletion // E.g. in case of nested lines eth.getBalance(eth.coinb start := pos - 1 for ; start > 0; start-- { @@ -407,7 +407,7 @@ func (c *Console) StopInteractive() { } } -// Interactive starts an interactive user session, where in.put is propted from +// Interactive starts an interactive user session, where input is prompted from // the configured user prompter. func (c *Console) Interactive() { var ( @@ -497,7 +497,7 @@ func (c *Console) readLines(input chan<- string, errc chan<- error, prompt <-cha } } -// countIndents returns the number of identations for the given input. 
+// countIndents returns the number of indentations for the given input. // In case of invalid input such as var a = } the result can be negative. func countIndents(input string) int { var ( diff --git a/core/blockchain.go b/core/blockchain.go index ee95cfb6cb6..a98c3b4dbeb 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1375,7 +1375,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types } // In theory we should fire a ChainHeadEvent when we inject // a canonical block, but sometimes we can insert a batch of - // canonicial blocks. Avoid firing too many ChainHeadEvents, + // canonical blocks. Avoid firing too many ChainHeadEvents, // we will fire an accumulated ChainHeadEvent and disable fire // event here. if emitHeadEvent { @@ -1612,7 +1612,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) // block in the middle. It can only happen in the clique chain. Whenever // we insert blocks via `insertSideChain`, we only commit `td`, `header` // and `body` if it's non-existent. Since we don't have receipts without - // reexecution, so nothing to commit. But if the sidechain will be adpoted + // reexecution, so nothing to commit. But if the sidechain will be adopted // as the canonical chain eventually, it needs to be reexecuted for missing // state, but if it's this special case here(skip reexecution) we will lose // the empty receipt entry. diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index 24309405d2b..feed8a17778 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -564,7 +564,7 @@ func testShortReorgedSnapSyncingRepair(t *testing.T, snapshots bool) { // Tests a recovery for a long canonical chain with frozen blocks where a recent // block - newer than the ancient limit - was already committed to disk and then // the process crashed. In this case we expect the chain to be rolled back to the -// committed block, with everything afterwads kept as fast sync data. +// committed block, with everything afterwards kept as fast sync data. func TestLongShallowRepair(t *testing.T) { testLongShallowRepair(t, false) } func TestLongShallowRepairWithSnapshots(t *testing.T) { testLongShallowRepair(t, true) } @@ -609,7 +609,7 @@ func testLongShallowRepair(t *testing.T, snapshots bool) { // Tests a recovery for a long canonical chain with frozen blocks where a recent // block - older than the ancient limit - was already committed to disk and then // the process crashed. In this case we expect the chain to be rolled back to the -// committed block, with everything afterwads deleted. +// committed block, with everything afterwards deleted. func TestLongDeepRepair(t *testing.T) { testLongDeepRepair(t, false) } func TestLongDeepRepairWithSnapshots(t *testing.T) { testLongDeepRepair(t, true) } @@ -653,7 +653,7 @@ func testLongDeepRepair(t *testing.T, snapshots bool) { // Tests a recovery for a long canonical chain with frozen blocks where the fast // sync pivot point - newer than the ancient limit - was already committed, after // which the process crashed. In this case we expect the chain to be rolled back -// to the committed block, with everything afterwads kept as fast sync data. +// to the committed block, with everything afterwards kept as fast sync data. 
func TestLongSnapSyncedShallowRepair(t *testing.T) { testLongSnapSyncedShallowRepair(t, false) } @@ -702,7 +702,7 @@ func testLongSnapSyncedShallowRepair(t *testing.T, snapshots bool) { // Tests a recovery for a long canonical chain with frozen blocks where the fast // sync pivot point - older than the ancient limit - was already committed, after // which the process crashed. In this case we expect the chain to be rolled back -// to the committed block, with everything afterwads deleted. +// to the committed block, with everything afterwards deleted. func TestLongSnapSyncedDeepRepair(t *testing.T) { testLongSnapSyncedDeepRepair(t, false) } func TestLongSnapSyncedDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncedDeepRepair(t, true) } @@ -843,7 +843,7 @@ func testLongSnapSyncingDeepRepair(t *testing.T, snapshots bool) { // side chain, where a recent block - newer than the ancient limit - was already // committed to disk and then the process crashed. In this test scenario the side // chain is below the committed block. In this case we expect the chain to be -// rolled back to the committed block, with everything afterwads kept as fast +// rolled back to the committed block, with everything afterwards kept as fast // sync data; the side chain completely nuked by the freezer. func TestLongOldForkedShallowRepair(t *testing.T) { testLongOldForkedShallowRepair(t, false) @@ -895,7 +895,7 @@ func testLongOldForkedShallowRepair(t *testing.T, snapshots bool) { // side chain, where a recent block - older than the ancient limit - was already // committed to disk and then the process crashed. In this test scenario the side // chain is below the committed block. In this case we expect the canonical chain -// to be rolled back to the committed block, with everything afterwads deleted; +// to be rolled back to the committed block, with everything afterwards deleted; // the side chain completely nuked by the freezer. func TestLongOldForkedDeepRepair(t *testing.T) { testLongOldForkedDeepRepair(t, false) } func TestLongOldForkedDeepRepairWithSnapshots(t *testing.T) { testLongOldForkedDeepRepair(t, true) } @@ -942,7 +942,7 @@ func testLongOldForkedDeepRepair(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - newer than the ancient limit - // was already committed to disk and then the process crashed. In this test scenario // the side chain is below the committed block. In this case we expect the chain -// to be rolled back to the committed block, with everything afterwads kept as +// to be rolled back to the committed block, with everything afterwards kept as // fast sync data; the side chain completely nuked by the freezer. func TestLongOldForkedSnapSyncedShallowRepair(t *testing.T) { testLongOldForkedSnapSyncedShallowRepair(t, false) @@ -994,7 +994,7 @@ func testLongOldForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - older than the ancient limit - // was already committed to disk and then the process crashed. In this test scenario // the side chain is below the committed block. In this case we expect the canonical -// chain to be rolled back to the committed block, with everything afterwads deleted; +// chain to be rolled back to the committed block, with everything afterwards deleted; // the side chain completely nuked by the freezer. 
func TestLongOldForkedSnapSyncedDeepRepair(t *testing.T) { testLongOldForkedSnapSyncedDeepRepair(t, false) @@ -1149,7 +1149,7 @@ func testLongOldForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { // side chain, where a recent block - newer than the ancient limit - was already // committed to disk and then the process crashed. In this test scenario the side // chain is above the committed block. In this case we expect the chain to be -// rolled back to the committed block, with everything afterwads kept as fast +// rolled back to the committed block, with everything afterwards kept as fast // sync data; the side chain completely nuked by the freezer. func TestLongNewerForkedShallowRepair(t *testing.T) { testLongNewerForkedShallowRepair(t, false) @@ -1201,7 +1201,7 @@ func testLongNewerForkedShallowRepair(t *testing.T, snapshots bool) { // side chain, where a recent block - older than the ancient limit - was already // committed to disk and then the process crashed. In this test scenario the side // chain is above the committed block. In this case we expect the canonical chain -// to be rolled back to the committed block, with everything afterwads deleted; +// to be rolled back to the committed block, with everything afterwards deleted; // the side chain completely nuked by the freezer. func TestLongNewerForkedDeepRepair(t *testing.T) { testLongNewerForkedDeepRepair(t, false) } func TestLongNewerForkedDeepRepairWithSnapshots(t *testing.T) { testLongNewerForkedDeepRepair(t, true) } @@ -1248,7 +1248,7 @@ func testLongNewerForkedDeepRepair(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - newer than the ancient limit - // was already committed to disk and then the process crashed. In this test scenario // the side chain is above the committed block. In this case we expect the chain -// to be rolled back to the committed block, with everything afterwads kept as fast +// to be rolled back to the committed block, with everything afterwards kept as fast // sync data; the side chain completely nuked by the freezer. func TestLongNewerForkedSnapSyncedShallowRepair(t *testing.T) { testLongNewerForkedSnapSyncedShallowRepair(t, false) @@ -1300,7 +1300,7 @@ func testLongNewerForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - older than the ancient limit - // was already committed to disk and then the process crashed. In this test scenario // the side chain is above the committed block. In this case we expect the canonical -// chain to be rolled back to the committed block, with everything afterwads deleted; +// chain to be rolled back to the committed block, with everything afterwards deleted; // the side chain completely nuked by the freezer. func TestLongNewerForkedSnapSyncedDeepRepair(t *testing.T) { testLongNewerForkedSnapSyncedDeepRepair(t, false) @@ -1454,7 +1454,7 @@ func testLongNewerForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { // Tests a recovery for a long canonical chain with frozen blocks and a longer side // chain, where a recent block - newer than the ancient limit - was already committed // to disk and then the process crashed. In this case we expect the chain to be -// rolled back to the committed block, with everything afterwads kept as fast sync +// rolled back to the committed block, with everything afterwards kept as fast sync // data. The side chain completely nuked by the freezer. 
func TestLongReorgedShallowRepair(t *testing.T) { testLongReorgedShallowRepair(t, false) } func TestLongReorgedShallowRepairWithSnapshots(t *testing.T) { testLongReorgedShallowRepair(t, true) } @@ -1501,7 +1501,7 @@ func testLongReorgedShallowRepair(t *testing.T, snapshots bool) { // Tests a recovery for a long canonical chain with frozen blocks and a longer side // chain, where a recent block - older than the ancient limit - was already committed // to disk and then the process crashed. In this case we expect the canonical chains -// to be rolled back to the committed block, with everything afterwads deleted. The +// to be rolled back to the committed block, with everything afterwards deleted. The // side chain completely nuked by the freezer. func TestLongReorgedDeepRepair(t *testing.T) { testLongReorgedDeepRepair(t, false) } func TestLongReorgedDeepRepairWithSnapshots(t *testing.T) { testLongReorgedDeepRepair(t, true) } @@ -1548,7 +1548,7 @@ func testLongReorgedDeepRepair(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - newer than the ancient limit - // was already committed to disk and then the process crashed. In this case we // expect the chain to be rolled back to the committed block, with everything -// afterwads kept as fast sync data. The side chain completely nuked by the +// afterwards kept as fast sync data. The side chain completely nuked by the // freezer. func TestLongReorgedSnapSyncedShallowRepair(t *testing.T) { testLongReorgedSnapSyncedShallowRepair(t, false) @@ -1600,7 +1600,7 @@ func testLongReorgedSnapSyncedShallowRepair(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - older than the ancient limit - // was already committed to disk and then the process crashed. In this case we // expect the canonical chains to be rolled back to the committed block, with -// everything afterwads deleted. The side chain completely nuked by the freezer. +// everything afterwards deleted. The side chain completely nuked by the freezer. func TestLongReorgedSnapSyncedDeepRepair(t *testing.T) { testLongReorgedSnapSyncedDeepRepair(t, false) } diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index dbb13caa416..21d23e1f0c8 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -285,7 +285,7 @@ func TestTdStorage(t *testing.T) { func TestCanonicalMappingStorage(t *testing.T) { db := NewMemoryDatabase() - // Create a test canonical number and assinged hash to move around + // Create a test canonical number and assigned hash to move around hash, number := common.Hash{0: 0xff}, uint64(314) if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) { t.Fatalf("Non existent canonical mapping returned: %v", entry) diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 831ca69c4c0..1eaf033bbef 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -260,7 +260,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st if kvblob, _ := db.Get(headerHashKey(1)); len(kvblob) == 0 { return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path") } - // Block #1 is still in the database, we're allowed to init a new feezer + // Block #1 is still in the database, we're allowed to init a new freezer } // Otherwise, the head header is still the genesis, we're allowed to init a new // freezer. 
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index 51d7d193085..3fe691cf6d2 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -46,7 +46,7 @@ var ( errNotSupported = errors.New("this operation is not supported") ) -// indexEntry contains the number/id of the file that the data resides in, aswell as the +// indexEntry contains the number/id of the file that the data resides in, as well as the // offset within the file to the end of the data. // In serialized form, the filenum is stored as uint16. type indexEntry struct { diff --git a/core/state/snapshot/iterator_fast.go b/core/state/snapshot/iterator_fast.go index 48069b8fcf5..435c28e96f9 100644 --- a/core/state/snapshot/iterator_fast.go +++ b/core/state/snapshot/iterator_fast.go @@ -319,7 +319,7 @@ func (fi *fastIterator) Slot() []byte { } // Release iterates over all the remaining live layer iterators and releases each -// of thme individually. +// of them individually. func (fi *fastIterator) Release() { for _, it := range fi.iterators { it.it.Release() @@ -327,7 +327,7 @@ func (fi *fastIterator) Release() { fi.iterators = nil } -// Debug is a convencience helper during testing +// Debug is a convenience helper during testing func (fi *fastIterator) Debug() { for _, it := range fi.iterators { fmt.Printf("[p=%v v=%v] ", it.priority, it.it.Hash()[0]) diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go index bc4e5cbd046..7c8077b652e 100644 --- a/core/state/snapshot/snapshot_test.go +++ b/core/state/snapshot/snapshot_test.go @@ -265,7 +265,7 @@ func TestPostCapBasicDataAccess(t *testing.T) { snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil) snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil) - // checkExist verifies if an account exiss in a snapshot + // checkExist verifies if an account exists in a snapshot checkExist := func(layer *diffLayer, key string) error { if data, _ := layer.Account(common.HexToHash(key)); data == nil { return fmt.Errorf("expected %x to exist, got nil", common.HexToHash(key)) diff --git a/core/state/statedb.go b/core/state/statedb.go index 5c97dd94ade..50eee8183c3 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -792,7 +792,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { // If state snapshotting is active, also mark the destruction there. // Note, we can't do this only at the end of a block because multiple // transactions within the same block might self destruct and then - // ressurrect an account; but the snapshotter needs both events. + // resurrect an account; but the snapshotter needs both events. if s.snap != nil { s.snapDestructs[obj.addrHash] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely) delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a ressurrect) @@ -891,7 +891,7 @@ func (s *StateDB) clearJournalAndRefund() { s.journal = newJournal() s.refund = 0 } - s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entires + s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries } // Commit writes the state to the underlying in-memory trie database. 
@@ -938,7 +938,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { log.Crit("Failed to commit dirty codes", "error", err) } } - // Write the account trie changes, measuing the amount of wasted time + // Write the account trie changes, measuring the amount of wasted time var start time.Time if metrics.EnabledExpensive { start = time.Now() diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 6af2d4523b3..092a4fb8711 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -771,7 +771,7 @@ func TestStateDBAccessList(t *testing.T) { t.Fatalf("expected %x to be in access list", address) } } - // Check that only the expected addresses are present in the acesslist + // Check that only the expected addresses are present in the access list for address := range state.accessList.addresses { if _, exist := addressMap[address]; !exist { t.Fatalf("extra address %x in access list", address) diff --git a/core/state/sync_test.go b/core/state/sync_test.go index 95c79eaf36a..3d9fe556d2a 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -305,8 +305,8 @@ func TestIterativeDelayedStateSync(t *testing.T) { } for len(nodeElements)+len(codeElements) > 0 { // Sync only half of the scheduled nodes - var nodeProcessd int - var codeProcessd int + var nodeProcessed int + var codeProcessed int if len(codeElements) > 0 { codeResults := make([]trie.CodeSyncResult, len(codeElements)/2+1) for i, element := range codeElements[:len(codeResults)] { @@ -321,7 +321,7 @@ func TestIterativeDelayedStateSync(t *testing.T) { t.Fatalf("failed to process result %v", err) } } - codeProcessd = len(codeResults) + codeProcessed = len(codeResults) } if len(nodeElements) > 0 { nodeResults := make([]trie.NodeSyncResult, len(nodeElements)/2+1) @@ -337,7 +337,7 @@ func TestIterativeDelayedStateSync(t *testing.T) { t.Fatalf("failed to process result %v", err) } } - nodeProcessd = len(nodeResults) + nodeProcessed = len(nodeResults) } batch := dstDb.NewBatch() if err := sched.Commit(batch); err != nil { @@ -346,7 +346,7 @@ func TestIterativeDelayedStateSync(t *testing.T) { batch.Write() paths, nodes, codes = sched.Missing(0) - nodeElements = nodeElements[nodeProcessd:] + nodeElements = nodeElements[nodeProcessed:] for i := 0; i < len(paths); i++ { nodeElements = append(nodeElements, stateElement{ path: paths[i], @@ -354,7 +354,7 @@ func TestIterativeDelayedStateSync(t *testing.T) { syncPath: trie.NewSyncPath([]byte(paths[i])), }) } - codeElements = codeElements[codeProcessd:] + codeElements = codeElements[codeProcessed:] for i := 0; i < len(codes); i++ { codeElements = append(codeElements, stateElement{ code: codes[i], diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 0f6bce3b817..83e8966d4c9 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -212,7 +212,7 @@ type subfetcher struct { wake chan struct{} // Wake channel if a new task is scheduled stop chan struct{} // Channel to interrupt processing - term chan struct{} // Channel to signal iterruption + term chan struct{} // Channel to signal interruption copy chan chan Trie // Channel to request a copy of the current trie seen map[string]struct{} // Tracks the entries already loaded diff --git a/core/types/block.go b/core/types/block.go index 589a34cef6b..7525a88f5a3 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -317,7 +317,7 @@ func (b *Block) Header() *Header { return CopyHeader(b.header) } func (b *Block) Body() *Body { return 
&Body{b.transactions, b.uncles} }

 // Size returns the true RLP encoded storage size of the block, either by encoding
-// and returning it, or returning a previsouly cached value.
+// and returning it, or returning a previously cached value.
 func (b *Block) Size() common.StorageSize {
 	if size := b.size.Load(); size != nil {
 		return size.(common.StorageSize)
diff --git a/core/types/block_test.go b/core/types/block_test.go
index aa1db2f4faa..9e7f581b1dc 100644
--- a/core/types/block_test.go
+++ b/core/types/block_test.go
@@ -314,7 +314,7 @@ func TestRlpDecodeParentHash(t *testing.T) {
 	}
 	// Also test a very very large header.
 	{
-		// The rlp-encoding of the heder belowCauses _total_ length of 65540,
+		// The rlp-encoding of the header below causes _total_ length of 65540,
 		// which is the first to blow the fast-path.
 		h := &Header{
 			ParentHash: want,
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index 0fb28729289..ab77e284df3 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -457,7 +457,7 @@ func BenchmarkSimpleLoop(b *testing.B) {
 		byte(vm.JUMP),
 	}

-	calllRevertingContractWithInput := []byte{
+	callRevertingContractWithInput := []byte{
 		byte(vm.JUMPDEST), //
 		// push args for the call
 		byte(vm.PUSH1), 0, // out size
@@ -485,7 +485,7 @@ func BenchmarkSimpleLoop(b *testing.B) {
 	benchmarkNonModifyingCode(100000000, loopingCode, "loop-100M", "", b)
 	benchmarkNonModifyingCode(100000000, callInexistant, "call-nonexist-100M", "", b)
 	benchmarkNonModifyingCode(100000000, callEOA, "call-EOA-100M", "", b)
-	benchmarkNonModifyingCode(100000000, calllRevertingContractWithInput, "call-reverting-100M", "", b)
+	benchmarkNonModifyingCode(100000000, callRevertingContractWithInput, "call-reverting-100M", "", b)

 	//benchmarkNonModifyingCode(10000000, staticCallIdentity, "staticcall-identity-10M", b)
 	//benchmarkNonModifyingCode(10000000, loopingCode, "loop-10M", b)
diff --git a/crypto/bls12381/isogeny.go b/crypto/bls12381/isogeny.go
index c3cb0a6f7bf..a63f585dd00 100644
--- a/crypto/bls12381/isogeny.go
+++ b/crypto/bls12381/isogeny.go
@@ -19,7 +19,7 @@ package bls12381
 // isogenyMapG1 applies 11-isogeny map for BLS12-381 G1 defined at draft-irtf-cfrg-hash-to-curve-06.
 func isogenyMapG1(x, y *fe) {
 	// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#appendix-C.2
-	params := isogenyConstansG1
+	params := isogenyConstantsG1
 	degree := 15
 	xNum, xDen, yNum, yDen := new(fe), new(fe), new(fe), new(fe)
 	xNum.set(params[0][degree])
@@ -76,7 +76,7 @@ func isogenyMapG2(e *fp2, x, y *fe2) {
 	y.set(yNum)
 }

-var isogenyConstansG1 = [4][16]*fe{
+var isogenyConstantsG1 = [4][16]*fe{
 	{
 		{0x4d18b6f3af00131c, 0x19fa219793fee28c, 0x3f2885f1467f19ae, 0x23dcea34f2ffb304, 0xd15b58d2ffc00054, 0x0913be200a20bef4},
 		{0x898985385cdbbd8b, 0x3c79e43cc7d966aa, 0x1597e193f4cd233a, 0x8637ef1e4d6623ad, 0x11b22deed20d827b, 0x07097bc5998784ad},
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index 6b5f94646fa..358529459b1 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -539,7 +539,7 @@ func (api *ConsensusAPI) invalid(err error, latestValid *types.Header) beacon.Pa
 	return beacon.PayloadStatusV1{Status: beacon.INVALID, LatestValidHash: &currentHash, ValidationError: &errorMsg}
 }

-// heatbeat loops indefinitely, and checks if there have been beacon client updates
+// heartbeat loops indefinitely, and checks if there have been beacon client updates
 // received in the last while. 
If not - or if they but strange ones - it warns the // user that something might be off with their consensus node. // @@ -649,7 +649,7 @@ func (api *ConsensusAPI) heartbeat() { if eta == 0 { log.Warn(message) } else { - log.Warn(message, "eta", common.PrettyAge(time.Now().Add(-eta))) // weird hack, but duration formatted doens't handle days + log.Warn(message, "eta", common.PrettyAge(time.Now().Add(-eta))) // weird hack, but duration formatted doesn't handle days } offlineLogged = time.Now() } diff --git a/eth/downloader/api.go b/eth/downloader/api.go index b36dd638650..b3f7113bcde 100644 --- a/eth/downloader/api.go +++ b/eth/downloader/api.go @@ -125,7 +125,7 @@ type SyncingResult struct { Status ethereum.SyncProgress `json:"status"` } -// uninstallSyncSubscriptionRequest uninstalles a syncing subscription in the API event loop. +// uninstallSyncSubscriptionRequest uninstalls a syncing subscription in the API event loop. type uninstallSyncSubscriptionRequest struct { c chan interface{} uninstalled chan interface{} diff --git a/eth/downloader/beaconsync.go b/eth/downloader/beaconsync.go index 77353925813..484a4e20de6 100644 --- a/eth/downloader/beaconsync.go +++ b/eth/downloader/beaconsync.go @@ -236,7 +236,7 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) { // Binary search to find the ancestor start, end := beaconTail.Number.Uint64()-1, number if number := beaconHead.Number.Uint64(); end > number { - // This shouldn't really happen in a healty network, but if the consensus + // This shouldn't really happen in a healthy network, but if the consensus // clients feeds us a shorter chain as the canonical, we should not attempt // to access non-existent skeleton items. log.Warn("Beacon head lower than local chain", "beacon", number, "local", end) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index f9ac8e487bf..c04352f0aac 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -364,7 +364,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, // The beacon header syncer is async. It will start this synchronization and // will continue doing other tasks. However, if synchronization needs to be // cancelled, the syncer needs to know if we reached the startup point (and - // inited the cancel cannel) or not yet. Make sure that we'll signal even in + // inited the cancel channel) or not yet. Make sure that we'll signal even in // case of a failure. if beaconPing != nil { defer func() { @@ -1461,7 +1461,7 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode } d.syncStatsLock.Unlock() - // Signal the content downloaders of the availablility of new tasks + // Signal the content downloaders of the availability of new tasks for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { select { case ch <- true: diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 1c126bdaed3..450ed61efc5 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -360,7 +360,7 @@ func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limi } // RequestStorageRanges fetches a batch of storage slots belonging to one or -// more accounts. If slots from only one accout is requested, an origin marker +// more accounts. If slots from only one account is requested, an origin marker // may also be used to retrieve from there. 
func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
 	// Create the request and service it
@@ -399,7 +399,7 @@ func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash,
 }

 // RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
-// a specificstate trie.
+// a specific state trie.
 func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error {
 	req := &snap.GetTrieNodesPacket{
 		ID:   id,
@@ -571,8 +571,8 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 	assertOwnChain(t, tester, len(chainB.blocks))
 }

-// Tests that synchronising against a much shorter but much heavyer fork works
-// corrently and is not dropped.
+// Tests that synchronising against a much shorter but much heavier fork works
+// correctly and is not dropped.
 func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
 func TestHeavyForkedSync66Snap(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, SnapSync) }
 func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
diff --git a/eth/downloader/fetchers_concurrent.go b/eth/downloader/fetchers_concurrent.go
index a0aa197175a..44e6aa8f8d8 100644
--- a/eth/downloader/fetchers_concurrent.go
+++ b/eth/downloader/fetchers_concurrent.go
@@ -47,7 +47,7 @@ type typedQueue interface {

 	// capacity is responsible for calculating how many items of the abstracted
 	// type a particular peer is estimated to be able to retrieve within the
-	// alloted round trip time.
+	// allotted round trip time.
 	capacity(peer *peerConnection, rtt time.Duration) int

 	// updateCapacity is responsible for updating how many items of the abstracted
@@ -58,7 +58,7 @@ type typedQueue interface {
 	// from the download queue to the specified peer.
 	reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool)

-	// unreserve is resposible for removing the current retrieval allocation
+	// unreserve is responsible for removing the current retrieval allocation
 	// assigned to a specific peer and placing it back into the pool to allow
 	// reassigning to some other peer.
 	unreserve(peer string) int
@@ -190,7 +190,7 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error {
 			req, err := queue.request(peer, request, responses)
 			if err != nil {
 				// Sending the request failed, which generally means the peer
-				// was diconnected in between assignment and network send.
+				// was disconnected in between assignment and network send.
 				// Although all peer removal operations return allocated tasks
 				// to the queue, that is async, and we can do better here by
 				// immediately pushing the unfulfilled requests.
diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go
index a8de410323f..e84206fe995 100644
--- a/eth/downloader/fetchers_concurrent_bodies.go
+++ b/eth/downloader/fetchers_concurrent_bodies.go
@@ -41,7 +41,7 @@ func (q *bodyQueue) pending() int {
 }

 // capacity is responsible for calculating how many bodies a particular peer is
-// estimated to be able to retrieve within the alloted round trip time.
+// estimated to be able to retrieve within the allotted round trip time. 
func (q *bodyQueue) capacity(peer *peerConnection, rtt time.Duration) int { return peer.BodyCapacity(rtt) } @@ -58,7 +58,7 @@ func (q *bodyQueue) reserve(peer *peerConnection, items int) (*fetchRequest, boo return q.queue.ReserveBodies(peer, items) } -// unreserve is resposible for removing the current body retrieval allocation +// unreserve is responsible for removing the current body retrieval allocation // assigned to a specific peer and placing it back into the pool to allow // reassigning to some other peer. func (q *bodyQueue) unreserve(peer string) int { diff --git a/eth/downloader/fetchers_concurrent_headers.go b/eth/downloader/fetchers_concurrent_headers.go index bd3bb3e00bf..84c7f209865 100644 --- a/eth/downloader/fetchers_concurrent_headers.go +++ b/eth/downloader/fetchers_concurrent_headers.go @@ -41,7 +41,7 @@ func (q *headerQueue) pending() int { } // capacity is responsible for calculating how many headers a particular peer is -// estimated to be able to retrieve within the alloted round trip time. +// estimated to be able to retrieve within the allotted round trip time. func (q *headerQueue) capacity(peer *peerConnection, rtt time.Duration) int { return peer.HeaderCapacity(rtt) } @@ -58,7 +58,7 @@ func (q *headerQueue) reserve(peer *peerConnection, items int) (*fetchRequest, b return q.queue.ReserveHeaders(peer, items), false, false } -// unreserve is resposible for removing the current header retrieval allocation +// unreserve is responsible for removing the current header retrieval allocation // assigned to a specific peer and placing it back into the pool to allow // reassigning to some other peer. func (q *headerQueue) unreserve(peer string) int { diff --git a/eth/downloader/fetchers_concurrent_receipts.go b/eth/downloader/fetchers_concurrent_receipts.go index fee2c34101d..1c853c21844 100644 --- a/eth/downloader/fetchers_concurrent_receipts.go +++ b/eth/downloader/fetchers_concurrent_receipts.go @@ -28,7 +28,7 @@ import ( // concurrent fetcher and the downloader. type receiptQueue Downloader -// waker returns a notification channel that gets pinged in case more reecipt +// waker returns a notification channel that gets pinged in case more receipt // fetches have been queued up, so the fetcher might assign it to idle peers. func (q *receiptQueue) waker() chan bool { return q.queue.receiptWakeCh @@ -41,7 +41,7 @@ func (q *receiptQueue) pending() int { } // capacity is responsible for calculating how many receipts a particular peer is -// estimated to be able to retrieve within the alloted round trip time. +// estimated to be able to retrieve within the allotted round trip time. func (q *receiptQueue) capacity(peer *peerConnection, rtt time.Duration) int { return peer.ReceiptCapacity(rtt) } @@ -58,7 +58,7 @@ func (q *receiptQueue) reserve(peer *peerConnection, items int) (*fetchRequest, return q.queue.ReserveReceipts(peer, items) } -// unreserve is resposible for removing the current receipt retrieval allocation +// unreserve is responsible for removing the current receipt retrieval allocation // assigned to a specific peer and placing it back into the pool to allow // reassigning to some other peer. 
func (q *receiptQueue) unreserve(peer string) int { diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index de5708be32e..a8d2ea83a9e 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -859,7 +859,7 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil { reconstruct(accepted, res) } else { - // else: betweeen here and above, some other peer filled this result, + // else: between here and above, some other peer filled this result, // or it was indeed a no-op. This should not happen, but if it does it's // not something to panic about log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err) diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go index be4e8fbfc10..e627c6ae5a3 100644 --- a/eth/downloader/skeleton.go +++ b/eth/downloader/skeleton.go @@ -51,7 +51,7 @@ const requestHeaders = 512 // errSyncLinked is an internal helper error to signal that the current sync // cycle linked up to the genesis block, this the skeleton syncer should ping // the backfiller to resume. Since we already have that logic on sync start, -// piggie-back on that instead of 2 entrypoints. +// piggy-back on that instead of 2 entrypoints. var errSyncLinked = errors.New("sync linked") // errSyncMerged is an internal helper error to signal that the current sync @@ -148,7 +148,7 @@ type backfiller interface { // suspend requests the backfiller to abort any running full or snap sync // based on the skeleton chain as it might be invalid. The backfiller should // gracefully handle multiple consecutive suspends without a resume, even - // on initial sartup. + // on initial startup. // // The method should return the last block header that has been successfully // backfilled, or nil if the backfiller was not resumed. @@ -209,7 +209,7 @@ type skeleton struct { headEvents chan *headUpdate // Notification channel for new heads terminate chan chan error // Termination channel to abort sync - terminated chan struct{} // Channel to signal that the syner is dead + terminated chan struct{} // Channel to signal that the syncer is dead // Callback hooks used during testing syncStarting func() // callback triggered after a sync cycle is inited but before started @@ -553,7 +553,7 @@ func (s *skeleton) initSync(head *types.Header) { return } } - // Either we've failed to decode the previus state, or there was none. Start + // Either we've failed to decode the previous state, or there was none. Start // a fresh sync with a single subchain represented by the currently sent // chain head. s.progress = &skeletonProgress{ @@ -823,7 +823,7 @@ func (s *skeleton) executeTask(peer *peerConnection, req *headerRequest) { } } -// revertRequests locates all the currently pending reuqests from a particular +// revertRequests locates all the currently pending requests from a particular // peer and reverts them, rescheduling for others to fulfill. 
func (s *skeleton) revertRequests(peer string) { // Gather the requests first, revertals need the lock too @@ -871,7 +871,7 @@ func (s *skeleton) revertRequest(req *headerRequest) { delete(s.requests, req.id) // Remove the request from the tracked set and mark the task as not-pending, - // ready for resheduling + // ready for rescheduling s.scratchOwners[(s.scratchHead-req.head)/requestHeaders] = "" } diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go index 42192571804..6bcbac3a89f 100644 --- a/eth/downloader/skeleton_test.go +++ b/eth/downloader/skeleton_test.go @@ -53,7 +53,7 @@ func newHookedBackfiller() backfiller { // suspend requests the backfiller to abort any running full or snap sync // based on the skeleton chain as it might be invalid. The backfiller should // gracefully handle multiple consecutive suspends without a resume, even -// on initial sartup. +// on initial startup. func (hf *hookedBackfiller) suspend() *types.Header { if hf.suspendHook != nil { hf.suspendHook() @@ -111,7 +111,7 @@ func newSkeletonTestPeerWithHook(id string, headers []*types.Header, serve func( // function can be used to retrieve batches of headers from the particular peer. func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { // Since skeleton test peer are in-memory mocks, dropping the does not make - // them inaccepssible. As such, check a local `dropped` field to see if the + // them inaccessible. As such, check a local `dropped` field to see if the // peer has been dropped and should not respond any more. if atomic.LoadUint64(&p.dropped) != 0 { return nil, errors.New("peer already dropped") @@ -204,7 +204,7 @@ func (p *skeletonTestPeer) RequestReceipts([]common.Hash, chan *eth.Response) (* panic("skeleton sync must not request receipts") } -// Tests various sync initialzations based on previous leftovers in the database +// Tests various sync initializations based on previous leftovers in the database // and announced heads. func TestSkeletonSyncInit(t *testing.T) { // Create a few key headers @@ -227,7 +227,7 @@ func TestSkeletonSyncInit(t *testing.T) { newstate: []*subchain{{Head: 50, Tail: 50}}, }, // Empty database with only the genesis set with a leftover empty sync - // progess. This is a synthetic case, just for the sake of covering things. + // progress. This is a synthetic case, just for the sake of covering things. { oldstate: []*subchain{}, head: block50, @@ -533,13 +533,13 @@ func TestSkeletonSyncRetrievals(t *testing.T) { peers []*skeletonTestPeer // Initial peer set to start the sync with midstate []*subchain // Expected sync state after initial cycle midserve uint64 // Expected number of header retrievals after initial cycle - middrop uint64 // Expectd number of peers dropped after initial cycle + middrop uint64 // Expected number of peers dropped after initial cycle - newHead *types.Header // New header to annount on top of the old one + newHead *types.Header // New header to announce on top of the old one newPeer *skeletonTestPeer // New peer to join the skeleton syncer endstate []*subchain // Expected sync state after the post-init event endserve uint64 // Expected number of header retrievals after the post-init event - enddrop uint64 // Expectd number of peers dropped after the post-init event + enddrop uint64 // Expected number of peers dropped after the post-init event }{ // Completely empty database with only the genesis set.
The sync is expected // to create a single subchain with the requested head. No peers however, so diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 7ba2faf791b..d1323de7b0f 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -196,7 +196,7 @@ type Config struct { RPCEVMTimeout time.Duration // RPCTxFeeCap is the global transaction fee(price * gaslimit) cap for - // send-transction variants. The unit is ether. + // send-transaction variants. The unit is ether. RPCTxFeeCap float64 // Checkpoint is a hardcoded checkpoint which can be nil. diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go index a23cd24bf10..035e0c2ec7d 100644 --- a/eth/fetcher/tx_fetcher.go +++ b/eth/fetcher/tx_fetcher.go @@ -120,7 +120,7 @@ type txDelivery struct { direct bool // Whether this is a direct reply or a broadcast } -// txDrop is the notiication that a peer has disconnected. +// txDrop is the notification that a peer has disconnected. type txDrop struct { peer string } @@ -260,7 +260,7 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { // Enqueue imports a batch of received transaction into the transaction pool // and the fetcher. This method may be called by both transaction broadcasts and // direct request replies. The differentiation is important so the fetcher can -// re-shedule missing transactions as soon as possible. +// re-schedule missing transactions as soon as possible. func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error { // Keep track of all the propagated transactions if direct { @@ -558,7 +558,7 @@ func (f *TxFetcher) loop() { // In case of a direct delivery, also reschedule anything missing // from the original query if delivery.direct { - // Mark the reqesting successful (independent of individual status) + // Mark the requesting successful (independent of individual status) txRequestDoneMeter.Mark(int64(len(delivery.hashes))) // Make sure something was pending, nuke it @@ -607,7 +607,7 @@ func (f *TxFetcher) loop() { delete(f.alternates, hash) delete(f.fetching, hash) } - // Something was delivered, try to rechedule requests + // Something was delivered, try to reschedule requests f.scheduleFetches(timeoutTimer, timeoutTrigger, nil) // Partial delivery may enable others to deliver too } @@ -719,7 +719,7 @@ func (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) { // should be rescheduled if some request is pending. In practice, a timeout will // cause the timer to be rescheduled every 5 secs (until the peer comes through or // disconnects). This is a limitation of the fetcher code because we don't trac -// pending requests and timed out requests separatey. Without double tracking, if +// pending requests and timed out requests separately. Without double tracking, if // we simply didn't reschedule the timer on all-timeout then the timer would never // be set again since len(request) > 0 => something's running. func (f *TxFetcher) rescheduleTimeout(timer *mclock.Timer, trigger chan struct{}) { diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go index ce8d02af7dd..4c06712f775 100644 --- a/eth/fetcher/tx_fetcher_test.go +++ b/eth/fetcher/tx_fetcher_test.go @@ -1011,7 +1011,7 @@ func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) { } // Tests that dropping a peer cleans out all internal data structures in all the -// live or danglng stages. +// live or dangling stages. 
func TestTransactionFetcherDrop(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { @@ -1121,7 +1121,7 @@ func TestTransactionFetcherDropRescheduling(t *testing.T) { } // This test reproduces a crash caught by the fuzzer. The root cause was a -// dangling transaction timing out and clashing on readd with a concurrently +// dangling transaction timing out and clashing on re-add with a concurrently // announced one. func TestTransactionFetcherFuzzCrash01(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ @@ -1148,7 +1148,7 @@ func TestTransactionFetcherFuzzCrash01(t *testing.T) { } // This test reproduces a crash caught by the fuzzer. The root cause was a -// dangling transaction getting peer-dropped and clashing on readd with a +// dangling transaction getting peer-dropped and clashing on re-add with a // concurrently announced one. func TestTransactionFetcherFuzzCrash02(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ diff --git a/eth/filters/api.go b/eth/filters/api.go index 07714791d26..3b8933d0af9 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -36,7 +36,7 @@ import ( // and associated subscription in the event system. type filter struct { typ Type - deadline *time.Timer // filter is inactiv when deadline triggers + deadline *time.Timer // filter is inactive when deadline triggers hashes []common.Hash crit FilterCriteria logs []*types.Log diff --git a/eth/peerset.go b/eth/peerset.go index 3e54a481e36..b9cc1e03aca 100644 --- a/eth/peerset.go +++ b/eth/peerset.go @@ -41,7 +41,7 @@ var ( errPeerNotRegistered = errors.New("peer not registered") // errSnapWithoutEth is returned if a peer attempts to connect only on the - // snap protocol without advertizing the eth main protocol. + // snap protocol without advertising the eth main protocol. errSnapWithoutEth = errors.New("peer connected on snap without compatible eth support") ) diff --git a/eth/protocols/eth/broadcast.go b/eth/protocols/eth/broadcast.go index 09330cfdf32..6fc15f136bf 100644 --- a/eth/protocols/eth/broadcast.go +++ b/eth/protocols/eth/broadcast.go @@ -36,7 +36,7 @@ type blockPropagation struct { td *big.Int } -// broadcastBlocks is a write loop that multiplexes blocks and block accouncements +// broadcastBlocks is a write loop that multiplexes blocks and block announcements // to the remote peer. The goal is to have an async writer that does not lock up // node internals and at the same time rate limits queued data. func (p *Peer) broadcastBlocks() { diff --git a/eth/protocols/eth/dispatcher.go b/eth/protocols/eth/dispatcher.go index bf88d400d4a..65a935d5554 100644 --- a/eth/protocols/eth/dispatcher.go +++ b/eth/protocols/eth/dispatcher.go @@ -224,7 +224,7 @@ func (p *Peer) dispatcher() { switch { case res.Req == nil: // Response arrived with an untracked ID. Since even cancelled - // requests are tracked until fulfilment, a dangling repsponse + // requests are tracked until fulfilment, a dangling response // means the remote peer implements the protocol badly. 
resOp.fail <- errDanglingResponse diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index bf836e8f513..2707a420bc6 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -94,7 +94,7 @@ func (b *testBackend) Chain() *core.BlockChain { return b.chain } func (b *testBackend) TxPool() TxPool { return b.txpool } func (b *testBackend) RunPeer(peer *Peer, handler Handler) error { - // Normally the backend would do peer mainentance and handshakes. All that + // Normally the backend would do peer maintenance and handshakes. All that // is omitted and we will just give control back to the handler. return handler(peer) } diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go index 22674d65b04..a23726384d7 100644 --- a/eth/protocols/eth/peer.go +++ b/eth/protocols/eth/peer.go @@ -133,7 +133,7 @@ func (p *Peer) ID() string { return p.id } -// Version retrieves the peer's negoatiated `eth` protocol version. +// Version retrieves the peer's negotiated `eth` protocol version. func (p *Peer) Version() uint { return p.version } diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go index 77bd96f46e8..41380d96f57 100644 --- a/eth/protocols/snap/handler.go +++ b/eth/protocols/snap/handler.go @@ -504,7 +504,7 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s var ( nodes [][]byte bytes uint64 - loads int // Trie hash expansions to cound database reads + loads int // Trie hash expansions to count database reads ) for _, pathset := range req.Paths { switch len(pathset) { diff --git a/eth/protocols/snap/peer.go b/eth/protocols/snap/peer.go index 87a62d2f8a4..235d499ffdc 100644 --- a/eth/protocols/snap/peer.go +++ b/eth/protocols/snap/peer.go @@ -61,12 +61,12 @@ func (p *Peer) ID() string { return p.id } -// Version retrieves the peer's negoatiated `snap` protocol version. +// Version retrieves the peer's negotiated `snap` protocol version. func (p *Peer) Version() uint { return p.version } -// Log overrides the P2P logget with the higher level one containing only the id. +// Log overrides the P2P logger with the higher level one containing only the id. func (p *Peer) Log() log.Logger { return p.logger } @@ -87,7 +87,7 @@ func (p *Peer) RequestAccountRange(id uint64, root common.Hash, origin, limit co } // RequestStorageRange fetches a batch of storage slots belonging to one or more -// accounts. If slots from only one accout is requested, an origin marker may also +// accounts. If slots from only one account is requested, an origin marker may also // be used to retrieve from there. func (p *Peer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error { if len(accounts) == 1 && origin != nil { @@ -119,7 +119,7 @@ func (p *Peer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) e } // RequestTrieNodes fetches a batch of account or storage trie nodes rooted in -// a specificstate trie. +// a specific state trie. 
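A minimal sketch of the path-set convention described above, assuming hypothetical []byte path variables and a request id already in scope (illustrative only, not part of the patch):

// A one-element path set names a node in the account trie, while a
// two-element set descends into one account's storage trie.
paths := []TrieNodePathSet{
	{accountPath},              // node in the account trie
	{accountHash, storagePath}, // node in that account's storage trie
}
// Cap the response at 512 KiB of trie nodes rooted in stateRoot.
if err := peer.RequestTrieNodes(reqID, stateRoot, paths, 512*1024); err != nil {
	// handle the send failure
}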
func (p *Peer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error { p.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes)) diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index b2462f5f892..deaa4456e0f 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -365,7 +365,7 @@ type SyncPeer interface { RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error // RequestStorageRanges fetches a batch of storage slots belonging to one or - // more accounts. If slots from only one accout is requested, an origin marker + // more accounts. If slots from only one account is requested, an origin marker // may also be used to retrieve from there. RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error @@ -373,7 +373,7 @@ type SyncPeer interface { RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error // RequestTrieNodes fetches a batch of account or storage trie nodes rooted in - // a specificstate trie. + // a specific state trie. RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error // Log retrieves the peer's own contextual logger. @@ -1183,10 +1183,10 @@ func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *st } if subtask == nil { // No large contract required retrieval, but small ones available - for acccount, root := range task.stateTasks { - delete(task.stateTasks, acccount) + for account, root := range task.stateTasks { + delete(task.stateTasks, account) - accounts = append(accounts, acccount) + accounts = append(accounts, account) roots = append(roots, root) if len(accounts) >= storageSets { @@ -1486,7 +1486,7 @@ func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fai } } -// revertRequests locates all the currently pending reuqests from a particular +// revertRequests locates all the currently pending requests from a particular // peer and reverts them, rescheduling for others to fulfill. 
func (s *Syncer) revertRequests(peer string) { // Gather the requests first, revertals need the lock too @@ -1575,7 +1575,7 @@ func (s *Syncer) revertAccountRequest(req *accountRequest) { s.lock.Unlock() // If there's a timeout timer still running, abort it and mark the account - // task as not-pending, ready for resheduling + // task as not-pending, ready for rescheduling req.timeout.Stop() if req.task.req == req { req.task.req = nil @@ -1616,7 +1616,7 @@ func (s *Syncer) revertBytecodeRequest(req *bytecodeRequest) { s.lock.Unlock() // If there's a timeout timer still running, abort it and mark the code - // retrievals as not-pending, ready for resheduling + // retrievals as not-pending, ready for rescheduling req.timeout.Stop() for _, hash := range req.hashes { req.task.codeTasks[hash] = struct{}{} @@ -1657,7 +1657,7 @@ func (s *Syncer) revertStorageRequest(req *storageRequest) { s.lock.Unlock() // If there's a timeout timer still running, abort it and mark the storage - // task as not-pending, ready for resheduling + // task as not-pending, ready for rescheduling req.timeout.Stop() if req.subTask != nil { req.subTask.req = nil @@ -1743,7 +1743,7 @@ func (s *Syncer) revertBytecodeHealRequest(req *bytecodeHealRequest) { s.lock.Unlock() // If there's a timeout timer still running, abort it and mark the code - // retrievals as not-pending, ready for resheduling + // retrievals as not-pending, ready for rescheduling req.timeout.Stop() for _, hash := range req.hashes { req.task.codeTasks[hash] = struct{}{} @@ -2035,7 +2035,7 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { } tr.Commit() } - // Persist the received storage segements. These flat state maybe + // Persist the received storage segments. These flat states may be // outdated during the sync, but it can be fixed later during the // snapshot generation. for j := 0; j < len(res.hashes[i]); j++ { @@ -2170,7 +2170,7 @@ func (s *Syncer) forwardAccountTask(task *accountTask) { } task.res = nil - // Persist the received account segements. These flat state maybe + // Persist the received account segments. These flat states may be // outdated during the sync, but it can be fixed later during the // snapshot generation. oldAccountBytes := s.accountBytes @@ -2773,7 +2773,7 @@ func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) e } // onHealState is a callback method to invoke when a flat state(account -// or storage slot) is downloded during the healing stage. The flat states +// or storage slot) is downloaded during the healing stage. The flat states // can be persisted blindly and can be fixed later in the generation stage. // Note it's not concurrent safe, please handle the concurrent issue outside. func (s *Syncer) onHealState(paths [][]byte, value []byte) error { diff --git a/eth/state_accessor.go b/eth/state_accessor.go index f01db93a678..12dba8a0a9b 100644 --- a/eth/state_accessor.go +++ b/eth/state_accessor.go @@ -44,7 +44,7 @@ import ( // perform Commit or other 'save-to-disk' changes, this should be set to false to avoid // storing trash persistently // - preferDisk: this arg can be used by the caller to signal that even though the 'base' is provided, -// it would be preferrable to start from a fresh state, if we have it on disk. +// it would be preferable to start from a fresh state, if we have it on disk.
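A minimal usage sketch of the flags documented above, assuming a tracer-like caller with a block in hand (the concrete values are illustrative):

// Re-derive the state at block, allowing up to 128 blocks of re-execution,
// checking the live database first (checkLive=true) and preferring a fresh
// on-disk state over the nil base (preferDisk=true).
statedb, err := eth.StateAtBlock(block, 128, nil, true, true)
if err != nil {
	return err
}
_ = statedb // trace against statedb; nothing is committed back to disk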
func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) { var ( current *types.Block diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 014e2f6ad8d..092950e78fa 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -116,7 +116,7 @@ func (context *chainContext) GetHeader(hash common.Hash, number uint64) *types.H return header } -// chainContext construts the context reader which is used by the evm for reading +// chainContext constructs the context reader which is used by the evm for reading // the necessary chain context. func (api *API) chainContext(ctx context.Context) core.ChainContext { return &chainContext{api: api, ctx: ctx} @@ -202,10 +202,10 @@ type blockTraceTask struct { statedb *state.StateDB // Intermediate state prepped for tracing block *types.Block // Block to trace the transactions from rootref common.Hash // Trie root reference held for this task - results []*txTraceResult // Trace results procudes by the task + results []*txTraceResult // Trace results produced by the task } -// blockTraceResult represets the results of tracing a single block when an entire +// blockTraceResult represents the results of tracing a single block when an entire // chain is being traced. type blockTraceResult struct { Block hexutil.Uint64 `json:"block"` // Block number corresponding to this trace @@ -563,7 +563,7 @@ func (api *API) StandardTraceBadBlockToFile(ctx context.Context, hash common.Has // traceBlock configures a new tracer according to the provided configuration, and // executes all the transactions contained within. The return value will be one item -// per transaction, dependent on the requestd tracer. +// per transaction, dependent on the requested tracer. 
func (api *API) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) { if block.NumberU64() == 0 { return nil, errors.New("genesis is not traceable") @@ -707,7 +707,7 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block } } for i, tx := range block.Transactions() { - // Prepare the trasaction for un-traced execution + // Prepare the transaction for un-traced execution var ( msg, _ = tx.AsMessage(signer, block.BaseFee()) txContext = core.NewEVMTxContext(msg) diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go index cabddac4990..d2c50656d9a 100644 --- a/eth/tracers/internal/tracetest/calltrace_test.go +++ b/eth/tracers/internal/tracetest/calltrace_test.go @@ -39,7 +39,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/tests" - // Force-load native and js pacakges, to trigger registration + // Force-load native and js packages, to trigger registration _ "github.com/ethereum/go-ethereum/eth/tracers/js" _ "github.com/ethereum/go-ethereum/eth/tracers/native" ) diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go index d78a12fae72..f2f4a5765d1 100644 --- a/ethclient/ethclient_test.go +++ b/ethclient/ethclient_test.go @@ -581,7 +581,7 @@ func testCallContract(t *testing.T, client *rpc.Client) { if _, err := ec.CallContract(context.Background(), msg, big.NewInt(1)); err != nil { t.Fatalf("unexpected error: %v", err) } - // PendingCallCOntract + // PendingCallContract if _, err := ec.PendingCallContract(context.Background(), msg); err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go index e94570cb3f0..7e4fd7e5e7f 100644 --- a/ethdb/memorydb/memorydb.go +++ b/ethdb/memorydb/memorydb.go @@ -66,7 +66,7 @@ func NewWithCap(size int) *Database { } // Close deallocates the internal map and ensures any consecutive data access op -// failes with an error. +// fails with an error. func (db *Database) Close() error { db.lock.Lock() defer db.lock.Unlock() diff --git a/interfaces.go b/interfaces.go index e8d24a57cf7..eb9af60076e 100644 --- a/interfaces.go +++ b/interfaces.go @@ -204,7 +204,7 @@ type GasPricer interface { // FeeHistory provides recent fee market data that consumers can use to determine // a reasonable maxPriorityFeePerGas value. type FeeHistory struct { - OldestBlock *big.Int // block coresponding to first response value + OldestBlock *big.Int // block corresponding to first response value Reward [][]*big.Int // list every txs priority fee per block BaseFee []*big.Int // list of each block's base fee GasUsedRatio []float64 // ratio of gas used out of the total available limit diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 939dd69396f..e6740942d85 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -988,7 +988,7 @@ func newRevertError(result *core.ExecutionResult) *revertError { } } -// revertError is an API error that encompassas an EVM revertal with JSON error +// revertError is an API error that encompasses an EVM revertal with JSON error // code and a binary data blob. 
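A minimal client-side sketch, assuming the error's data blob was captured in a hex string revertData; the blob is typically the ABI encoding of Error(string) behind the 0x08c379a0 selector, which the abi package can unpack:

// Recover the human-readable revert reason from the binary data blob.
if reason, err := abi.UnpackRevert(common.FromHex(revertData)); err == nil {
	fmt.Println("execution reverted:", reason)
}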
type revertError struct { error diff --git a/les/downloader/api.go b/les/downloader/api.go index b1a81b6b76d..21200b676c6 100644 --- a/les/downloader/api.go +++ b/les/downloader/api.go @@ -125,7 +125,7 @@ type SyncingResult struct { Status ethereum.SyncProgress `json:"status"` } -// uninstallSyncSubscriptionRequest uninstalles a syncing subscription in the API event loop. +// uninstallSyncSubscriptionRequest uninstalls a syncing subscription in the API event loop. type uninstallSyncSubscriptionRequest struct { c chan interface{} uninstalled chan interface{} diff --git a/les/downloader/downloader.go b/les/downloader/downloader.go index b9da76d3478..6352b1c2172 100644 --- a/les/downloader/downloader.go +++ b/les/downloader/downloader.go @@ -1625,7 +1625,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { log.Warn("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err) return fmt.Errorf("%w: %v", errInvalidChain, err) } - // All verifications passed, track all headers within the alloted limits + // All verifications passed, track all headers within the allotted limits if mode == FastSync { head := chunk[len(chunk)-1].Number.Uint64() if head-rollback > uint64(fsHeaderSafetyNet) { @@ -1663,7 +1663,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { } d.syncStatsLock.Unlock() - // Signal the content downloaders of the availablility of new tasks + // Signal the content downloaders of the availability of new tasks for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { select { case ch <- true: diff --git a/les/downloader/downloader_test.go b/les/downloader/downloader_test.go index 5eea4987796..c56870ff178 100644 --- a/les/downloader/downloader_test.go +++ b/les/downloader/downloader_test.go @@ -653,8 +653,8 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()}) } -// Tests that synchronising against a much shorter but much heavyer fork works -// corrently and is not dropped. +// Tests that synchronising against a much shorter but much heavier fork works +// correctly and is not dropped. func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) } func TestHeavyForkedSync66Fast(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FastSync) } func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) } diff --git a/les/downloader/queue.go b/les/downloader/queue.go index 5744b3ee279..b165b6b5c1c 100644 --- a/les/downloader/queue.go +++ b/les/downloader/queue.go @@ -872,7 +872,7 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil { reconstruct(accepted, res) } else { - // else: betweeen here and above, some other peer filled this result, + // else: between here and above, some other peer filled this result, // or it was indeed a no-op. This should not happen, but if it does it's // not something to panic about log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err) diff --git a/les/fetcher.go b/les/fetcher.go index cf62c8f7077..6861eebcf5e 100644 --- a/les/fetcher.go +++ b/les/fetcher.go @@ -252,7 +252,7 @@ func (f *lightFetcher) forEachPeer(check func(id enode.ID, p *fetcherPeer) bool) // request will be made for header retrieval. 
// // - re-sync trigger -// If the local chain lags too much, then the fetcher will enter "synnchronise" +// If the local chain lags too much, then the fetcher will enter "synchronise" // mode to retrieve missing headers in batch. func (f *lightFetcher) mainloop() { defer f.wg.Done() diff --git a/les/flowcontrol/manager.go b/les/flowcontrol/manager.go index 4ffbee58f0d..4367974d632 100644 --- a/les/flowcontrol/manager.go +++ b/les/flowcontrol/manager.go @@ -55,7 +55,7 @@ var ( // ClientManager controls the capacity assigned to the clients of a server. // Since ServerParams guarantee a safe lower estimate for processable requests // even in case of all clients being active, ClientManager calculates a -// corrigated buffer value and usually allows a higher remaining buffer value +// corrected buffer value and usually allows a higher remaining buffer value // to be returned with each reply. type ClientManager struct { clock mclock.Clock diff --git a/les/odr.go b/les/odr.go index 10ff0854d38..2643a534787 100644 --- a/les/odr.go +++ b/les/odr.go @@ -126,7 +126,7 @@ const ( // RetrieveTxStatus retrieves the transaction status from the LES network. // There is no guarantee in the LES protocol that the mined transaction will // be retrieved back for sure because of different reasons(the transaction -// is unindexed, the malicous server doesn't reply it deliberately, etc). +// is unindexed, the malicious server doesn't reply it deliberately, etc). // Therefore, unretrieved transactions(UNKNOWN) will receive a certain number // of retries, thus giving a weak guarantee. func (odr *LesOdr) RetrieveTxStatus(ctx context.Context, req *light.TxStatusRequest) error { diff --git a/les/vflux/client/fillset_test.go b/les/vflux/client/fillset_test.go index ca5af8f07ec..ddb12a82f9b 100644 --- a/les/vflux/client/fillset_test.go +++ b/les/vflux/client/fillset_test.go @@ -104,7 +104,7 @@ func TestFillSet(t *testing.T) { fs.SetTarget(10) expWaiting(4, true) expNotWaiting() - // remove all previosly set flags + // remove all previously set flags ns.ForEach(sfTest1, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { ns.SetState(node, nodestate.Flags{}, sfTest1, 0) }) diff --git a/les/vflux/client/serverpool_test.go b/les/vflux/client/serverpool_test.go index 9f83c5f7f2c..f1fd987d7ed 100644 --- a/les/vflux/client/serverpool_test.go +++ b/les/vflux/client/serverpool_test.go @@ -66,7 +66,7 @@ type ServerPoolTest struct { // (accessed from both the main thread and the preNeg callback) preNegLock sync.Mutex queryWg *sync.WaitGroup // a new wait group is created each time the simulation is started - stopping bool // stopping avoid callind queryWg.Add after queryWg.Wait + stopping bool // stopping avoids calling queryWg.Add after queryWg.Wait cycle, conn, servedConn int serviceCycles, dialCount int diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index 550c6d70ca8..b09f7bb5012 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -623,7 +623,7 @@ func (n *nodeBalance) priorityToBalance(priority int64, capacity uint64) (uint64 return 0, uint64(-priority) } -// reducedBalance estimates the reduced balance at a given time in the fututre based +// reducedBalance estimates the reduced balance at a given time in the future based // on the given balance, the time factor and an estimated average request cost per time ratio func (n *nodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Duration, capacity uint64, avgReqCost float64) balance { // since the
costs are applied continuously during the dt time period we calculate diff --git a/les/vflux/server/balance_test.go b/les/vflux/server/balance_test.go index 9f253cabf48..7c100aab509 100644 --- a/les/vflux/server/balance_test.go +++ b/les/vflux/server/balance_test.go @@ -54,7 +54,7 @@ func newBalanceTestSetup(db ethdb.KeyValueStore, posExp, negExp utils.ValueExpir // Initialize and customize the setup for the balance testing clock := &mclock.Simulated{} setup := newServerSetup() - setup.clientField = setup.setup.NewField("balancTestClient", reflect.TypeOf(balanceTestClient{})) + setup.clientField = setup.setup.NewField("balanceTestClient", reflect.TypeOf(balanceTestClient{})) ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup) if posExp == nil { @@ -298,7 +298,7 @@ func TestEstimatedPriority(t *testing.T) { } } -func TestPostiveBalanceCounting(t *testing.T) { +func TestPositiveBalanceCounting(t *testing.T) { b := newBalanceTestSetup(nil, nil, nil) defer b.stop() diff --git a/les/vflux/server/status.go b/les/vflux/server/status.go index 469190777b2..2d7e25b6846 100644 --- a/les/vflux/server/status.go +++ b/les/vflux/server/status.go @@ -41,7 +41,7 @@ type serverSetup struct { activeFlag nodestate.Flags // Flag is set if the node is active inactiveFlag nodestate.Flags // Flag is set if the node is inactive capacityField nodestate.Field // Field contains the capacity of the node - queueField nodestate.Field // Field contains the infomration in the priority queue + queueField nodestate.Field // Field contains the information in the priority queue } // newServerSetup initializes the setup for state machine and returns the flags/fields group. diff --git a/light/lightchain.go b/light/lightchain.go index 2a8e3672145..dca97ce45ce 100644 --- a/light/lightchain.go +++ b/light/lightchain.go @@ -397,7 +397,7 @@ func (lc *LightChain) SetCanonical(header *types.Header) error { // // The verify parameter can be used to fine tune whether nonce verification // should be done or not. The reason behind the optional check is because some -// of the header retrieval mechanisms already need to verfy nonces, as well as +// of the header retrieval mechanisms already need to verify nonces, as well as // because nonces can be verified sparsely, not needing to check each. // // In the case of a light chain, InsertHeaderChain also creates and posts light diff --git a/light/odr_util.go b/light/odr_util.go index bbbcdbce213..48631139b48 100644 --- a/light/odr_util.go +++ b/light/odr_util.go @@ -272,9 +272,9 @@ func GetBloomBits(ctx context.Context, odr OdrBackend, bit uint, sections []uint // GetTransaction retrieves a canonical transaction by hash and also returns // its position in the chain. There is no guarantee in the LES protocol that // the mined transaction will be retrieved back for sure because of different -// reasons(the transaction is unindexed, the malicous server doesn't reply it +// reasons(the transaction is unindexed, the malicious server doesn't reply it // deliberately, etc). Therefore, unretrieved transactions will receive a certain -// number of retrys, thus giving a weak guarantee. +// number of retries, thus giving a weak guarantee. 
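Given that weak guarantee, a caller should treat a miss as unknown rather than as proof of absence. A minimal caller sketch, assuming ctx, odr and txHash in scope:

// A nil transaction may mean unindexed or withheld, not "not included".
tx, blockHash, blockNumber, index, err := light.GetTransaction(ctx, odr, txHash)
if err != nil || tx == nil {
	// report the status as unknown, or retry against another server
}
_, _, _ = blockHash, blockNumber, index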
func GetTransaction(ctx context.Context, odr OdrBackend, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { r := &TxStatusRequest{Hashes: []common.Hash{txHash}} if err := odr.RetrieveTxStatus(ctx, r); err != nil || r.Status[0].Status != core.TxStatusIncluded { diff --git a/light/postprocess.go b/light/postprocess.go index 0e50dab9671..3f9da659333 100644 --- a/light/postprocess.go +++ b/light/postprocess.go @@ -313,7 +313,7 @@ var ( BloomTrieTablePrefix = "blt-" ) -// GetBloomTrieRoot reads the BloomTrie root assoctiated to the given section from the database +// GetBloomTrieRoot reads the BloomTrie root associated to the given section from the database func GetBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash { var encNumber [8]byte binary.BigEndian.PutUint64(encNumber[:], sectionIdx) @@ -321,7 +321,7 @@ func GetBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.H return common.BytesToHash(data) } -// StoreBloomTrieRoot writes the BloomTrie root assoctiated to the given section into the database +// StoreBloomTrieRoot writes the BloomTrie root associated to the given section into the database func StoreBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) { var encNumber [8]byte binary.BigEndian.PutUint64(encNumber[:], sectionIdx) diff --git a/light/trie.go b/light/trie.go index f60edaa3b17..b88265e87d4 100644 --- a/light/trie.go +++ b/light/trie.go @@ -153,7 +153,7 @@ func (t *odrTrie) TryDelete(key []byte) error { }) } -// TryDeleteACcount abstracts an account deletion from the trie. +// TryDeleteAccount abstracts an account deletion from the trie. func (t *odrTrie) TryDeleteAccount(key []byte) error { key = crypto.Keccak256(key) return t.do(key, func() error { diff --git a/light/txpool.go b/light/txpool.go index 413337208b8..b3e1a62e189 100644 --- a/light/txpool.go +++ b/light/txpool.go @@ -71,7 +71,7 @@ type TxPool struct { eip2718 bool // Fork indicator whether we are in the eip2718 stage. } -// TxRelayBackend provides an interface to the mechanism that forwards transacions +// TxRelayBackend provides an interface to the mechanism that forwards transactions // to the ETH network. The implementations of the functions should be non-blocking. 
// // Send instructs backend to forward new transactions diff --git a/metrics/gauge_float64_test.go b/metrics/gauge_float64_test.go index 02b75580c4e..7b854d232ba 100644 --- a/metrics/gauge_float64_test.go +++ b/metrics/gauge_float64_test.go @@ -2,7 +2,7 @@ package metrics import "testing" -func BenchmarkGuageFloat64(b *testing.B) { +func BenchmarkGaugeFloat64(b *testing.B) { g := NewGaugeFloat64() b.ResetTimer() for i := 0; i < b.N; i++ { diff --git a/metrics/gauge_test.go b/metrics/gauge_test.go index 3aee143455c..a98fe985d8c 100644 --- a/metrics/gauge_test.go +++ b/metrics/gauge_test.go @@ -5,7 +5,7 @@ import ( "testing" ) -func BenchmarkGuage(b *testing.B) { +func BenchmarkGauge(b *testing.B) { g := NewGauge() b.ResetTimer() for i := 0; i < b.N; i++ { diff --git a/metrics/prometheus/prometheus.go b/metrics/prometheus/prometheus.go index 9ad5ec7e992..c8408d8cab8 100644 --- a/metrics/prometheus/prometheus.go +++ b/metrics/prometheus/prometheus.go @@ -36,7 +36,7 @@ func Handler(reg metrics.Registry) http.Handler { }) sort.Strings(names) - // Aggregate all the metris into a Prometheus collector + // Aggregate all the metrics into a Prometheus collector c := newCollector() for _, name := range names { diff --git a/miner/unconfirmed_test.go b/miner/unconfirmed_test.go index dc83cb92652..60958f658ab 100644 --- a/miner/unconfirmed_test.go +++ b/miner/unconfirmed_test.go @@ -74,7 +74,7 @@ func TestUnconfirmedShifts(t *testing.T) { if n := pool.blocks.Len(); n != int(limit)/2 { t.Errorf("unconfirmed count mismatch: have %d, want %d", n, limit/2) } - // Try to shift all the remaining blocks out and verify emptyness + // Try to shift all the remaining blocks out and verify emptiness pool.Shift(start + 2*uint64(limit)) if n := pool.blocks.Len(); n != 0 { t.Errorf("unconfirmed count mismatch: have %d, want %d", n, 0) diff --git a/miner/worker_test.go b/miner/worker_test.go index bda0fd4899b..ec5ba67e1c6 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -494,7 +494,7 @@ func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine co } w.start() - time.Sleep(time.Second) // Ensure two tasks have been summitted due to start opt + time.Sleep(time.Second) // Ensure two tasks have been submitted due to start opt atomic.StoreUint32(&start, 1) w.setRecommitInterval(3 * time.Second) diff --git a/mobile/accounts.go b/mobile/accounts.go index 4d979bffff5..d9eab93a741 100644 --- a/mobile/accounts.go +++ b/mobile/accounts.go @@ -212,10 +212,10 @@ func (ks *KeyStore) ImportECDSAKey(key []byte, passphrase string) (account *Acco // ImportPreSaleKey decrypts the given Ethereum presale wallet and stores // a key file in the key directory. The key file is encrypted with the same passphrase. -func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (ccount *Account, _ error) { - account, err := ks.keystore.ImportPreSaleKey(common.CopyBytes(keyJSON), passphrase) +func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (account *Account, _ error) { + acc, err := ks.keystore.ImportPreSaleKey(common.CopyBytes(keyJSON), passphrase) if err != nil { return nil, err } - return &Account{account}, nil + return &Account{acc}, nil } diff --git a/mobile/init.go b/mobile/init.go index 2025d85edc9..94f5baf28be 100644 --- a/mobile/init.go +++ b/mobile/init.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// Contains initialization code for the mbile library. 
+// Contains initialization code for the mobile library. package geth diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go index 6fb16c504a9..09acf7ea045 100644 --- a/node/rpcstack_test.go +++ b/node/rpcstack_test.go @@ -100,7 +100,7 @@ func TestWebsocketOrigins(t *testing.T) { expFail: []string{ "test", // no scheme, required by spec "http://test", // wrong scheme - "http://test.foo", "https://a.test.x", // subdomain variatoins + "http://test.foo", "https://a.test.x", // subdomain variations "http://testx:8540", "https://xtest:8540"}, }, // ip tests diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index 95a6df8e1bd..67cd2c004cf 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -525,7 +525,7 @@ func (t *UDPv4) readLoop(unhandled chan<- ReadPacket) { t.log.Debug("Temporary UDP read error", "err", err) continue } else if err != nil { - // Shut down the loop for permament errors. + // Shut down the loop for permanent errors. if !errors.Is(err, io.EOF) { t.log.Debug("UDP read error", "err", err) } diff --git a/p2p/discover/v5_udp.go b/p2p/discover/v5_udp.go index 6ffa7bef7e1..071ed65adc7 100644 --- a/p2p/discover/v5_udp.go +++ b/p2p/discover/v5_udp.go @@ -625,7 +625,7 @@ func (t *UDPv5) readLoop() { t.log.Debug("Temporary UDP read error", "err", err) continue } else if err != nil { - // Shut down the loop for permament errors. + // Shut down the loop for permanent errors. if !errors.Is(err, io.EOF) { t.log.Debug("UDP read error", "err", err) } diff --git a/p2p/msgrate/msgrate.go b/p2p/msgrate/msgrate.go index 5bfa27b4337..d4e0eb8b5aa 100644 --- a/p2p/msgrate/msgrate.go +++ b/p2p/msgrate/msgrate.go @@ -111,7 +111,7 @@ const tuningImpact = 0.25 // local link is saturated. In that case, the live measurements will force us // to reduce request sizes until the throughput gets stable. // -// Lastly, message rate measurements allows us to detect if a peer is unsuaully +// Lastly, message rate measurements allow us to detect if a peer is unusually // slow compared to other peers, in which case we can decide to keep it around // or free up the slot so someone closer. // @@ -127,7 +127,7 @@ type Tracker struct { // in their sizes. // // Callers of course are free to use the item counter as a byte counter if - // or when their protocol of choise if capped by bytes instead of items. + // or when their protocol of choice is capped by bytes instead of items. // (eg. eth.getHeaders vs snap.getAccountRange). capacity map[uint64]float64 @@ -157,7 +157,7 @@ func NewTracker(caps map[uint64]float64, rtt time.Duration) *Tracker { } // Capacity calculates the number of items the peer is estimated to be able to -// retrieve within the alloted time slot. The method will round up any division +// retrieve within the allotted time slot. The method will round up any division // errors and will add an additional overestimation ratio on top. The reason for // overshooting the capacity is because certain message types might not increase // the load proportionally to the requested items, so fetching a bit more might diff --git a/p2p/tracker/tracker.go b/p2p/tracker/tracker.go index 69a49087e2c..6a733b9ba51 100644 --- a/p2p/tracker/tracker.go +++ b/p2p/tracker/tracker.go @@ -121,7 +121,7 @@ func (t *Tracker) Track(peer string, version uint, reqCode uint64, resCode uint6 } // clean is called automatically when a preset time passes without a response -// being dleivered for the first network request. +// being delivered for the first network request.
func (t *Tracker) clean() { t.lock.Lock() defer t.lock.Unlock() diff --git a/rpc/server.go b/rpc/server.go index babc5688e26..bf1f71a28e2 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -160,7 +160,7 @@ type PeerInfo struct { // Address of client. This will usually contain the IP address and port. RemoteAddr string - // Addditional information for HTTP and WebSocket connections. + // Additional information for HTTP and WebSocket connections. HTTP struct { // Protocol version, i.e. "HTTP/1.1". This is not set for WebSocket. Version string diff --git a/signer/rules/rules_test.go b/signer/rules/rules_test.go index 32901e2ff14..c35da8ecc18 100644 --- a/signer/rules/rules_test.go +++ b/signer/rules/rules_test.go @@ -44,7 +44,7 @@ Three things can happen: 3. Anything else; other return values [*], method not implemented or exception occurred during processing. This means that the operation will continue to manual processing, via the regular UI method chosen by the user. -[*] Note: Future version of the ruleset may use more complex json-based returnvalues, making it possible to not +[*] Note: Future version of the ruleset may use more complex json-based return values, making it possible to not only respond Approve/Reject/Manual, but also modify responses. For example, choose to list only one, but not all accounts in a list-request. The points above will continue to hold for non-json based responses ("Approve"/"Reject"). @@ -242,7 +242,7 @@ func (d *dummyUI) OnApprovedTx(tx ethapi.SignTransactionResult) { func (d *dummyUI) OnSignerStartup(info core.StartupInfo) { } -//TestForwarding tests that the rule-engine correctly dispatches requests to the next caller +// TestForwarding tests that the rule-engine correctly dispatches requests to the next caller func TestForwarding(t *testing.T) { js := "" ui := &dummyUI{make([]string, 0)} @@ -434,7 +434,7 @@ func dummyTx(value hexutil.Big) *core.SignTxRequest { Gas: gas, }, Callinfo: []apitypes.ValidationInfo{ - {Typ: "Warning", Message: "All your base are bellong to us"}, + {Typ: "Warning", Message: "All your base are belong to us"}, }, Meta: core.Metadata{Remote: "remoteip", Local: "localip", Scheme: "inproc"}, } @@ -536,7 +536,7 @@ func (d *dontCallMe) OnApprovedTx(tx ethapi.SignTransactionResult) { d.t.Fatalf("Did not expect next-handler to be called") } -//TestContextIsCleared tests that the rule-engine does not retain variables over several requests. +// TestContextIsCleared tests that the rule-engine does not retain variables over several requests. // if it does, that would be bad since developers may rely on that to store data, // instead of using the disk-based data storage func TestContextIsCleared(t *testing.T) { diff --git a/signer/storage/aes_gcm_storage.go b/signer/storage/aes_gcm_storage.go index f09bfa7d4f0..928d643dd61 100644 --- a/signer/storage/aes_gcm_storage.go +++ b/signer/storage/aes_gcm_storage.go @@ -143,7 +143,7 @@ func (s *AESEncryptedStorage) writeEncryptedStorage(creds map[string]storedCrede // encrypt encrypts plaintext with the given key, with additional data // The 'additionalData' is used to place the (plaintext) KV-store key into the V, -// to prevent the possibility to alter a K, or swap two entries in the KV store with eachother. +// to prevent the possibility to alter a K, or swap two entries in the KV store with each other. 
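To see why the additional data binds an entry to its KV key, a minimal standalone sketch using crypto/aes and crypto/cipher (demo key and nonce only):

// Sealing under additional data "k1" and opening under "k2" fails
// authentication, which is what prevents swapping two entries.
block, _ := aes.NewCipher(make([]byte, 16)) // demo key
aead, _ := cipher.NewGCM(block)
nonce := make([]byte, aead.NonceSize()) // demo nonce; use a random one in practice
ct := aead.Seal(nil, nonce, []byte("secret"), []byte("k1"))
if _, err := aead.Open(nil, nonce, ct, []byte("k2")); err != nil {
	// authentication failure: the ciphertext was bound to "k1"
}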
func encrypt(key []byte, plaintext []byte, additionalData []byte) ([]byte, []byte, error) { block, err := aes.NewCipher(key) if err != nil { diff --git a/trie/hasher.go b/trie/hasher.go index 9e17d639fc9..183e96c229c 100644 --- a/trie/hasher.go +++ b/trie/hasher.go @@ -191,7 +191,7 @@ func (h *hasher) hashData(data []byte) hashNode { } // proofHash is used to construct trie proofs, and returns the 'collapsed' -// node (for later RLP encoding) aswell as the hashed node -- unless the +// node (for later RLP encoding) as well as the hashed node -- unless the // node is smaller than 32 bytes, in which case it will be returned as is. // This method does not do anything on value- or hash-nodes. func (h *hasher) proofHash(original node) (collapsed, hashed node) { diff --git a/trie/proof_test.go b/trie/proof_test.go index 8db035256e3..61667b20ab1 100644 --- a/trie/proof_test.go +++ b/trie/proof_test.go @@ -205,7 +205,7 @@ func TestRangeProofWithNonExistentProof(t *testing.T) { proof := memorydb.New() // Short circuit if the decreased key is same with the previous key - first := decreseKey(common.CopyBytes(entries[start].k)) + first := decreaseKey(common.CopyBytes(entries[start].k)) if start != 0 && bytes.Equal(first, entries[start-1].k) { continue } @@ -214,7 +214,7 @@ func TestRangeProofWithNonExistentProof(t *testing.T) { continue } // Short circuit if the increased key is same with the next key - last := increseKey(common.CopyBytes(entries[end-1].k)) + last := increaseKey(common.CopyBytes(entries[end-1].k)) if end != len(entries) && bytes.Equal(last, entries[end].k) { continue } @@ -274,7 +274,7 @@ func TestRangeProofWithInvalidNonExistentProof(t *testing.T) { // Case 1 start, end := 100, 200 - first := decreseKey(common.CopyBytes(entries[start].k)) + first := decreaseKey(common.CopyBytes(entries[start].k)) proof := memorydb.New() if err := trie.Prove(first, 0, proof); err != nil { @@ -297,7 +297,7 @@ func TestRangeProofWithInvalidNonExistentProof(t *testing.T) { // Case 2 start, end = 100, 200 - last := increseKey(common.CopyBytes(entries[end-1].k)) + last := increaseKey(common.CopyBytes(entries[end-1].k)) proof = memorydb.New() if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) @@ -343,7 +343,7 @@ func TestOneElementRangeProof(t *testing.T) { // One element with left non-existent edge proof start = 1000 - first := decreseKey(common.CopyBytes(entries[start].k)) + first := decreaseKey(common.CopyBytes(entries[start].k)) proof = memorydb.New() if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) @@ -358,7 +358,7 @@ func TestOneElementRangeProof(t *testing.T) { // One element with right non-existent edge proof start = 1000 - last := increseKey(common.CopyBytes(entries[start].k)) + last := increaseKey(common.CopyBytes(entries[start].k)) proof = memorydb.New() if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) @@ -373,7 +373,7 @@ func TestOneElementRangeProof(t *testing.T) { // One element with two non-existent edge proofs start = 1000 - first, last = decreseKey(common.CopyBytes(entries[start].k)), increseKey(common.CopyBytes(entries[start].k)) + first, last = decreaseKey(common.CopyBytes(entries[start].k)), increaseKey(common.CopyBytes(entries[start].k)) proof = memorydb.New() if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) @@ -641,9 +641,9 @@ func TestSameSideProofs(t 
*testing.T) { sort.Sort(entries) pos := 1000 - first := decreseKey(common.CopyBytes(entries[pos].k)) - first = decreseKey(first) - last := decreseKey(common.CopyBytes(entries[pos].k)) + first := decreaseKey(common.CopyBytes(entries[pos].k)) + first = decreaseKey(first) + last := decreaseKey(common.CopyBytes(entries[pos].k)) proof := memorydb.New() if err := trie.Prove(first, 0, proof); err != nil { @@ -657,9 +657,9 @@ func TestSameSideProofs(t *testing.T) { t.Fatalf("Expected error, got nil") } - first = increseKey(common.CopyBytes(entries[pos].k)) - last = increseKey(common.CopyBytes(entries[pos].k)) - last = increseKey(last) + first = increaseKey(common.CopyBytes(entries[pos].k)) + last = increaseKey(common.CopyBytes(entries[pos].k)) + last = increaseKey(last) proof = memorydb.New() if err := trie.Prove(first, 0, proof); err != nil { @@ -765,7 +765,7 @@ func TestEmptyRangeProof(t *testing.T) { } for _, c := range cases { proof := memorydb.New() - first := increseKey(common.CopyBytes(entries[c.pos].k)) + first := increaseKey(common.CopyBytes(entries[c.pos].k)) if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } @@ -904,7 +904,7 @@ func mutateByte(b []byte) { } } -func increseKey(key []byte) []byte { +func increaseKey(key []byte) []byte { for i := len(key) - 1; i >= 0; i-- { key[i]++ if key[i] != 0x0 { @@ -914,7 +914,7 @@ func increseKey(key []byte) []byte { return key } -func decreseKey(key []byte) []byte { +func decreaseKey(key []byte) []byte { for i := len(key) - 1; i >= 0; i-- { key[i]-- if key[i] != 0xff { diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go index 05bddb21b3b..862c3a3ec43 100644 --- a/trie/secure_trie_test.go +++ b/trie/secure_trie_test.go @@ -121,7 +121,7 @@ func TestStateTrieConcurrency(t *testing.T) { for i := 0; i < threads; i++ { tries[i] = trie.Copy() } - // Start a batch of goroutines interactng with the trie + // Start a batch of goroutines interacting with the trie pend := new(sync.WaitGroup) pend.Add(threads) for i := 0; i < threads; i++ { From fa1305f8bf1b1e6c4eadf4d12c9dd8ce7ddc4c6c Mon Sep 17 00:00:00 2001 From: ucwong Date: Fri, 19 Aug 2022 14:01:09 +0800 Subject: [PATCH 05/26] internal/ethapi: fix comment typo (#25548) --- internal/ethapi/transaction_args.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index 787ac65777e..e07248db5d6 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -238,7 +238,7 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (t gasPrice = args.GasPrice.ToInt() gasFeeCap, gasTipCap = gasPrice, gasPrice } else { - // User specified 1559 gas feilds (or none), use those + // User specified 1559 gas fields (or none), use those gasFeeCap = new(big.Int) if args.MaxFeePerGas != nil { gasFeeCap = args.MaxFeePerGas.ToInt() From 32e849061559d2c15d8c9f8752317b3df7ed267d Mon Sep 17 00:00:00 2001 From: ucwong Date: Fri, 19 Aug 2022 14:01:43 +0800 Subject: [PATCH 06/26] accounts/abi/bind/backends: typo fix (#25549) --- accounts/abi/bind/backends/simulated.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index cd14afa1475..e03f2e1202b 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -609,7 +609,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call 
ethereum.CallM // User specified the legacy gas field, convert to 1559 gas typing call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice } else { - // User specified 1559 gas feilds (or none), use those + // User specified 1559 gas fields (or none), use those if call.GasFeeCap == nil { call.GasFeeCap = new(big.Int) } From 656dc8cc0034d3dd57b5e1baecb788d9c4e9929d Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Fri, 19 Aug 2022 01:02:47 -0500 Subject: [PATCH 07/26] eth, les: unlock downloader peerSet if there's an error (#25546) Unlock peerSet if there's an error in the downloader --- eth/downloader/peer.go | 1 + les/downloader/peer.go | 1 + 2 files changed, 2 insertions(+) diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index d74d23e74d5..6b826949594 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -237,6 +237,7 @@ func (ps *peerSet) Register(p *peerConnection) error { } p.rates = msgrate.NewTracker(ps.rates.MeanCapacities(), ps.rates.MedianRoundTrip()) if err := ps.rates.Track(p.id, p.rates); err != nil { + ps.lock.Unlock() return err } ps.peers[p.id] = p diff --git a/les/downloader/peer.go b/les/downloader/peer.go index 5a92e9cf9b8..c2161e2dae4 100644 --- a/les/downloader/peer.go +++ b/les/downloader/peer.go @@ -350,6 +350,7 @@ func (ps *peerSet) Register(p *peerConnection) error { } p.rates = msgrate.NewTracker(ps.rates.MeanCapacities(), ps.rates.MedianRoundTrip()) if err := ps.rates.Track(p.id, p.rates); err != nil { + ps.lock.Unlock() return err } ps.peers[p.id] = p From 9762ddf8b0e9d471600c99f158479912f4870c52 Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Fri, 19 Aug 2022 01:03:45 -0500 Subject: [PATCH 08/26] cmd/geth: parse uint64 value with ParseUint instead of Atoi (#25545) Parse uint64 value with ParseUint instead of Atoi --- cmd/geth/chaincmd.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 87863750615..a3016c4b091 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -384,12 +384,12 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash) } } else { - number, err := strconv.Atoi(arg) + number, err := strconv.ParseUint(arg, 10, 64) if err != nil { return nil, nil, common.Hash{}, err } - if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) { - header = rawdb.ReadHeader(db, hash, uint64(number)) + if hash := rawdb.ReadCanonicalHash(db, number); hash != (common.Hash{}) { + header = rawdb.ReadHeader(db, hash, number) } else { return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number) } From 77308cd6fceb99e276c1e2753d92ff82c6f1a962 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Fri, 19 Aug 2022 10:37:53 +0200 Subject: [PATCH 09/26] consensus/beacon: check ttd reached on pos blocks (#25552) * consensus/beacon: check ttd reached on pos blocks * consensus/beacon: check ttd reached on pos blocks * consensus/beacon: check ttd reached on pos blocks --- consensus/beacon/consensus.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 0397b026f1f..949c8ad816c 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -114,8 +114,16 @@ func (beacon *Beacon) VerifyHeaders(chain 
consensus.ChainHeaderReader, headers [ } } - // All the headers have passed the transition point, use new rules. if len(preHeaders) == 0 { + // All the headers are pos headers. Verify that the parent block reached total terminal difficulty. + if reached, _ := IsTTDReached(chain, headers[0].ParentHash, headers[0].Number.Uint64()-1); !reached { + // TTD not reached for the first block, mark subsequent with invalid terminal block + results := make(chan error, len(headers)) + for i := 0; i < len(headers); i++ { + results <- consensus.ErrInvalidTerminalBlock + } + return make(chan struct{}), results + } return beacon.verifyHeaders(chain, headers, nil) } From 36874b63a1b56eed2b8e4b47ccea0337920b84b8 Mon Sep 17 00:00:00 2001 From: Sina Mahmoodi <1591639+s1na@users.noreply.github.com> Date: Fri, 19 Aug 2022 11:14:59 +0200 Subject: [PATCH 10/26] eth/filters: add global block logs cache (#25459) This adds a cache for block logs which is shared by all filters. The cache size is configurable using the `--cache.blocklogs` flag. Co-authored-by: Felix Lange --- accounts/abi/bind/backends/simulated.go | 31 ++++---- cmd/geth/config.go | 11 ++- cmd/geth/main.go | 1 + cmd/utils/flags.go | 34 +++++++-- eth/api_backend.go | 14 +--- eth/backend.go | 5 -- eth/ethconfig/config.go | 4 + eth/ethconfig/gen_config.go | 6 ++ eth/filters/api.go | 20 ++--- eth/filters/bench_test.go | 16 ++-- eth/filters/filter.go | 97 +++++++++++-------------- eth/filters/filter_system.go | 85 +++++++++++++++++++++- eth/filters/filter_system_test.go | 77 +++++++++----------- eth/filters/filter_test.go | 20 ++--- ethclient/gethclient/gethclient_test.go | 7 ++ graphql/graphql.go | 39 ++++++++-- graphql/graphql_test.go | 9 ++- graphql/service.go | 9 ++- internal/ethapi/backend.go | 14 ++-- les/api_backend.go | 7 +- les/client.go | 4 - 21 files changed, 310 insertions(+), 200 deletions(-) diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index e03f2e1202b..0ce752103c9 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -68,7 +68,8 @@ type SimulatedBackend struct { pendingState *state.StateDB // Currently pending state that will be the active on request pendingReceipts types.Receipts // Currently receipts for the pending block - events *filters.EventSystem // Event system for filtering log events live + events *filters.EventSystem // for filtering log events live + filterSystem *filters.FilterSystem // for filtering database logs config *params.ChainConfig } @@ -86,7 +87,11 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis blockchain: blockchain, config: genesis.Config, } - backend.events = filters.NewEventSystem(&filterBackend{database, blockchain, backend}, false) + + filterBackend := &filterBackend{database, blockchain, backend} + backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{}) + backend.events = filters.NewEventSystem(backend.filterSystem, false) + backend.rollback(blockchain.CurrentBlock()) return backend } @@ -689,7 +694,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter var filter *filters.Filter if query.BlockHash != nil { // Block filter requested, construct a single-shot filter - filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain, b}, *query.BlockHash, query.Addresses, query.Topics) + filter = b.filterSystem.NewBlockFilter(*query.BlockHash, query.Addresses, query.Topics) } else { // Initialize unset filter 
boundaries to run from genesis to chain head from := int64(0) @@ -701,7 +706,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter to = query.ToBlock.Int64() } // Construct the range filter - filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain, b}, from, to, query.Addresses, query.Topics) + filter = b.filterSystem.NewRangeFilter(from, to, query.Addresses, query.Topics) } // Run the filter and return all the logs logs, err := filter.Logs(ctx) @@ -827,7 +832,8 @@ type filterBackend struct { backend *SimulatedBackend } -func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db } +func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db } + func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") } func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) { @@ -853,19 +859,8 @@ func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (typ return rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config()), nil } -func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { - number := rawdb.ReadHeaderNumber(fb.db, hash) - if number == nil { - return nil, nil - } - receipts := rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config()) - if receipts == nil { - return nil, nil - } - logs := make([][]*types.Log, len(receipts)) - for i, receipt := range receipts { - logs[i] = receipt.Logs - } +func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { + logs := rawdb.ReadLogs(fb.db, hash, number, fb.bc.Config()) return logs, nil } diff --git a/cmd/geth/config.go b/cmd/geth/config.go index 2562de8ae9e..30565fda618 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -163,7 +163,9 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { override := ctx.Bool(utils.OverrideTerminalTotalDifficultyPassed.Name) cfg.Eth.OverrideTerminalTotalDifficultyPassed = &override } + backend, eth := utils.RegisterEthService(stack, &cfg.Eth) + // Warn users to migrate if they have a legacy freezer format. if eth != nil && !ctx.IsSet(utils.IgnoreLegacyReceiptsFlag.Name) { firstIdx := uint64(0) @@ -181,10 +183,15 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { utils.Fatalf("Database has receipts with a legacy format. Please run `geth db freezer-migrate`.") } } - // Configure GraphQL if requested + + // Configure log filter RPC API. + filterSystem := utils.RegisterFilterAPI(stack, backend, &cfg.Eth) + + // Configure GraphQL if requested. if ctx.IsSet(utils.GraphQLEnabledFlag.Name) { - utils.RegisterGraphQLService(stack, backend, cfg.Node) + utils.RegisterGraphQLService(stack, backend, filterSystem, &cfg.Node) } + // Add the Ethereum Stats daemon if requested. 
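// [Editor's note] The recurring wiring pattern in this patch: construct one
// filters.FilterSystem per node and hand it to everything that filters logs,
// so all consumers share a single cache. A hedged usage sketch, assuming some
// value `backend` that implements filters.Backend:
//
//	sys := filters.NewFilterSystem(backend, filters.Config{})
//	f := sys.NewRangeFilter(0, -1, nil, nil) // genesis to head, no criteria
//	logs, err := f.Logs(context.Background())
//
// NewRangeFilter and NewBlockFilter become methods on FilterSystem below, so
// every filter created this way reads through the shared block-log cache.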
if cfg.Ethstats.URL != "" { utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index c0f636fb26f..b9e3ed31e81 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -117,6 +117,7 @@ var ( utils.CacheSnapshotFlag, utils.CacheNoPrefetchFlag, utils.CachePreimagesFlag, + utils.CacheLogSizeFlag, utils.FDLimitFlag, utils.ListenPortFlag, utils.DiscoveryPortFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index efea9349250..9e95193343a 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -43,6 +43,7 @@ import ( ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/ethdb" @@ -64,6 +65,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" pcsclite "github.com/gballet/go-libpcsclite" gopsutil "github.com/shirou/gopsutil/mem" "github.com/urfave/cli/v2" @@ -491,6 +493,12 @@ var ( Usage: "Enable recording the SHA3/keccak preimages of trie keys", Category: flags.PerfCategory, } + CacheLogSizeFlag = &cli.IntFlag{ + Name: "cache.blocklogs", + Usage: "Size (in number of blocks) of the log cache for filtering", + Category: flags.PerfCategory, + Value: ethconfig.Defaults.FilterLogCacheSize, + } FDLimitFlag = &cli.IntFlag{ Name: "fdlimit", Usage: "Raise the open file descriptor resource limit (default = system fd limit)", @@ -1808,6 +1816,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheSnapshotFlag.Name) { cfg.SnapshotCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheSnapshotFlag.Name) / 100 } + if ctx.IsSet(CacheLogSizeFlag.Name) { + cfg.FilterLogCacheSize = ctx.Int(CacheLogSizeFlag.Name) + } if !ctx.Bool(SnapshotFlag.Name) { // If snap-sync is requested, this flag is also required if cfg.SyncMode == downloader.SnapSync { @@ -2005,21 +2016,34 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend return backend.APIBackend, backend } -// RegisterEthStatsService configures the Ethereum Stats daemon and adds it to -// the given node. +// RegisterEthStatsService configures the Ethereum Stats daemon and adds it to the node. func RegisterEthStatsService(stack *node.Node, backend ethapi.Backend, url string) { if err := ethstats.New(stack, backend, backend.Engine(), url); err != nil { Fatalf("Failed to register the Ethereum Stats service: %v", err) } } -// RegisterGraphQLService is a utility function to construct a new service and register it against a node. -func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, cfg node.Config) { - if err := graphql.New(stack, backend, cfg.GraphQLCors, cfg.GraphQLVirtualHosts); err != nil { +// RegisterGraphQLService adds the GraphQL API to the node. +func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSystem *filters.FilterSystem, cfg *node.Config) { + err := graphql.New(stack, backend, filterSystem, cfg.GraphQLCors, cfg.GraphQLVirtualHosts) + if err != nil { Fatalf("Failed to register the GraphQL service: %v", err) } } +// RegisterFilterAPI adds the eth log filtering RPC API to the node. 
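// [Editor's note] With CacheLogSizeFlag defined above, the cache size becomes
// tunable at startup; the value counts blocks whose logs stay cached (default
// 32, per ethconfig.Defaults later in this patch). Usage sketch:
//
//	geth --cache.blocklogs 64
//
// SetEthConfig copies the flag into ethconfig.Config.FilterLogCacheSize, and
// RegisterFilterAPI (below) forwards it as filters.Config.LogCacheSize.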
+func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconfig.Config) *filters.FilterSystem { + isLightClient := ethcfg.SyncMode == downloader.LightSync + filterSystem := filters.NewFilterSystem(backend, filters.Config{ + LogCacheSize: ethcfg.FilterLogCacheSize, + }) + stack.RegisterAPIs([]rpc.API{{ + Namespace: "eth", + Service: filters.NewFilterAPI(filterSystem, isLightClient), + }}) + return filterSystem +} + func SetupMetrics(ctx *cli.Context) { if metrics.Enabled { log.Info("Enabling metrics collection") diff --git a/eth/api_backend.go b/eth/api_backend.go index 1d8ba8ea5ca..00ecacc31df 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -19,7 +19,6 @@ package eth import ( "context" "errors" - "fmt" "math/big" "time" @@ -202,17 +201,8 @@ func (b *EthAPIBackend) GetReceipts(ctx context.Context, hash common.Hash) (type return b.eth.blockchain.GetReceiptsByHash(hash), nil } -func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { - db := b.eth.ChainDb() - number := rawdb.ReadHeaderNumber(db, hash) - if number == nil { - return nil, fmt.Errorf("failed to get block number for hash %#x", hash) - } - logs := rawdb.ReadLogs(db, hash, *number, b.eth.blockchain.Config()) - if logs == nil { - return nil, fmt.Errorf("failed to get logs for block #%d (0x%s)", *number, hash.TerminalString()) - } - return logs, nil +func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { + return rawdb.ReadLogs(b.eth.chainDb, hash, number, b.ChainConfig()), nil } func (b *EthAPIBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { diff --git a/eth/backend.go b/eth/backend.go index ebe7001c799..77820763634 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -25,7 +25,6 @@ import ( "strings" "sync" "sync/atomic" - "time" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" @@ -41,7 +40,6 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" - "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/snap" @@ -315,9 +313,6 @@ func (s *Ethereum) APIs() []rpc.API { }, { Namespace: "eth", Service: downloader.NewDownloaderAPI(s.handler.downloader, s.eventMux), - }, { - Namespace: "eth", - Service: filters.NewFilterAPI(s.APIBackend, false, 5*time.Minute), }, { Namespace: "admin", Service: NewAdminAPI(s), diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index d1323de7b0f..5690366421d 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -83,6 +83,7 @@ var Defaults = Config{ TrieDirtyCache: 256, TrieTimeout: 60 * time.Minute, SnapshotCache: 102, + FilterLogCacheSize: 32, Miner: miner.Config{ GasCeil: 30000000, GasPrice: big.NewInt(params.GWei), @@ -171,6 +172,9 @@ type Config struct { SnapshotCache int Preimages bool + // This is the number of blocks for which logs will be cached in the filter system. 
+ FilterLogCacheSize int + // Mining options Miner miner.Config diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index a6528c8df5f..9c7a04364d2 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -48,6 +48,7 @@ func (c Config) MarshalTOML() (interface{}, error) { TrieTimeout time.Duration SnapshotCache int Preimages bool + FilterLogCacheSize int Miner miner.Config Ethash ethash.Config TxPool core.TxPoolConfig @@ -93,6 +94,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.TrieTimeout = c.TrieTimeout enc.SnapshotCache = c.SnapshotCache enc.Preimages = c.Preimages + enc.FilterLogCacheSize = c.FilterLogCacheSize enc.Miner = c.Miner enc.Ethash = c.Ethash enc.TxPool = c.TxPool @@ -142,6 +144,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { TrieTimeout *time.Duration SnapshotCache *int Preimages *bool + FilterLogCacheSize *int Miner *miner.Config Ethash *ethash.Config TxPool *core.TxPoolConfig @@ -250,6 +253,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.Preimages != nil { c.Preimages = *dec.Preimages } + if dec.FilterLogCacheSize != nil { + c.FilterLogCacheSize = *dec.FilterLogCacheSize + } if dec.Miner != nil { c.Miner = *dec.Miner } diff --git a/eth/filters/api.go b/eth/filters/api.go index 3b8933d0af9..43e63d5ba98 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -46,7 +46,7 @@ type filter struct { // FilterAPI offers support to create and manage filters. This will allow external clients to retrieve various // information related to the Ethereum protocol such als blocks, transactions and logs. type FilterAPI struct { - backend Backend + sys *FilterSystem events *EventSystem filtersMu sync.Mutex filters map[rpc.ID]*filter @@ -54,14 +54,14 @@ type FilterAPI struct { } // NewFilterAPI returns a new FilterAPI instance. 
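// [Editor's note] The API constructor below loses its timeout parameter: the
// deadline now rides along in the FilterSystem's Config. A hedged construction
// sketch (assuming a `backend` value):
//
//	sys := filters.NewFilterSystem(backend, filters.Config{
//	    Timeout: 10 * time.Minute, // zero value falls back to 5 minutes
//	})
//	api := filters.NewFilterAPI(sys, false /* lightMode */)
//
// Callers that used to pass a deadline directly (see the test updates later
// in this patch) set it through the config instead.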
-func NewFilterAPI(backend Backend, lightMode bool, timeout time.Duration) *FilterAPI { +func NewFilterAPI(system *FilterSystem, lightMode bool) *FilterAPI { api := &FilterAPI{ - backend: backend, - events: NewEventSystem(backend, lightMode), + sys: system, + events: NewEventSystem(system, lightMode), filters: make(map[rpc.ID]*filter), - timeout: timeout, + timeout: system.cfg.Timeout, } - go api.timeoutLoop(timeout) + go api.timeoutLoop(system.cfg.Timeout) return api } @@ -320,7 +320,7 @@ func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*type var filter *Filter if crit.BlockHash != nil { // Block filter requested, construct a single-shot filter - filter = NewBlockFilter(api.backend, *crit.BlockHash, crit.Addresses, crit.Topics) + filter = api.sys.NewBlockFilter(*crit.BlockHash, crit.Addresses, crit.Topics) } else { // Convert the RPC block numbers into internal representations begin := rpc.LatestBlockNumber.Int64() @@ -332,7 +332,7 @@ func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*type end = crit.ToBlock.Int64() } // Construct the range filter - filter = NewRangeFilter(api.backend, begin, end, crit.Addresses, crit.Topics) + filter = api.sys.NewRangeFilter(begin, end, crit.Addresses, crit.Topics) } // Run the filter and return all the logs logs, err := filter.Logs(ctx) @@ -371,7 +371,7 @@ func (api *FilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Lo var filter *Filter if f.crit.BlockHash != nil { // Block filter requested, construct a single-shot filter - filter = NewBlockFilter(api.backend, *f.crit.BlockHash, f.crit.Addresses, f.crit.Topics) + filter = api.sys.NewBlockFilter(*f.crit.BlockHash, f.crit.Addresses, f.crit.Topics) } else { // Convert the RPC block numbers into internal representations begin := rpc.LatestBlockNumber.Int64() @@ -383,7 +383,7 @@ func (api *FilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Lo end = f.crit.ToBlock.Int64() } // Construct the range filter - filter = NewRangeFilter(api.backend, begin, end, f.crit.Addresses, f.crit.Topics) + filter = api.sys.NewRangeFilter(begin, end, f.crit.Addresses, f.crit.Topics) } // Run the filter and return all the logs logs, err := filter.Logs(ctx) diff --git a/eth/filters/bench_test.go b/eth/filters/bench_test.go index 694d7373502..73b96b77af6 100644 --- a/eth/filters/bench_test.go +++ b/eth/filters/bench_test.go @@ -122,22 +122,27 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) { b.Log("Running filter benchmarks...") start = time.Now() - var backend *testBackend + var ( + backend *testBackend + sys *FilterSystem + ) for i := 0; i < benchFilterCnt; i++ { if i%20 == 0 { db.Close() db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false) backend = &testBackend{db: db, sections: cnt} + sys = NewFilterSystem(backend, Config{}) } var addr common.Address addr[0] = byte(i) addr[1] = byte(i / 256) - filter := NewRangeFilter(backend, 0, int64(cnt*sectionSize-1), []common.Address{addr}, nil) + filter := sys.NewRangeFilter(0, int64(cnt*sectionSize-1), []common.Address{addr}, nil) if _, err := filter.Logs(context.Background()); err != nil { - b.Error("filter.Find error:", err) + b.Error("filter.Logs error:", err) } } + d = time.Since(start) b.Log("Finished running filter benchmarks") b.Log(" ", d, "total ", d/time.Duration(benchFilterCnt), "per address", d*time.Duration(1000000)/time.Duration(benchFilterCnt*cnt*sectionSize), "per million blocks") @@ -171,10 +176,11 @@ func BenchmarkNoBloomBits(b *testing.B) { 
clearBloomBits(db) + _, sys := newTestFilterSystem(b, db, Config{}) + b.Log("Running filter benchmarks...") start := time.Now() - backend := &testBackend{db: db} - filter := NewRangeFilter(backend, 0, int64(*headNum), []common.Address{{}}, nil) + filter := sys.NewRangeFilter(0, int64(*headNum), []common.Address{{}}, nil) filter.Logs(context.Background()) d := time.Since(start) b.Log("Finished running filter benchmarks") diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 9ff7ab7f55e..0a70c9ece1d 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -22,37 +22,15 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/bloombits" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/rpc" ) -type Backend interface { - ChainDb() ethdb.Database - HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) - HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error) - GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) - GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) - PendingBlockAndReceipts() (*types.Block, types.Receipts) - - SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription - SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription - SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription - SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription - SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription - - BloomStatus() (uint64, uint64) - ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) -} - // Filter can be used to retrieve and filter logs. type Filter struct { - backend Backend + sys *FilterSystem - db ethdb.Database addresses []common.Address topics [][]common.Hash @@ -64,7 +42,7 @@ type Filter struct { // NewRangeFilter creates a new filter which uses a bloom filter on blocks to // figure out whether a particular block is interesting or not. -func NewRangeFilter(backend Backend, begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter { +func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter { // Flatten the address and topic filter clauses into a single bloombits filter // system. Since the bloombits are not positional, nil topics are permitted, // which get flattened into a nil byte slice. @@ -83,10 +61,10 @@ func NewRangeFilter(backend Backend, begin, end int64, addresses []common.Addres } filters = append(filters, filter) } - size, _ := backend.BloomStatus() + size, _ := sys.backend.BloomStatus() // Create a generic filter and convert it into a range filter - filter := newFilter(backend, addresses, topics) + filter := newFilter(sys, addresses, topics) filter.matcher = bloombits.NewMatcher(size, filters) filter.begin = begin @@ -97,21 +75,20 @@ func NewRangeFilter(backend Backend, begin, end int64, addresses []common.Addres // NewBlockFilter creates a new filter which directly inspects the contents of // a block to figure out whether it is interesting or not. 
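// [Editor's note] The flattening in NewRangeFilter above builds the
// [][][]byte shape the bloombits matcher expects: groups are ANDed together,
// alternatives within a group are ORed, and a nil entry acts as a wildcard.
// So "address A, first topic T1 or T2" becomes, schematically:
//
//	filters := [][][]byte{
//	    {A.Bytes()},              // address group: must match A
//	    {T1.Bytes(), T2.Bytes()}, // topic group: either hash may match
//	}
//	matcher := bloombits.NewMatcher(size, filters)
//
// (A, T1 and T2 are placeholder common.Address/common.Hash values, not names
// from this diff.)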
-func NewBlockFilter(backend Backend, block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter { +func (sys *FilterSystem) NewBlockFilter(block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter { // Create a generic filter and convert it into a block filter - filter := newFilter(backend, addresses, topics) + filter := newFilter(sys, addresses, topics) filter.block = block return filter } // newFilter creates a generic filter that can either filter based on a block hash, // or based on range queries. The search criteria needs to be explicitly set. -func newFilter(backend Backend, addresses []common.Address, topics [][]common.Hash) *Filter { +func newFilter(sys *FilterSystem, addresses []common.Address, topics [][]common.Hash) *Filter { return &Filter{ - backend: backend, + sys: sys, addresses: addresses, topics: topics, - db: backend.ChainDb(), } } @@ -120,14 +97,14 @@ func newFilter(backend Backend, addresses []common.Address, topics [][]common.Ha func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { // If we're doing singleton block filtering, execute and return if f.block != (common.Hash{}) { - header, err := f.backend.HeaderByHash(ctx, f.block) + header, err := f.sys.backend.HeaderByHash(ctx, f.block) if err != nil { return nil, err } if header == nil { return nil, errors.New("unknown block") } - return f.blockLogs(ctx, header) + return f.blockLogs(ctx, header, false) } // Short-cut if all we care about is pending logs if f.begin == rpc.PendingBlockNumber.Int64() { @@ -137,7 +114,7 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { return f.pendingLogs() } // Figure out the limits of the filter range - header, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) + header, _ := f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) if header == nil { return nil, nil } @@ -156,7 +133,7 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { var ( logs []*types.Log err error - size, sections = f.backend.BloomStatus() + size, sections = f.sys.backend.BloomStatus() ) if indexed := sections * size; indexed > uint64(f.begin) { if indexed > end { @@ -192,7 +169,7 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err } defer session.Close() - f.backend.ServiceFilter(ctx, session) + f.sys.backend.ServiceFilter(ctx, session) // Iterate over the matches until exhausted or context closed var logs []*types.Log @@ -211,11 +188,11 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err f.begin = int64(number) + 1 // Retrieve the suggested block and pull any truly matching logs - header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(number)) + header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number)) if header == nil || err != nil { return logs, err } - found, err := f.checkMatches(ctx, header) + found, err := f.blockLogs(ctx, header, true) if err != nil { return logs, err } @@ -233,11 +210,11 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e var logs []*types.Log for ; f.begin <= int64(end); f.begin++ { - header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin)) + header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin)) if header == nil || err != nil { return logs, err } - found, err := f.blockLogs(ctx, header) + found, err := f.blockLogs(ctx, header, false) if err != nil { return logs, err } @@ -247,34 +224,34 @@ func (f *Filter) unindexedLogs(ctx 
context.Context, end uint64) ([]*types.Log, e } // blockLogs returns the logs matching the filter criteria within a single block. -func (f *Filter) blockLogs(ctx context.Context, header *types.Header) (logs []*types.Log, err error) { - if bloomFilter(header.Bloom, f.addresses, f.topics) { - found, err := f.checkMatches(ctx, header) +func (f *Filter) blockLogs(ctx context.Context, header *types.Header, skipBloom bool) ([]*types.Log, error) { + // Fast track: no filtering criteria + if len(f.addresses) == 0 && len(f.topics) == 0 { + list, err := f.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64()) if err != nil { - return logs, err + return nil, err } - logs = append(logs, found...) + return flatten(list), nil + } else if skipBloom || bloomFilter(header.Bloom, f.addresses, f.topics) { + return f.checkMatches(ctx, header) } - return logs, nil + return nil, nil } // checkMatches checks if the receipts belonging to the given header contain any log events that // match the filter criteria. This function is called when the bloom filter signals a potential match. -func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs []*types.Log, err error) { - // Get the logs of the block - logsList, err := f.backend.GetLogs(ctx, header.Hash()) +func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) { + logsList, err := f.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64()) if err != nil { return nil, err } - var unfiltered []*types.Log - for _, logs := range logsList { - unfiltered = append(unfiltered, logs...) - } - logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics) + + unfiltered := flatten(logsList) + logs := filterLogs(unfiltered, nil, nil, f.addresses, f.topics) if len(logs) > 0 { // We have matching logs, check if we need to resolve full logs via the light client if logs[0].TxHash == (common.Hash{}) { - receipts, err := f.backend.GetReceipts(ctx, header.Hash()) + receipts, err := f.sys.backend.GetReceipts(ctx, header.Hash()) if err != nil { return nil, err } @@ -291,7 +268,7 @@ func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs [ // pendingLogs returns the logs matching the filter criteria within the pending block. func (f *Filter) pendingLogs() ([]*types.Log, error) { - block, receipts := f.backend.PendingBlockAndReceipts() + block, receipts := f.sys.backend.PendingBlockAndReceipts() if bloomFilter(block.Bloom(), f.addresses, f.topics) { var unfiltered []*types.Log for _, r := range receipts { @@ -376,3 +353,11 @@ func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]commo } return true } + +func flatten(list [][]*types.Log) []*types.Log { + var flat []*types.Log + for _, logs := range list { + flat = append(flat, logs...) + } + return flat +} diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index c1a1b408b7a..79a9b089f42 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -27,13 +27,90 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/bloombits" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" + lru "github.com/hashicorp/golang-lru" ) +// Config represents the configuration of the filter system. 
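// [Editor's note] Both fields of the Config below are optional; withDefaults
// maps zero values to 32 cached blocks and a 5 minute filter timeout, so the
// empty literal used throughout the tests is meaningful:
//
//	cfg := Config{}.withDefaults()
//	// cfg.LogCacheSize == 32, cfg.Timeout == 5*time.Minute
//
// Outside the package the same effect comes from passing filters.Config{}
// straight to NewFilterSystem, since it applies the defaults itself.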
+type Config struct { + LogCacheSize int // maximum number of cached blocks (default: 32) + Timeout time.Duration // how long filters stay active (default: 5min) +} + +func (cfg Config) withDefaults() Config { + if cfg.Timeout == 0 { + cfg.Timeout = 5 * time.Minute + } + if cfg.LogCacheSize == 0 { + cfg.LogCacheSize = 32 + } + return cfg +} + +type Backend interface { + ChainDb() ethdb.Database + HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) + HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error) + GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) + GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) + PendingBlockAndReceipts() (*types.Block, types.Receipts) + + SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription + SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription + SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription + SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription + SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription + + BloomStatus() (uint64, uint64) + ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) +} + +// FilterSystem holds resources shared by all filters. +type FilterSystem struct { + backend Backend + logsCache *lru.Cache + cfg *Config +} + +// NewFilterSystem creates a filter system. +func NewFilterSystem(backend Backend, config Config) *FilterSystem { + config = config.withDefaults() + + cache, err := lru.New(config.LogCacheSize) + if err != nil { + panic(err) + } + return &FilterSystem{ + backend: backend, + logsCache: cache, + cfg: &config, + } +} + +// cachedGetLogs loads block logs from the backend and caches the result. +func (sys *FilterSystem) cachedGetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) { + cached, ok := sys.logsCache.Get(blockHash) + if ok { + return cached.([][]*types.Log), nil + } + + logs, err := sys.backend.GetLogs(ctx, blockHash, number) + if err != nil { + return nil, err + } + if logs == nil { + return nil, fmt.Errorf("failed to get logs for block #%d (0x%s)", number, blockHash.TerminalString()) + } + sys.logsCache.Add(blockHash, logs) + return logs, nil +} + // Type determines the kind of filter and is used to put the filter in to // the correct bucket when added. type Type byte @@ -84,6 +161,7 @@ type subscription struct { // subscription which match the subscription criteria. type EventSystem struct { backend Backend + sys *FilterSystem lightMode bool lastHead *types.Header @@ -110,9 +188,10 @@ type EventSystem struct { // // The returned manager has a loop that needs to be stopped with the Stop function // or by stopping the given mux. -func NewEventSystem(backend Backend, lightMode bool) *EventSystem { +func NewEventSystem(sys *FilterSystem, lightMode bool) *EventSystem { m := &EventSystem{ - backend: backend, + sys: sys, + backend: sys.backend, lightMode: lightMode, install: make(chan *subscription), uninstall: make(chan *subscription), @@ -405,7 +484,7 @@ func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common. 
// Get the logs of the block ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - logsList, err := es.backend.GetLogs(ctx, header.Hash()) + logsList, err := es.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64()) if err != nil { return nil } diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index c7fc4331b22..51bda29b424 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -39,10 +39,6 @@ import ( "github.com/ethereum/go-ethereum/rpc" ) -var ( - deadline = 5 * time.Minute -) - type testBackend struct { db ethdb.Database sections uint64 @@ -91,17 +87,8 @@ func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types. return nil, nil } -func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { - number := rawdb.ReadHeaderNumber(b.db, hash) - if number == nil { - return nil, nil - } - receipts := rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig) - - logs := make([][]*types.Log, len(receipts)) - for i, receipt := range receipts { - logs[i] = receipt.Logs - } +func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { + logs := rawdb.ReadLogs(b.db, hash, number, params.TestChainConfig) return logs, nil } @@ -160,6 +147,12 @@ func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.Matc }() } +func newTestFilterSystem(t testing.TB, db ethdb.Database, cfg Config) (*testBackend, *FilterSystem) { + backend := &testBackend{db: db} + sys := NewFilterSystem(backend, cfg) + return backend, sys +} + // TestBlockSubscription tests if a block subscription returns block hashes for posted chain events. // It creates multiple subscriptions: // - one at the start and should receive all posted chain events and a second (blockHashes) @@ -169,12 +162,12 @@ func TestBlockSubscription(t *testing.T) { t.Parallel() var ( - db = rawdb.NewMemoryDatabase() - backend = &testBackend{db: db} - api = NewFilterAPI(backend, false, deadline) - genesis = (&core.Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db) - chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {}) - chainEvents = []core.ChainEvent{} + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) + genesis = (&core.Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db) + chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {}) + chainEvents = []core.ChainEvent{} ) for _, blk := range chain { @@ -221,9 +214,9 @@ func TestPendingTxFilter(t *testing.T) { t.Parallel() var ( - db = rawdb.NewMemoryDatabase() - backend = &testBackend{db: db} - api = NewFilterAPI(backend, false, deadline) + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) transactions = []*types.Transaction{ types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), @@ -276,9 +269,9 @@ func TestPendingTxFilter(t *testing.T) { // If not it must return an error. 
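// [Editor's note] The cachedGetLogs helper added earlier in this patch is a
// plain read-through cache keyed by block hash. golang-lru's Get returns
// (interface{}, bool), hence the type assertion; the stored value is one
// []*types.Log slice per transaction in the block. Hedged shape of the
// pattern:
//
//	if v, ok := cache.Get(hash); ok {
//	    return v.([][]*types.Log), nil // cache hit
//	}
//	logs, err := backend.GetLogs(ctx, hash, number)
//	// ... on success:
//	cache.Add(hash, logs)
//
// Keying by hash rather than number means reorged blocks never serve stale
// logs; their entries simply age out of the LRU.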
func TestLogFilterCreation(t *testing.T) { var ( - db = rawdb.NewMemoryDatabase() - backend = &testBackend{db: db} - api = NewFilterAPI(backend, false, deadline) + db = rawdb.NewMemoryDatabase() + _, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) testCases = []struct { crit FilterCriteria @@ -323,9 +316,9 @@ func TestInvalidLogFilterCreation(t *testing.T) { t.Parallel() var ( - db = rawdb.NewMemoryDatabase() - backend = &testBackend{db: db} - api = NewFilterAPI(backend, false, deadline) + db = rawdb.NewMemoryDatabase() + _, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) ) // different situations where log filter creation should fail. @@ -346,8 +339,8 @@ func TestInvalidLogFilterCreation(t *testing.T) { func TestInvalidGetLogsRequest(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() - backend = &testBackend{db: db} - api = NewFilterAPI(backend, false, deadline) + _, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") ) @@ -370,9 +363,9 @@ func TestLogFilter(t *testing.T) { t.Parallel() var ( - db = rawdb.NewMemoryDatabase() - backend = &testBackend{db: db} - api = NewFilterAPI(backend, false, deadline) + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") @@ -484,9 +477,9 @@ func TestPendingLogsSubscription(t *testing.T) { t.Parallel() var ( - db = rawdb.NewMemoryDatabase() - backend = &testBackend{db: db} - api = NewFilterAPI(backend, false, deadline) + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") @@ -668,10 +661,10 @@ func TestPendingTxFilterDeadlock(t *testing.T) { timeout := 100 * time.Millisecond var ( - db = rawdb.NewMemoryDatabase() - backend = &testBackend{db: db} - api = NewFilterAPI(backend, false, timeout) - done = make(chan struct{}) + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{Timeout: timeout}) + api = NewFilterAPI(sys, false) + done = make(chan struct{}) ) go func() { diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index 59fdde7e809..2c1f7cadf43 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -44,7 +44,7 @@ func BenchmarkFilters(b *testing.B) { var ( db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "", false) - backend = &testBackend{db: db} + _, sys = newTestFilterSystem(b, db, Config{}) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = common.BytesToAddress([]byte("jeff")) @@ -89,7 +89,7 @@ func BenchmarkFilters(b *testing.B) { } b.ResetTimer() - filter := NewRangeFilter(backend, 0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil) + filter := sys.NewRangeFilter(0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil) for i := 0; i < b.N; i++ { logs, _ := filter.Logs(context.Background()) @@ -104,7 +104,7 @@ func TestFilters(t *testing.T) { var ( db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "", false) - backend = &testBackend{db: db} + _, sys = 
newTestFilterSystem(t, db, Config{}) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr = crypto.PubkeyToAddress(key1.PublicKey) @@ -175,14 +175,14 @@ func TestFilters(t *testing.T) { rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i]) } - filter := NewRangeFilter(backend, 0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}}) + filter := sys.NewRangeFilter(0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}}) logs, _ := filter.Logs(context.Background()) if len(logs) != 4 { t.Error("expected 4 log, got", len(logs)) } - filter = NewRangeFilter(backend, 900, 999, []common.Address{addr}, [][]common.Hash{{hash3}}) + filter = sys.NewRangeFilter(900, 999, []common.Address{addr}, [][]common.Hash{{hash3}}) logs, _ = filter.Logs(context.Background()) if len(logs) != 1 { t.Error("expected 1 log, got", len(logs)) @@ -191,7 +191,7 @@ func TestFilters(t *testing.T) { t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) } - filter = NewRangeFilter(backend, 990, -1, []common.Address{addr}, [][]common.Hash{{hash3}}) + filter = sys.NewRangeFilter(990, -1, []common.Address{addr}, [][]common.Hash{{hash3}}) logs, _ = filter.Logs(context.Background()) if len(logs) != 1 { t.Error("expected 1 log, got", len(logs)) @@ -200,7 +200,7 @@ func TestFilters(t *testing.T) { t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) } - filter = NewRangeFilter(backend, 1, 10, nil, [][]common.Hash{{hash1, hash2}}) + filter = sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}) logs, _ = filter.Logs(context.Background()) if len(logs) != 2 { @@ -208,7 +208,7 @@ func TestFilters(t *testing.T) { } failHash := common.BytesToHash([]byte("fail")) - filter = NewRangeFilter(backend, 0, -1, nil, [][]common.Hash{{failHash}}) + filter = sys.NewRangeFilter(0, -1, nil, [][]common.Hash{{failHash}}) logs, _ = filter.Logs(context.Background()) if len(logs) != 0 { @@ -216,14 +216,14 @@ func TestFilters(t *testing.T) { } failAddr := common.BytesToAddress([]byte("failmenow")) - filter = NewRangeFilter(backend, 0, -1, []common.Address{failAddr}, nil) + filter = sys.NewRangeFilter(0, -1, []common.Address{failAddr}, nil) logs, _ = filter.Logs(context.Background()) if len(logs) != 0 { t.Error("expected 0 log, got", len(logs)) } - filter = NewRangeFilter(backend, 0, -1, nil, [][]common.Hash{{failHash}, {hash1}}) + filter = sys.NewRangeFilter(0, -1, nil, [][]common.Hash{{failHash}, {hash1}}) logs, _ = filter.Logs(context.Background()) if len(logs) != 0 { diff --git a/ethclient/gethclient/gethclient_test.go b/ethclient/gethclient/gethclient_test.go index b78d11c3283..a0f4eaaf5db 100644 --- a/ethclient/gethclient/gethclient_test.go +++ b/ethclient/gethclient/gethclient_test.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" @@ -60,6 +61,12 @@ func newTestBackend(t *testing.T) (*node.Node, []*types.Block) { if err != nil { t.Fatalf("can't create new ethereum service: %v", err) } + filterSystem := filters.NewFilterSystem(ethservice.APIBackend, filters.Config{}) + n.RegisterAPIs([]rpc.API{{ + Namespace: "eth", + Service: filters.NewFilterAPI(filterSystem, false), + }}) + // Import the test chain. 
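// [Editor's note] In the TestFilters cases above, an end bound of -1 is
// rpc.LatestBlockNumber: Filter.Logs resolves it against the current head
// before scanning, so
//
//	filter = sys.NewRangeFilter(990, -1, []common.Address{addr}, [][]common.Hash{{hash3}})
//
// means "from block 990 to whatever the head is now", which is why the test
// can assert the same single hash3 log as the explicit 900-999 query.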
if err := n.Start(); err != nil { t.Fatalf("can't start test node: %v", err) diff --git a/graphql/graphql.go b/graphql/graphql.go index 0949c34803c..97b460c205c 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -450,12 +450,36 @@ func (t *Transaction) CreatedContract(ctx context.Context, args BlockNumberArgs) } func (t *Transaction) Logs(ctx context.Context) (*[]*Log, error) { - receipt, err := t.getReceipt(ctx) - if err != nil || receipt == nil { + if _, err := t.resolve(ctx); err != nil { return nil, err } - ret := make([]*Log, 0, len(receipt.Logs)) - for _, log := range receipt.Logs { + if t.block == nil { + return nil, nil + } + if _, ok := t.block.numberOrHash.Hash(); !ok { + header, err := t.r.backend.HeaderByNumberOrHash(ctx, *t.block.numberOrHash) + if err != nil { + return nil, err + } + hash := header.Hash() + t.block.numberOrHash.BlockHash = &hash + } + return t.getLogs(ctx) +} + +// getLogs returns log objects for the given tx. +// Assumes block hash is resolved. +func (t *Transaction) getLogs(ctx context.Context) (*[]*Log, error) { + var ( + hash, _ = t.block.numberOrHash.Hash() + filter = t.r.filterSystem.NewBlockFilter(hash, nil, nil) + logs, err = filter.Logs(ctx) + ) + if err != nil { + return nil, err + } + ret := make([]*Log, 0, len(logs)) + for _, log := range logs { ret = append(ret, &Log{ r: t.r, transaction: t, @@ -978,7 +1002,7 @@ func (b *Block) Logs(ctx context.Context, args struct{ Filter BlockFilterCriteri hash = header.Hash() } // Construct the range filter - filter := filters.NewBlockFilter(b.r.backend, hash, addresses, topics) + filter := b.r.filterSystem.NewBlockFilter(hash, addresses, topics) // Run the filter and return all the logs return runFilter(ctx, b.r, filter) @@ -1137,7 +1161,8 @@ func (p *Pending) EstimateGas(ctx context.Context, args struct { // Resolver is the top-level object in the GraphQL hierarchy. type Resolver struct { - backend ethapi.Backend + backend ethapi.Backend + filterSystem *filters.FilterSystem } func (r *Resolver) Block(ctx context.Context, args struct { @@ -1284,7 +1309,7 @@ func (r *Resolver) Logs(ctx context.Context, args struct{ Filter FilterCriteria topics = *args.Filter.Topics } // Construct the range filter - filter := filters.NewRangeFilter(r.backend, begin, end, addresses, topics) + filter := r.filterSystem.NewRangeFilter(begin, end, addresses, topics) return runFilter(ctx, r, filter) } diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go index 4b7f7bf9602..d55f4e06348 100644 --- a/graphql/graphql_test.go +++ b/graphql/graphql_test.go @@ -33,6 +33,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" @@ -50,7 +51,7 @@ func TestBuildSchema(t *testing.T) { } defer stack.Close() // Make sure the schema can be parsed and matched up to the object model. 
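// [Editor's note] The Transaction.Logs resolver rewritten above pins the
// containing block to a hash (via HeaderByNumberOrHash) before fetching logs,
// since NewBlockFilter and the log cache behind it are hash-keyed. The
// resulting flow, schematically:
//
//	numberOrHash -> HeaderByNumberOrHash -> header.Hash()
//	            -> filterSystem.NewBlockFilter(hash, nil, nil) -> Logs(ctx)
//
// NewBlockFilter with nil addresses and topics returns every log in the
// block, which getLogs then wraps into GraphQL Log objects.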
- if err := newHandler(stack, nil, []string{}, []string{}); err != nil { + if err := newHandler(stack, nil, nil, []string{}, []string{}); err != nil { t.Errorf("Could not construct GraphQL handler: %v", err) } } @@ -263,7 +264,8 @@ func createGQLService(t *testing.T, stack *node.Node) { t.Fatalf("could not create import blocks: %v", err) } // create gql service - err = New(stack, ethBackend.APIBackend, []string{}, []string{}) + filterSystem := filters.NewFilterSystem(ethBackend.APIBackend, filters.Config{}) + err = New(stack, ethBackend.APIBackend, filterSystem, []string{}, []string{}) if err != nil { t.Fatalf("could not create graphql service: %v", err) } @@ -348,7 +350,8 @@ func createGQLServiceWithTransactions(t *testing.T, stack *node.Node) { t.Fatalf("could not create import blocks: %v", err) } // create gql service - err = New(stack, ethBackend.APIBackend, []string{}, []string{}) + filterSystem := filters.NewFilterSystem(ethBackend.APIBackend, filters.Config{}) + err = New(stack, ethBackend.APIBackend, filterSystem, []string{}, []string{}) if err != nil { t.Fatalf("could not create graphql service: %v", err) } diff --git a/graphql/service.go b/graphql/service.go index 1a2ffaa9469..019026bc7ea 100644 --- a/graphql/service.go +++ b/graphql/service.go @@ -20,6 +20,7 @@ import ( "encoding/json" "net/http" + "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/node" "github.com/graph-gophers/graphql-go" @@ -55,14 +56,14 @@ func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } // New constructs a new GraphQL service instance. -func New(stack *node.Node, backend ethapi.Backend, cors, vhosts []string) error { - return newHandler(stack, backend, cors, vhosts) +func New(stack *node.Node, backend ethapi.Backend, filterSystem *filters.FilterSystem, cors, vhosts []string) error { + return newHandler(stack, backend, filterSystem, cors, vhosts) } // newHandler returns a new `http.Handler` that will answer GraphQL queries. // It additionally exports an interactive query browser on the / endpoint. 
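// [Editor's note] Embedders of the GraphQL service now supply the shared
// filter system explicitly. A hedged wiring sketch mirroring the test helper
// above:
//
//	filterSystem := filters.NewFilterSystem(ethBackend.APIBackend, filters.Config{})
//	if err := graphql.New(stack, ethBackend.APIBackend, filterSystem, nil, nil); err != nil {
//	    // handle the error
//	}
//
// (nil is passed for cors and vhosts purely for brevity; the tests use empty
// []string slices.)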
-func newHandler(stack *node.Node, backend ethapi.Backend, cors, vhosts []string) error { - q := Resolver{backend} +func newHandler(stack *node.Node, backend ethapi.Backend, filterSystem *filters.FilterSystem, cors, vhosts []string) error { + q := Resolver{backend, filterSystem} s, err := graphql.ParseSchema(schema, &q) if err != nil { diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index d13547f234a..5b4ceb63106 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -27,10 +27,10 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/bloombits" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/params" @@ -84,16 +84,12 @@ type Backend interface { TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription - // Filter API - BloomStatus() (uint64, uint64) - GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) - ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) - SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription - SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription - SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription - ChainConfig() *params.ChainConfig Engine() consensus.Engine + + // eth/filters needs to be initialized from this backend type, so methods needed by + // it must also be included here. 
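// [Editor's note] Embedding filters.Backend just below is ordinary Go
// interface composition: ethapi.Backend absorbs the embedded interface's
// whole method set without restating it, which is what allowed BloomStatus,
// GetLogs, ServiceFilter and the log subscription methods to be deleted
// above. Minimal illustration of the mechanism:
//
//	type Reader interface{ Read(p []byte) (n int, err error) }
//	type Closer interface{ Close() error }
//	type ReadCloser interface { // union of both method sets
//	    Reader
//	    Closer
//	}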
+ filters.Backend } func GetAPIs(apiBackend Backend) []rpc.API { diff --git a/les/api_backend.go b/les/api_backend.go index 11a9ca128aa..5b4213134b2 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -168,11 +168,8 @@ func (b *LesApiBackend) GetReceipts(ctx context.Context, hash common.Hash) (type return nil, nil } -func (b *LesApiBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { - if number := rawdb.ReadHeaderNumber(b.eth.chainDb, hash); number != nil { - return light.GetBlockLogs(ctx, b.eth.odr, hash, *number) - } - return nil, nil +func (b *LesApiBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { + return light.GetBlockLogs(ctx, b.eth.odr, hash, number) } func (b *LesApiBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { diff --git a/les/client.go b/les/client.go index 7caaf2c18a5..6504fe2af8f 100644 --- a/les/client.go +++ b/les/client.go @@ -32,7 +32,6 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/ethconfig" - "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/internal/ethapi" @@ -298,9 +297,6 @@ func (s *LightEthereum) APIs() []rpc.API { }, { Namespace: "eth", Service: downloader.NewDownloaderAPI(s.handler.downloader, s.eventMux), - }, { - Namespace: "eth", - Service: filters.NewFilterAPI(s.ApiBackend, true, 5*time.Minute), }, { Namespace: "net", Service: s.netRPCService, From 08658806268f4c570c682e87681881ac35123fe9 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Fri, 19 Aug 2022 18:15:04 +0800 Subject: [PATCH 11/26] accounts/abi: fix set function (#25477) * accounts/abi: fix set function * don't break things * update test --- accounts/abi/reflect.go | 2 +- accounts/abi/unpack_test.go | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go index eb21bb26451..7917fa98095 100644 --- a/accounts/abi/reflect.go +++ b/accounts/abi/reflect.go @@ -99,7 +99,7 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value { func set(dst, src reflect.Value) error { dstType, srcType := dst.Type(), src.Type() switch { - case dstType.Kind() == reflect.Interface && dst.Elem().IsValid(): + case dstType.Kind() == reflect.Interface && dst.Elem().IsValid() && (dst.Elem().Type().Kind() == reflect.Ptr || dst.Elem().CanSet()): return set(dst.Elem(), src) case dstType.Kind() == reflect.Ptr && dstType.Elem() != reflect.TypeOf(big.Int{}): return set(dst.Elem(), src) diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go index ae3565c71e2..363e0cd5943 100644 --- a/accounts/abi/unpack_test.go +++ b/accounts/abi/unpack_test.go @@ -352,6 +352,11 @@ func TestMethodMultiReturn(t *testing.T) { &[]interface{}{&expected.Int, &expected.String}, "", "Can unpack into a slice", + }, { + &[]interface{}{&bigint, ""}, + &[]interface{}{&expected.Int, expected.String}, + "", + "Can unpack into a slice without indirection", }, { &[2]interface{}{&bigint, new(string)}, &[2]interface{}{&expected.Int, &expected.String}, From ac7ad811b4d2ba0b93f9272e2487253026620b48 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 19 Aug 2022 14:48:49 +0200 Subject: [PATCH 12/26] internal/ethapi: fix build regression (#25555) --- internal/ethapi/transaction_args_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff 
--git a/internal/ethapi/transaction_args_test.go b/internal/ethapi/transaction_args_test.go index 92f009aa849..28dc561c36e 100644 --- a/internal/ethapi/transaction_args_test.go +++ b/internal/ethapi/transaction_args_test.go @@ -298,6 +298,9 @@ func (b *backendMock) PendingBlockAndReceipts() (*types.Block, types.Receipts) { func (b *backendMock) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { return nil, nil } +func (b *backendMock) GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) { + return nil, nil +} func (b *backendMock) GetTd(ctx context.Context, hash common.Hash) *big.Int { return nil } func (b *backendMock) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) { return nil, nil, nil @@ -325,11 +328,8 @@ func (b *backendMock) TxPoolContent() (map[common.Address]types.Transactions, ma func (b *backendMock) TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) { return nil, nil } -func (b *backendMock) SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription { return nil } -func (b *backendMock) BloomStatus() (uint64, uint64) { return 0, 0 } -func (b *backendMock) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) { - return nil, nil -} +func (b *backendMock) SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription { return nil } +func (b *backendMock) BloomStatus() (uint64, uint64) { return 0, 0 } func (b *backendMock) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {} func (b *backendMock) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { return nil } func (b *backendMock) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { From 0ce494b60cd00d70f1f9f2dd0b9bfbd76204168a Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Fri, 19 Aug 2022 15:59:36 +0200 Subject: [PATCH 13/26] eth/fetcher: don't spend too much time on transaction inclusion (#25524) * eth/fetcher: introduce some lag in tx fetching * eth/fetcher: change conditions a bit * eth/fetcher: use per-batch quota check * eth/fetcher: fix some comments * eth/fetcher: address review concerns * eth/fetcher: fix panic + add warn log * eth/fetcher: fix log * eth/fetcher: fix log * cmd/devp2p/internal/ethtest: fix ignoring tx announcements from prev. tests * cmd/devp2p/internal/ethtest: fix TestLargeTxRequest This increases the number of tx relay messages the test waits for. Since go-ethereum now processes incoming txs in smaller batches, the announcement messages it sends are also smaller. 
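[Editor's note] Concretely, the Enqueue rewrite below pushes transactions into the pool in batches of 128 and applies a per-batch quota: if more than 128/4 = 32 transactions in one batch are rejected for reasons other than "already known" or "underpriced", the fetcher logs a warning, stops processing the remaining batches, and sleeps for 200ms before Enqueue returns, throttling that peer. The quota check, using the names from the diff:

	if otherreject > 128/4 { // over a quarter of the batch was junk
		delay = 200 * time.Millisecond
		break
	}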
Co-authored-by: Felix Lange --- cmd/devp2p/internal/ethtest/helpers.go | 6 +- cmd/devp2p/internal/ethtest/suite.go | 4 + cmd/devp2p/internal/ethtest/transaction.go | 6 +- eth/fetcher/tx_fetcher.go | 98 +++++++++++++--------- 4 files changed, 72 insertions(+), 42 deletions(-) diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go index eeeb4f93cab..b57649ade99 100644 --- a/cmd/devp2p/internal/ethtest/helpers.go +++ b/cmd/devp2p/internal/ethtest/helpers.go @@ -357,9 +357,13 @@ func (s *Suite) waitAnnounce(conn *Conn, blockAnnouncement *NewBlock) error { return fmt.Errorf("wrong block hash in announcement: expected %v, got %v", blockAnnouncement.Block.Hash(), hashes[0].Hash) } return nil + + // ignore tx announcements from previous tests case *NewPooledTransactionHashes: - // ignore tx announcements from previous tests continue + case *Transactions: + continue + default: return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) } diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index 7059b4ba738..4497478d72d 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -544,9 +544,13 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) { t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket)) } return + // ignore propagated txs from previous tests case *NewPooledTransactionHashes: continue + case *Transactions: + continue + // ignore block announcements from previous tests case *NewBlockHashes: continue diff --git a/cmd/devp2p/internal/ethtest/transaction.go b/cmd/devp2p/internal/ethtest/transaction.go index c4748bf8f7d..baa55bd4926 100644 --- a/cmd/devp2p/internal/ethtest/transaction.go +++ b/cmd/devp2p/internal/ethtest/transaction.go @@ -29,7 +29,7 @@ import ( "github.com/ethereum/go-ethereum/params" ) -//var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") +// var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") func (s *Suite) sendSuccessfulTxs(t *utesting.T) error { @@ -192,10 +192,10 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction nonce = txs[len(txs)-1].Nonce() // Wait for the transaction announcement(s) and make sure all sent txs are being propagated. - // all txs should be announced within 3 announcements. + // all txs should be announced within a couple announcements. recvHashes := make([]common.Hash, 0) - for i := 0; i < 3; i++ { + for i := 0; i < 20; i++ { switch msg := recvConn.readAndServe(s.chain, timeout).(type) { case *Transactions: for _, tx := range *msg { diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go index 035e0c2ec7d..7c8f16df531 100644 --- a/eth/fetcher/tx_fetcher.go +++ b/eth/fetcher/tx_fetcher.go @@ -262,57 +262,79 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { // direct request replies. The differentiation is important so the fetcher can // re-schedule missing transactions as soon as possible. 
func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error { - // Keep track of all the propagated transactions - if direct { - txReplyInMeter.Mark(int64(len(txs))) - } else { - txBroadcastInMeter.Mark(int64(len(txs))) + var ( + inMeter = txReplyInMeter + knownMeter = txReplyKnownMeter + underpricedMeter = txReplyUnderpricedMeter + otherRejectMeter = txReplyOtherRejectMeter + ) + if !direct { + inMeter = txBroadcastInMeter + knownMeter = txBroadcastKnownMeter + underpricedMeter = txBroadcastUnderpricedMeter + otherRejectMeter = txBroadcastOtherRejectMeter } + // Keep track of all the propagated transactions + inMeter.Mark(int64(len(txs))) + // Push all the transactions into the pool, tracking underpriced ones to avoid // re-requesting them and dropping the peer in case of malicious transfers. var ( - added = make([]common.Hash, 0, len(txs)) - duplicate int64 - underpriced int64 - otherreject int64 + added = make([]common.Hash, 0, len(txs)) + delay time.Duration ) - errs := f.addTxs(txs) - for i, err := range errs { - // Track the transaction hash if the price is too low for us. - // Avoid re-request this transaction when we receive another - // announcement. - if errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced) { - for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize { - f.underpriced.Pop() - } - f.underpriced.Add(txs[i].Hash()) + // proceed in batches + for i := 0; i < len(txs); i += 128 { + end := i + 128 + if end > len(txs) { + end = len(txs) } - // Track a few interesting failure types - switch { - case err == nil: // Noop, but need to handle to not count these + var ( + duplicate int64 + underpriced int64 + otherreject int64 + ) + batch := txs[i:end] + for j, err := range f.addTxs(batch) { + // Track the transaction hash if the price is too low for us. + // Avoid re-request this transaction when we receive another + // announcement. + if errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced) { + for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize { + f.underpriced.Pop() + } + f.underpriced.Add(batch[j].Hash()) + } + // Track a few interesting failure types + switch { + case err == nil: // Noop, but need to handle to not count these - case errors.Is(err, core.ErrAlreadyKnown): - duplicate++ + case errors.Is(err, core.ErrAlreadyKnown): + duplicate++ - case errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced): - underpriced++ + case errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced): + underpriced++ - default: - otherreject++ + default: + otherreject++ + } + added = append(added, batch[j].Hash()) + } + knownMeter.Mark(duplicate) + underpricedMeter.Mark(underpriced) + otherRejectMeter.Mark(otherreject) + + // If 'other reject' is >25% of the deliveries in any batch, abort. Either we are + // out of sync with the chain or the peer is griefing us. 
+ if otherreject > 128/4 { + delay = 200 * time.Millisecond + log.Warn("Peer delivering useless transactions", "peer", peer, "ignored", len(txs)-end) + break } - added = append(added, txs[i].Hash()) - } - if direct { - txReplyKnownMeter.Mark(duplicate) - txReplyUnderpricedMeter.Mark(underpriced) - txReplyOtherRejectMeter.Mark(otherreject) - } else { - txBroadcastKnownMeter.Mark(duplicate) - txBroadcastUnderpricedMeter.Mark(underpriced) - txBroadcastOtherRejectMeter.Mark(otherreject) } select { case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}: + time.Sleep(delay) return nil case <-f.quit: return errTerminated From 02418c2fa965e61f5bc1e66e1063482eb5293e5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 22 Aug 2022 10:14:56 +0300 Subject: [PATCH 14/26] Revert "eth/fetcher: don't spend too much time on transaction inclusion" (#25567) Revert "eth/fetcher: don't spend too much time on transaction inclusion (#25524)" This reverts commit 0ce494b60cd00d70f1f9f2dd0b9bfbd76204168a. --- cmd/devp2p/internal/ethtest/helpers.go | 6 +- cmd/devp2p/internal/ethtest/suite.go | 4 - cmd/devp2p/internal/ethtest/transaction.go | 6 +- eth/fetcher/tx_fetcher.go | 98 +++++++++------------- 4 files changed, 42 insertions(+), 72 deletions(-) diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go index b57649ade99..eeeb4f93cab 100644 --- a/cmd/devp2p/internal/ethtest/helpers.go +++ b/cmd/devp2p/internal/ethtest/helpers.go @@ -357,13 +357,9 @@ func (s *Suite) waitAnnounce(conn *Conn, blockAnnouncement *NewBlock) error { return fmt.Errorf("wrong block hash in announcement: expected %v, got %v", blockAnnouncement.Block.Hash(), hashes[0].Hash) } return nil - - // ignore tx announcements from previous tests case *NewPooledTransactionHashes: + // ignore tx announcements from previous tests continue - case *Transactions: - continue - default: return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) } diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index 4497478d72d..7059b4ba738 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -544,13 +544,9 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) { t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket)) } return - // ignore propagated txs from previous tests case *NewPooledTransactionHashes: continue - case *Transactions: - continue - // ignore block announcements from previous tests case *NewBlockHashes: continue diff --git a/cmd/devp2p/internal/ethtest/transaction.go b/cmd/devp2p/internal/ethtest/transaction.go index baa55bd4926..c4748bf8f7d 100644 --- a/cmd/devp2p/internal/ethtest/transaction.go +++ b/cmd/devp2p/internal/ethtest/transaction.go @@ -29,7 +29,7 @@ import ( "github.com/ethereum/go-ethereum/params" ) -// var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") +//var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") func (s *Suite) sendSuccessfulTxs(t *utesting.T) error { @@ -192,10 +192,10 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction nonce = txs[len(txs)-1].Nonce() // Wait for the transaction announcement(s) and make sure all sent txs are being propagated. - // all txs should be announced within a couple announcements. 
+ // all txs should be announced within 3 announcements. recvHashes := make([]common.Hash, 0) - for i := 0; i < 20; i++ { + for i := 0; i < 3; i++ { switch msg := recvConn.readAndServe(s.chain, timeout).(type) { case *Transactions: for _, tx := range *msg { diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go index 7c8f16df531..035e0c2ec7d 100644 --- a/eth/fetcher/tx_fetcher.go +++ b/eth/fetcher/tx_fetcher.go @@ -262,79 +262,57 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { // direct request replies. The differentiation is important so the fetcher can // re-schedule missing transactions as soon as possible. func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error { - var ( - inMeter = txReplyInMeter - knownMeter = txReplyKnownMeter - underpricedMeter = txReplyUnderpricedMeter - otherRejectMeter = txReplyOtherRejectMeter - ) - if !direct { - inMeter = txBroadcastInMeter - knownMeter = txBroadcastKnownMeter - underpricedMeter = txBroadcastUnderpricedMeter - otherRejectMeter = txBroadcastOtherRejectMeter - } // Keep track of all the propagated transactions - inMeter.Mark(int64(len(txs))) - + if direct { + txReplyInMeter.Mark(int64(len(txs))) + } else { + txBroadcastInMeter.Mark(int64(len(txs))) + } // Push all the transactions into the pool, tracking underpriced ones to avoid // re-requesting them and dropping the peer in case of malicious transfers. var ( - added = make([]common.Hash, 0, len(txs)) - delay time.Duration + added = make([]common.Hash, 0, len(txs)) + duplicate int64 + underpriced int64 + otherreject int64 ) - // proceed in batches - for i := 0; i < len(txs); i += 128 { - end := i + 128 - if end > len(txs) { - end = len(txs) - } - var ( - duplicate int64 - underpriced int64 - otherreject int64 - ) - batch := txs[i:end] - for j, err := range f.addTxs(batch) { - // Track the transaction hash if the price is too low for us. - // Avoid re-request this transaction when we receive another - // announcement. - if errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced) { - for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize { - f.underpriced.Pop() - } - f.underpriced.Add(batch[j].Hash()) + errs := f.addTxs(txs) + for i, err := range errs { + // Track the transaction hash if the price is too low for us. + // Avoid re-request this transaction when we receive another + // announcement. + if errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced) { + for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize { + f.underpriced.Pop() } - // Track a few interesting failure types - switch { - case err == nil: // Noop, but need to handle to not count these + f.underpriced.Add(txs[i].Hash()) + } + // Track a few interesting failure types + switch { + case err == nil: // Noop, but need to handle to not count these - case errors.Is(err, core.ErrAlreadyKnown): - duplicate++ + case errors.Is(err, core.ErrAlreadyKnown): + duplicate++ - case errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced): - underpriced++ + case errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced): + underpriced++ - default: - otherreject++ - } - added = append(added, batch[j].Hash()) - } - knownMeter.Mark(duplicate) - underpricedMeter.Mark(underpriced) - otherRejectMeter.Mark(otherreject) - - // If 'other reject' is >25% of the deliveries in any batch, abort. Either we are - // out of sync with the chain or the peer is griefing us. 
- if otherreject > 128/4 { - delay = 200 * time.Millisecond - log.Warn("Peer delivering useless transactions", "peer", peer, "ignored", len(txs)-end) - break + default: + otherreject++ } + added = append(added, txs[i].Hash()) + } + if direct { + txReplyKnownMeter.Mark(duplicate) + txReplyUnderpricedMeter.Mark(underpriced) + txReplyOtherRejectMeter.Mark(otherreject) + } else { + txBroadcastKnownMeter.Mark(duplicate) + txBroadcastUnderpricedMeter.Mark(underpriced) + txBroadcastOtherRejectMeter.Mark(otherreject) } select { case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}: - time.Sleep(delay) return nil case <-f.quit: return errTerminated From 395f3d4bf689c199e93e05af57ebff09b10f1c9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 22 Aug 2022 11:27:39 +0300 Subject: [PATCH 15/26] eth/catalyst: warn less frequently if no beacon client is available (#25569) * eth/catalyst: warn less frequently if no beacon client is available * eth/catalyst: tweak warning frequency a bit * eth/catalyst: some more tweaks * Update api.go Co-authored-by: Felix Lange --- eth/catalyst/api.go | 39 ++++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 358529459b1..b159f34e64b 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -61,10 +61,23 @@ const ( // have lead to some bad ancestor block. It's just an OOM protection. invalidTipsetsCap = 512 - // beaconUpdateTimeout is the max time allowed for a beacon client to signal - // use (from the last heartbeat) before it's consifered offline and the user + // beaconUpdateStartupTimeout is the time to wait for a beacon client to get + // attached before starting to issue warnings. + beaconUpdateStartupTimeout = 30 * time.Second + + // beaconUpdateExchangeTimeout is the max time allowed for a beacon client to + // do a transition config exchange before it's considered offline and the user // is warned. - beaconUpdateTimeout = 30 * time.Second + beaconUpdateExchangeTimeout = 2 * time.Minute + + // beaconUpdateConsensusTimeout is the max time allowed for a beacon client + // to send a consensus update before it's considered offline and the user is + // warned. + beaconUpdateConsensusTimeout = 30 * time.Second + + // beaconUpdateWarnFrequency is the frequency at which to warn the user that + // the beacon client is offline. + beaconUpdateWarnFrequency = 5 * time.Minute ) type ConsensusAPI struct { @@ -545,9 +558,9 @@ func (api *ConsensusAPI) invalid(err error, latestValid *types.Header) beacon.Pa // // TODO(karalabe): Spin this goroutine down somehow func (api *ConsensusAPI) heartbeat() { - // Sleep a bit more on startup since there's obviously no beacon client yet + // Sleep a bit on startup since there's obviously no beacon client yet // attached, so no need to print scary warnings to the user. 
- time.Sleep(beaconUpdateTimeout) + time.Sleep(beaconUpdateStartupTimeout) var ( offlineLogged time.Time @@ -576,9 +589,9 @@ func (api *ConsensusAPI) heartbeat() { // If there have been no updates for the past while, warn the user // that the beacon client is probably offline if api.eth.BlockChain().Config().TerminalTotalDifficultyPassed || api.eth.Merger().TDDReached() { - if time.Since(lastForkchoiceUpdate) > beaconUpdateTimeout && time.Since(lastNewPayloadUpdate) > beaconUpdateTimeout { - if time.Since(lastTransitionUpdate) > beaconUpdateTimeout { - if time.Since(offlineLogged) > beaconUpdateTimeout { + if time.Since(lastForkchoiceUpdate) > beaconUpdateConsensusTimeout && time.Since(lastNewPayloadUpdate) > beaconUpdateConsensusTimeout { + if time.Since(lastTransitionUpdate) > beaconUpdateExchangeTimeout { + if time.Since(offlineLogged) > beaconUpdateWarnFrequency { if lastTransitionUpdate.IsZero() { log.Warn("Post-merge network, but no beacon client seen. Please launch one to follow the chain!") } else { @@ -588,7 +601,7 @@ func (api *ConsensusAPI) heartbeat() { } continue } - if time.Since(offlineLogged) > beaconUpdateTimeout { + if time.Since(offlineLogged) > beaconUpdateWarnFrequency { if lastForkchoiceUpdate.IsZero() && lastNewPayloadUpdate.IsZero() { log.Warn("Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!") } else { @@ -597,10 +610,12 @@ func (api *ConsensusAPI) heartbeat() { offlineLogged = time.Now() } continue + } else { + offlineLogged = time.Time{} } } else { - if time.Since(lastTransitionUpdate) > beaconUpdateTimeout { - if time.Since(offlineLogged) > beaconUpdateTimeout { + if time.Since(lastTransitionUpdate) > beaconUpdateExchangeTimeout { + if time.Since(offlineLogged) > beaconUpdateWarnFrequency { // Retrieve the last few blocks and make a rough estimate as // to when the merge transition should happen var ( @@ -654,6 +669,8 @@ func (api *ConsensusAPI) heartbeat() { offlineLogged = time.Now() } continue + } else { + offlineLogged = time.Time{} } } } From 2de49b04e56cf07f011d6d91f9d5c08847aabe8e Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Mon, 22 Aug 2022 10:36:39 +0200 Subject: [PATCH 16/26] params: release go-ethereum v1.10.22 --- params/version.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/params/version.go b/params/version.go index 258be5d5db3..5105f3578e8 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 10 // Minor version component of the current release - VersionPatch = 22 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 10 // Minor version component of the current release + VersionPatch = 22 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. @@ -41,9 +41,9 @@ var VersionWithMeta = func() string { return v }() -// ArchiveVersion holds the textual version string used for Geth archives. -// e.g. "1.8.11-dea1ce05" for stable releases, or -// "1.8.13-unstable-21c059b6" for unstable releases +// ArchiveVersion holds the textual version string used for Geth archives. e.g. 
+// "1.8.11-dea1ce05" for stable releases, or "1.8.13-unstable-21c059b6" for unstable +// releases. func ArchiveVersion(gitCommit string) string { vsn := Version if VersionMeta != "stable" { From 6d711f0c001ccb536c5ead8bd5d07828819e7d61 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Mon, 22 Aug 2022 10:39:18 +0200 Subject: [PATCH 17/26] params: begin v1.10.23 release cycle --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 5105f3578e8..e858944dd7d 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 10 // Minor version component of the current release - VersionPatch = 22 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 10 // Minor version component of the current release + VersionPatch = 23 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. From 81bd998353789980a6ef3e493b3562750b416d96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 23 Aug 2022 14:02:51 +0300 Subject: [PATCH 18/26] core, eth/downloader: handle spurious junk bodies from racey rollbacks (#25578) * eth/downloader: handle junkbodies/receipts in the beacon sync * core: check for header presence when checking for blocks --- core/blockchain_reader.go | 3 +++ eth/downloader/skeleton.go | 13 +++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index 96e9f80b6aa..5814c8a0dae 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -137,6 +137,9 @@ func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { if bc.blockCache.Contains(hash) { return true } + if !bc.HasHeader(hash, number) { + return false + } return rawdb.HasBody(bc.db, hash, number) } diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go index e627c6ae5a3..517b8378c51 100644 --- a/eth/downloader/skeleton.go +++ b/eth/downloader/skeleton.go @@ -358,6 +358,7 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) { // If the sync is already done, resume the backfiller. When the loop stops, // terminate the backfiller too. linked := len(s.progress.Subchains) == 1 && + rawdb.HasHeader(s.db, s.progress.Subchains[0].Next, s.scratchHead) && rawdb.HasBody(s.db, s.progress.Subchains[0].Next, s.scratchHead) && rawdb.HasReceipts(s.db, s.progress.Subchains[0].Next, s.scratchHead) if linked { @@ -946,12 +947,12 @@ func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged boo // In the case of full sync it would be enough to check for the body, // but even a full syncing node will generate a receipt once block // processing is done, so it's just one more "needless" check. - var ( - hasBody = rawdb.HasBody(s.db, header.ParentHash, header.Number.Uint64()-1) - hasReceipt = rawdb.HasReceipts(s.db, header.ParentHash, header.Number.Uint64()-1) - ) - if hasBody && hasReceipt { - linked = true + // + // The weird cascading checks are done to minimize the database reads. 
+ linked = rawdb.HasHeader(s.db, header.ParentHash, header.Number.Uint64()-1) && + rawdb.HasBody(s.db, header.ParentHash, header.Number.Uint64()-1) && + rawdb.HasReceipts(s.db, header.ParentHash, header.Number.Uint64()-1) + if linked { break } } From 5758d1fb1162f14ebb2420017d83181e576e3d88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 23 Aug 2022 21:17:12 +0300 Subject: [PATCH 19/26] core/state, trie: fix trie flush order for proper pruning --- core/state/statedb_test.go | 40 ++++++++++++++++++++++++++++++++++++++ trie/database.go | 19 +++++++++++++++--- 2 files changed, 56 insertions(+), 3 deletions(-) diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 092a4fb8711..6fe36a7ecff 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -914,3 +914,43 @@ func TestStateDBAccessList(t *testing.T) { t.Fatalf("expected empty, got %d", got) } } + +// Tests that account and storage tries are flushed in the correct order and that +// no data loss occurs. +func TestFlushOrderDataLoss(t *testing.T) { + // Create a state trie with many accounts and slots + var ( + memdb = rawdb.NewMemoryDatabase() + statedb = NewDatabase(memdb) + state, _ = New(common.Hash{}, statedb, nil) + ) + for a := byte(0); a < 10; a++ { + state.CreateAccount(common.Address{a}) + for s := byte(0); s < 10; s++ { + state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s}) + } + } + root, err := state.Commit(false) + if err != nil { + t.Fatalf("failed to commit state trie: %v", err) + } + statedb.TrieDB().Reference(root, common.Hash{}) + if err := statedb.TrieDB().Cap(1024); err != nil { + t.Fatalf("failed to cap trie dirty cache: %v", err) + } + if err := statedb.TrieDB().Commit(root, false, nil); err != nil { + t.Fatalf("failed to commit state trie: %v", err) + } + // Reopen the state trie from flushed disk and verify it + state, err = New(root, NewDatabase(memdb), nil) + if err != nil { + t.Fatalf("failed to reopen state trie: %v", err) + } + for a := byte(0); a < 10; a++ { + for s := byte(0); s < 10; s++ { + if have := state.GetState(common.Address{a}, common.Hash{a, s}); have != (common.Hash{a, s}) { + t.Errorf("account %d: slot %d: state mismatch: have %x, want %x", a, s, have, common.Hash{a, s}) + } + } + } +} diff --git a/trie/database.go b/trie/database.go index 8c154ba96df..b10bbca9bdb 100644 --- a/trie/database.go +++ b/trie/database.go @@ -776,9 +776,22 @@ func (db *Database) Update(nodes *MergedNodeSet) error { // Insert dirty nodes into the database. In the same tree, it must be // ensured that children are inserted first, then parent so that children - // can be linked with their parent correctly. The order of writing between - // different tries(account trie, storage tries) is not required. - for owner, subset := range nodes.sets { + // can be linked with their parent correctly. + // + // Note, the storage tries must be flushed before the account trie to + // retain the invariant that children go into the dirty cache first. 
+ var order []common.Hash + for owner := range nodes.sets { + if owner == (common.Hash{}) { + continue + } + order = append(order, owner) + } + if _, ok := nodes.sets[common.Hash{}]; ok { + order = append(order, common.Hash{}) + } + for _, owner := range order { + subset := nodes.sets[owner] for _, path := range subset.paths { n, ok := subset.nodes[path] if !ok { From 45a660a4f217fc00378665773fb0a60beebac9bd Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Tue, 23 Aug 2022 20:48:50 +0200 Subject: [PATCH 20/26] consensus/beacon: don't ignore errors --- consensus/beacon/consensus.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 949c8ad816c..7e4d657413e 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -79,7 +79,10 @@ func (beacon *Beacon) Author(header *types.Header) (common.Address, error) { // VerifyHeader checks whether a header conforms to the consensus rules of the // stock Ethereum consensus engine. func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { - reached, _ := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1) + reached, err := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1) + if err != nil { + return err + } if !reached { return beacon.ethone.VerifyHeader(chain, header, seal) } @@ -116,11 +119,14 @@ func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers [ if len(preHeaders) == 0 { // All the headers are pos headers. Verify that the parent block reached total terminal difficulty. - if reached, _ := IsTTDReached(chain, headers[0].ParentHash, headers[0].Number.Uint64()-1); !reached { + if reached, err := IsTTDReached(chain, headers[0].ParentHash, headers[0].Number.Uint64()-1); !reached { // TTD not reached for the first block, mark subsequent with invalid terminal block + if err == nil { + err = consensus.ErrInvalidTerminalBlock + } results := make(chan error, len(headers)) for i := 0; i < len(headers); i++ { - results <- consensus.ErrInvalidTerminalBlock + results <- err } return make(chan struct{}), results } From d901d85377c2c2f05f09f423c7d739c0feecd90a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 24 Aug 2022 12:09:02 +0300 Subject: [PATCH 21/26] params: release Geth v1.10.23 --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index e858944dd7d..5f24b41f285 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 10 // Minor version component of the current release - VersionPatch = 23 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 10 // Minor version component of the current release + VersionPatch = 23 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. 
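Before moving on: the flush-order fix in PATCH 19 above boils down to partitioning map keys so the account trie (the zero owner hash) is committed after every storage trie, preserving the children-first invariant of the dirty cache. A self-contained sketch of just that ordering step, with common.Hash reduced to a local stand-in and the merged node sets reduced to their owner keys (which is all the ordering logic looks at):

    package main

    import "fmt"

    // Hash stands in for common.Hash; the zero value marks the account trie.
    type Hash [32]byte

    // flushOrder returns trie owners in commit order: all storage tries
    // (non-zero owners) first, the account trie (zero owner) last, so that
    // child nodes always reach the dirty cache before their parent trie.
    func flushOrder(sets map[Hash]struct{}) []Hash {
        var order []Hash
        for owner := range sets {
            if owner == (Hash{}) {
                continue // defer the account trie to the very end
            }
            order = append(order, owner)
        }
        if _, ok := sets[Hash{}]; ok {
            order = append(order, Hash{})
        }
        return order
    }

    func main() {
        sets := map[Hash]struct{}{{}: {}, {1}: {}, {2}: {}}
        order := flushOrder(sets)
        fmt.Println(order[len(order)-1] == Hash{}) // true: account trie flushed last
    }

Iteration order over the storage tries themselves stays arbitrary, which is fine: the invariant only constrains storage tries relative to the account trie, not relative to each other.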
From d0dc349fd36bd79f94516c866251783641ed12f1 Mon Sep 17 00:00:00 2001 From: Sina Mahmoodi <1591639+s1na@users.noreply.github.com> Date: Wed, 31 Aug 2022 16:14:53 +0200 Subject: [PATCH 22/26] graphql: return correct logs for tx (#25612) * graphql: fix tx logs * minor * Use optimized search for selecting tx logs --- graphql/graphql.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/graphql/graphql.go b/graphql/graphql.go index 97b460c205c..66c25841db9 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "math/big" + "sort" "strconv" "github.com/ethereum/go-ethereum" @@ -478,13 +479,16 @@ func (t *Transaction) getLogs(ctx context.Context) (*[]*Log, error) { if err != nil { return nil, err } - ret := make([]*Log, 0, len(logs)) - for _, log := range logs { + var ret []*Log + // Select tx logs from all block logs + ix := sort.Search(len(logs), func(i int) bool { return uint64(logs[i].TxIndex) == t.index }) + for ix < len(logs) && uint64(logs[ix].TxIndex) == t.index { ret = append(ret, &Log{ r: t.r, transaction: t, - log: log, + log: logs[ix], }) + ix++ } return &ret, nil } From 3b41be695e6e12829abf6e5edec0606ee37f14d9 Mon Sep 17 00:00:00 2001 From: Sina Mahmoodi <1591639+s1na@users.noreply.github.com> Date: Tue, 13 Sep 2022 21:49:52 +0200 Subject: [PATCH 23/26] graphql: fixes missing tx logs (#25745) * graphql: fix tx logs * graphql: refactor test service setup * graphql: add test for tx logs --- graphql/graphql.go | 2 +- graphql/graphql_test.go | 227 +++++++++++++++++++++------------------- graphql/service.go | 9 +- 3 files changed, 124 insertions(+), 114 deletions(-) diff --git a/graphql/graphql.go b/graphql/graphql.go index 66c25841db9..356ff669fb1 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -481,7 +481,7 @@ func (t *Transaction) getLogs(ctx context.Context) (*[]*Log, error) { } var ret []*Log // Select tx logs from all block logs - ix := sort.Search(len(logs), func(i int) bool { return uint64(logs[i].TxIndex) == t.index }) + ix := sort.Search(len(logs), func(i int) bool { return uint64(logs[i].TxIndex) >= t.index }) for ix < len(logs) && uint64(logs[ix].TxIndex) == t.index { ret = append(ret, &Log{ r: t.r, diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go index d55f4e06348..491c7315211 100644 --- a/graphql/graphql_test.go +++ b/graphql/graphql_test.go @@ -17,6 +17,8 @@ package graphql import ( + "context" + "encoding/json" "fmt" "io" "math/big" @@ -51,15 +53,21 @@ func TestBuildSchema(t *testing.T) { } defer stack.Close() // Make sure the schema can be parsed and matched up to the object model. 
- if err := newHandler(stack, nil, nil, []string{}, []string{}); err != nil { + if _, err := newHandler(stack, nil, nil, []string{}, []string{}); err != nil { t.Errorf("Could not construct GraphQL handler: %v", err) } } // Tests that a graphQL request is successfully handled when graphql is enabled on the specified endpoint func TestGraphQLBlockSerialization(t *testing.T) { - stack := createNode(t, true, false) + stack := createNode(t) defer stack.Close() + genesis := &core.Genesis{ + Config: params.AllEthashProtocolChanges, + GasLimit: 11500000, + Difficulty: big.NewInt(1048576), + } + newGQLService(t, stack, genesis, 10, func(i int, gen *core.BlockGen) {}) // start node if err := stack.Start(); err != nil { t.Fatalf("could not start node: %v", err) @@ -161,8 +169,55 @@ func TestGraphQLBlockSerialization(t *testing.T) { } func TestGraphQLBlockSerializationEIP2718(t *testing.T) { - stack := createNode(t, true, true) + // Account for signing txes + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + address = crypto.PubkeyToAddress(key.PublicKey) + funds = big.NewInt(1000000000000000) + dad = common.HexToAddress("0x0000000000000000000000000000000000000dad") + ) + stack := createNode(t) defer stack.Close() + genesis := &core.Genesis{ + Config: params.AllEthashProtocolChanges, + GasLimit: 11500000, + Difficulty: big.NewInt(1048576), + Alloc: core.GenesisAlloc{ + address: {Balance: funds}, + // The address 0xdad sloads 0x00 and 0x01 + dad: { + Code: []byte{byte(vm.PC), byte(vm.PC), byte(vm.SLOAD), byte(vm.SLOAD)}, + Nonce: 0, + Balance: big.NewInt(0), + }, + }, + BaseFee: big.NewInt(params.InitialBaseFee), + } + signer := types.LatestSigner(genesis.Config) + newGQLService(t, stack, genesis, 1, func(i int, gen *core.BlockGen) { + gen.SetCoinbase(common.Address{1}) + tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ + Nonce: uint64(0), + To: &dad, + Value: big.NewInt(100), + Gas: 50000, + GasPrice: big.NewInt(params.InitialBaseFee), + }) + gen.AddTx(tx) + tx, _ = types.SignNewTx(key, signer, &types.AccessListTx{ + ChainID: genesis.Config.ChainID, + Nonce: uint64(1), + To: &dad, + Gas: 30000, + GasPrice: big.NewInt(params.InitialBaseFee), + Value: big.NewInt(50), + AccessList: types.AccessList{{ + Address: dad, + StorageKeys: []common.Hash{{0}}, + }}, + }) + gen.AddTx(tx) + }) // start node if err := stack.Start(); err != nil { t.Fatalf("could not start node: %v", err) @@ -198,7 +253,7 @@ func TestGraphQLBlockSerializationEIP2718(t *testing.T) { // Tests that a graphQL request is not handled successfully when graphql is not enabled on the specified endpoint func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) { - stack := createNode(t, false, false) + stack := createNode(t) defer stack.Close() if err := stack.Start(); err != nil { t.Fatalf("could not start node: %v", err) @@ -212,7 +267,59 @@ func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) { assert.Equal(t, http.StatusNotFound, resp.StatusCode) } -func createNode(t *testing.T, gqlEnabled bool, txEnabled bool) *node.Node { +func TestGraphQLTransactionLogs(t *testing.T) { + var ( + key, _ = crypto.GenerateKey() + addr = crypto.PubkeyToAddress(key.PublicKey) + dadStr = "0x0000000000000000000000000000000000000dad" + dad = common.HexToAddress(dadStr) + genesis = &core.Genesis{ + Config: params.AllEthashProtocolChanges, + GasLimit: 11500000, + Difficulty: big.NewInt(1048576), + Alloc: core.GenesisAlloc{ + addr: {Balance: big.NewInt(params.Ether)}, + dad: { + 
// LOG0(0, 0), LOG0(0, 0), RETURN(0, 0) + Code: common.Hex2Bytes("60006000a060006000a060006000f3"), + Nonce: 0, + Balance: big.NewInt(0), + }, + }, + } + signer = types.LatestSigner(genesis.Config) + stack = createNode(t) + ) + defer stack.Close() + + handler := newGQLService(t, stack, genesis, 1, func(i int, gen *core.BlockGen) { + tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{To: &dad, Gas: 100000, GasPrice: big.NewInt(params.InitialBaseFee)}) + gen.AddTx(tx) + tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{To: &dad, Nonce: 1, Gas: 100000, GasPrice: big.NewInt(params.InitialBaseFee)}) + gen.AddTx(tx) + tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{To: &dad, Nonce: 2, Gas: 100000, GasPrice: big.NewInt(params.InitialBaseFee)}) + gen.AddTx(tx) + }) + // start node + if err := stack.Start(); err != nil { + t.Fatalf("could not start node: %v", err) + } + query := `{block { transactions { logs { account { address } } } } }` + res := handler.Schema.Exec(context.Background(), query, "", map[string]interface{}{}) + if res.Errors != nil { + t.Fatalf("graphql query failed: %v", res.Errors) + } + have, err := json.Marshal(res.Data) + if err != nil { + t.Fatalf("failed to encode graphql response: %s", err) + } + want := fmt.Sprintf(`{"block":{"transactions":[{"logs":[{"account":{"address":"%s"}},{"account":{"address":"%s"}}]},{"logs":[{"account":{"address":"%s"}},{"account":{"address":"%s"}}]},{"logs":[{"account":{"address":"%s"}},{"account":{"address":"%s"}}]}]}}`, dadStr, dadStr, dadStr, dadStr, dadStr, dadStr) + if string(have) != want { + t.Errorf("response unmatch. expected %s, got %s", want, have) + } +} + +func createNode(t *testing.T) *node.Node { stack, err := node.New(&node.Config{ HTTPHost: "127.0.0.1", HTTPPort: 0, @@ -222,83 +329,12 @@ func createNode(t *testing.T, gqlEnabled bool, txEnabled bool) *node.Node { if err != nil { t.Fatalf("could not create node: %v", err) } - if !gqlEnabled { - return stack - } - if !txEnabled { - createGQLService(t, stack) - } else { - createGQLServiceWithTransactions(t, stack) - } return stack } -func createGQLService(t *testing.T, stack *node.Node) { - // create backend - ethConf := ðconfig.Config{ - Genesis: &core.Genesis{ - Config: params.AllEthashProtocolChanges, - GasLimit: 11500000, - Difficulty: big.NewInt(1048576), - }, - Ethash: ethash.Config{ - PowMode: ethash.ModeFake, - }, - NetworkId: 1337, - TrieCleanCache: 5, - TrieCleanCacheJournal: "triecache", - TrieCleanCacheRejournal: 60 * time.Minute, - TrieDirtyCache: 5, - TrieTimeout: 60 * time.Minute, - SnapshotCache: 5, - } - ethBackend, err := eth.New(stack, ethConf) - if err != nil { - t.Fatalf("could not create eth backend: %v", err) - } - // Create some blocks and import them - chain, _ := core.GenerateChain(params.AllEthashProtocolChanges, ethBackend.BlockChain().Genesis(), - ethash.NewFaker(), ethBackend.ChainDb(), 10, func(i int, gen *core.BlockGen) {}) - _, err = ethBackend.BlockChain().InsertChain(chain) - if err != nil { - t.Fatalf("could not create import blocks: %v", err) - } - // create gql service - filterSystem := filters.NewFilterSystem(ethBackend.APIBackend, filters.Config{}) - err = New(stack, ethBackend.APIBackend, filterSystem, []string{}, []string{}) - if err != nil { - t.Fatalf("could not create graphql service: %v", err) - } -} - -func createGQLServiceWithTransactions(t *testing.T, stack *node.Node) { - // create backend - key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address := 
crypto.PubkeyToAddress(key.PublicKey) - funds := big.NewInt(1000000000000000) - dad := common.HexToAddress("0x0000000000000000000000000000000000000dad") - +func newGQLService(t *testing.T, stack *node.Node, gspec *core.Genesis, genBlocks int, genfunc func(i int, gen *core.BlockGen)) *handler { ethConf := ðconfig.Config{ - Genesis: &core.Genesis{ - Config: params.AllEthashProtocolChanges, - GasLimit: 11500000, - Difficulty: big.NewInt(1048576), - Alloc: core.GenesisAlloc{ - address: {Balance: funds}, - // The address 0xdad sloads 0x00 and 0x01 - dad: { - Code: []byte{ - byte(vm.PC), - byte(vm.PC), - byte(vm.SLOAD), - byte(vm.SLOAD), - }, - Nonce: 0, - Balance: big.NewInt(0), - }, - }, - BaseFee: big.NewInt(params.InitialBaseFee), - }, + Genesis: gspec, Ethash: ethash.Config{ PowMode: ethash.ModeFake, }, @@ -310,49 +346,22 @@ func createGQLServiceWithTransactions(t *testing.T, stack *node.Node) { TrieTimeout: 60 * time.Minute, SnapshotCache: 5, } - ethBackend, err := eth.New(stack, ethConf) if err != nil { t.Fatalf("could not create eth backend: %v", err) } - signer := types.LatestSigner(ethConf.Genesis.Config) - - legacyTx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: uint64(0), - To: &dad, - Value: big.NewInt(100), - Gas: 50000, - GasPrice: big.NewInt(params.InitialBaseFee), - }) - envelopTx, _ := types.SignNewTx(key, signer, &types.AccessListTx{ - ChainID: ethConf.Genesis.Config.ChainID, - Nonce: uint64(1), - To: &dad, - Gas: 30000, - GasPrice: big.NewInt(params.InitialBaseFee), - Value: big.NewInt(50), - AccessList: types.AccessList{{ - Address: dad, - StorageKeys: []common.Hash{{0}}, - }}, - }) - // Create some blocks and import them chain, _ := core.GenerateChain(params.AllEthashProtocolChanges, ethBackend.BlockChain().Genesis(), - ethash.NewFaker(), ethBackend.ChainDb(), 1, func(i int, b *core.BlockGen) { - b.SetCoinbase(common.Address{1}) - b.AddTx(legacyTx) - b.AddTx(envelopTx) - }) - + ethash.NewFaker(), ethBackend.ChainDb(), genBlocks, genfunc) _, err = ethBackend.BlockChain().InsertChain(chain) if err != nil { t.Fatalf("could not create import blocks: %v", err) } - // create gql service + // Set up handler filterSystem := filters.NewFilterSystem(ethBackend.APIBackend, filters.Config{}) - err = New(stack, ethBackend.APIBackend, filterSystem, []string{}, []string{}) + handler, err := newHandler(stack, ethBackend.APIBackend, filterSystem, []string{}, []string{}) if err != nil { t.Fatalf("could not create graphql service: %v", err) } + return handler } diff --git a/graphql/service.go b/graphql/service.go index 019026bc7ea..6f6e5833599 100644 --- a/graphql/service.go +++ b/graphql/service.go @@ -57,17 +57,18 @@ func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // New constructs a new GraphQL service instance. func New(stack *node.Node, backend ethapi.Backend, filterSystem *filters.FilterSystem, cors, vhosts []string) error { - return newHandler(stack, backend, filterSystem, cors, vhosts) + _, err := newHandler(stack, backend, filterSystem, cors, vhosts) + return err } // newHandler returns a new `http.Handler` that will answer GraphQL queries. // It additionally exports an interactive query browser on the / endpoint. 
-func newHandler(stack *node.Node, backend ethapi.Backend, filterSystem *filters.FilterSystem, cors, vhosts []string) error { +func newHandler(stack *node.Node, backend ethapi.Backend, filterSystem *filters.FilterSystem, cors, vhosts []string) (*handler, error) { q := Resolver{backend, filterSystem} s, err := graphql.ParseSchema(schema, &q) if err != nil { - return err + return nil, err } h := handler{Schema: s} handler := node.NewHTTPHandlerStack(h, cors, vhosts, nil) @@ -76,5 +77,5 @@ func newHandler(stack *node.Node, backend ethapi.Backend, filterSystem *filters. stack.RegisterHandler("GraphQL", "/graphql", handler) stack.RegisterHandler("GraphQL", "/graphql/", handler) - return nil + return &h, nil } From 972007a517c49ee9e2a359950d81c74467492ed2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 14 Sep 2022 11:07:10 +0200 Subject: [PATCH 24/26] Release Geth v1.10.24 --- params/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/version.go b/params/version.go index 5f24b41f285..349bdf1c18d 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 10 // Minor version component of the current release - VersionPatch = 23 // Patch version component of the current release + VersionPatch = 24 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) From 8f61fc8b73565ff15d27fd9731b945d9d1878ec3 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Thu, 15 Sep 2022 17:50:54 +0200 Subject: [PATCH 25/26] params: set TerminalTotalDifficultyPassed to true (#25769) * params: set TerminalTotalDifficultyPassed to true * Update params/config.go Co-authored-by: Martin Holst Swende --- params/config.go | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/params/config.go b/params/config.go index 4d6eec8939b..80e671f9bf4 100644 --- a/params/config.go +++ b/params/config.go @@ -59,25 +59,26 @@ var ( // MainnetChainConfig is the chain parameters to run a node on the main network. 
MainnetChainConfig = &ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(1_150_000), - DAOForkBlock: big.NewInt(1_920_000), - DAOForkSupport: true, - EIP150Block: big.NewInt(2_463_000), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), - EIP155Block: big.NewInt(2_675_000), - EIP158Block: big.NewInt(2_675_000), - ByzantiumBlock: big.NewInt(4_370_000), - ConstantinopleBlock: big.NewInt(7_280_000), - PetersburgBlock: big.NewInt(7_280_000), - IstanbulBlock: big.NewInt(9_069_000), - MuirGlacierBlock: big.NewInt(9_200_000), - BerlinBlock: big.NewInt(12_244_000), - LondonBlock: big.NewInt(12_965_000), - ArrowGlacierBlock: big.NewInt(13_773_000), - GrayGlacierBlock: big.NewInt(15_050_000), - TerminalTotalDifficulty: MainnetTerminalTotalDifficulty, // 58_750_000_000_000_000_000_000 - Ethash: new(EthashConfig), + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(1_150_000), + DAOForkBlock: big.NewInt(1_920_000), + DAOForkSupport: true, + EIP150Block: big.NewInt(2_463_000), + EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), + EIP155Block: big.NewInt(2_675_000), + EIP158Block: big.NewInt(2_675_000), + ByzantiumBlock: big.NewInt(4_370_000), + ConstantinopleBlock: big.NewInt(7_280_000), + PetersburgBlock: big.NewInt(7_280_000), + IstanbulBlock: big.NewInt(9_069_000), + MuirGlacierBlock: big.NewInt(9_200_000), + BerlinBlock: big.NewInt(12_244_000), + LondonBlock: big.NewInt(12_965_000), + ArrowGlacierBlock: big.NewInt(13_773_000), + GrayGlacierBlock: big.NewInt(15_050_000), + TerminalTotalDifficulty: MainnetTerminalTotalDifficulty, // 58_750_000_000_000_000_000_000 + TerminalTotalDifficultyPassed: true, + Ethash: new(EthashConfig), } // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. From 69568c554880b3567bace64f8848ff1be27d084d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 15 Sep 2022 17:55:58 +0200 Subject: [PATCH 26/26] params: release Geth v1.10.25 --- params/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/version.go b/params/version.go index 349bdf1c18d..3b5978e849c 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 10 // Minor version component of the current release - VersionPatch = 24 // Patch version component of the current release + VersionPatch = 25 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string )
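A closing note on PATCH 22 and PATCH 23: the optimized log selection leans on sort.Search, whose predicate must be monotone over the slice (false for some prefix, true for the rest). Assuming, as the patches do, that a block's logs are ordered by transaction index, the >= predicate from PATCH 23 satisfies that contract, while the == predicate from PATCH 22 is non-monotone and can make the binary search miss a transaction's logs entirely. A minimal sketch with a stand-in log type:

    package main

    import (
        "fmt"
        "sort"
    )

    // txLog stands in for types.Log; a block's logs are sorted by TxIndex.
    type txLog struct{ TxIndex uint64 }

    // logsOf selects the logs of the transaction at index from a block's log
    // slice. The >= predicate finds the first log at or after the target index;
    // with == the predicate is non-monotone and sort.Search's result is undefined.
    func logsOf(all []txLog, index uint64) []txLog {
        ix := sort.Search(len(all), func(i int) bool { return all[i].TxIndex >= index })
        var ret []txLog
        for ix < len(all) && all[ix].TxIndex == index {
            ret = append(ret, all[ix])
            ix++
        }
        return ret
    }

    func main() {
        logs := []txLog{{0}, {0}, {2}, {2}, {3}}
        fmt.Println(len(logsOf(logs, 2))) // 2
        fmt.Println(len(logsOf(logs, 1))) // 0: tx 1 emitted no logs
    }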