From a9a4e990ba958e16a712051d439968986d630051 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 16:17:51 +0100 Subject: [PATCH 01/23] common/lru: add generic LRU implementation It seems that there is no fully typed library implementation of an LRU cache. So I wrote one. The methods names are the same as github.com/hashicorp/golang-lru, the new type can be used as a drop-in replacement. --- common/lru/locked.go | 85 +++++++++++++++++ common/lru/lru.go | 206 +++++++++++++++++++++++++++++++++++++++++ common/lru/lru_test.go | 134 +++++++++++++++++++++++++++ 3 files changed, 425 insertions(+) create mode 100644 common/lru/locked.go create mode 100644 common/lru/lru.go create mode 100644 common/lru/lru_test.go diff --git a/common/lru/locked.go b/common/lru/locked.go new file mode 100644 index 00000000000..7e6ca2ad6be --- /dev/null +++ b/common/lru/locked.go @@ -0,0 +1,85 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package lru + +import "sync" + +// LockedCache is an LRU cache protected by a mutex. +type LockedCache[K comparable, V any] struct { + cache Cache[K, V] + mu sync.Mutex +} + +func NewLockedCache[K comparable, V any](capacity int) *LockedCache[K, V] { + return &LockedCache[K, V]{cache: NewCache[K, V](capacity)} +} + +// Add adds a value to the cache. Returns true if an item was evicted to store the new item. +func (c *LockedCache[K, V]) Add(key K, value V) (evicted bool) { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Add(key, value) +} + +// Contains reports whether the given key exists in the cache. +func (c *LockedCache[K, V]) Contains(key K) bool { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Contains(key) +} + +// Get retrieves a value from the cache. This marks the key as recently used. +func (c *LockedCache[K, V]) Get(key K) (value V, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Get(key) +} + +// Len returns the current number of items in the cache. +func (c *LockedCache[K, V]) Len() int { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Len() +} + +// Peek retrieves a value from the cache, but does not mark the key as recently used. +func (c *LockedCache[K, V]) Peek(key K) (value V, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Peek(key) +} + +// Purge empties the cache. +func (c *LockedCache[K, V]) Purge() { + c.mu.Lock() + defer c.mu.Unlock() + + c.cache.Purge() +} + +// Remove drops an item from the cache. Returns true if the key was present in cache. 
+func (c *LockedCache[K, V]) Remove(key K) bool { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Remove(key) +} diff --git a/common/lru/lru.go b/common/lru/lru.go new file mode 100644 index 00000000000..cb108cb4d9d --- /dev/null +++ b/common/lru/lru.go @@ -0,0 +1,206 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package lru implements generically-typed LRU caches. +package lru + +// Cache is a simple LRU cache. +// +// This type is not safe for concurrent use. +// The zero value is not valid, instances must be created using NewCache. +type Cache[K comparable, V any] struct { + list dlist[K] + items map[K]cacheItem[K, V] + cap int +} + +type cacheItem[K any, V any] struct { + value V + node *dlistNode[K] +} + +// NewCache creates a new LRU cache. +func NewCache[K comparable, V any](capacity int) Cache[K, V] { + if capacity < 0 { + capacity = 1 + } + return Cache[K, V]{ + items: make(map[K]cacheItem[K, V]), + cap: capacity, + } +} + +// Add adds a value to the cache. Returns true if an item was evicted to store the new item. +func (c *Cache[K, V]) Add(key K, value V) (evicted bool) { + item, ok := c.items[key] + if ok { + // Already exists in cache. + item.value = value + c.list.moveToFront(item.node) + return false + } + + if c.Len() >= c.cap { + // Evict an item. + node := c.list.removeLast() + delete(c.items, node.v) + evicted = true + } + + // Store the new item. + item = cacheItem[K, V]{value: value, node: c.list.push(key)} + c.items[key] = item + return evicted +} + +// Contains reports whether the given key exists in the cache. +func (c *Cache[K, V]) Contains(key K) bool { + _, ok := c.items[key] + return ok +} + +// Get retrieves a value from the cache. This marks the key as recently used. +func (c *Cache[K, V]) Get(key K) (value V, ok bool) { + item, ok := c.items[key] + if !ok { + return value, false + } + c.list.moveToFront(item.node) + return item.value, true +} + +// GetOldest retrieves the least-recently-used item. +// Note that this does not update the item's recency. +func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) { + if c.list.tail == nil { + return key, value, false + } + key = c.list.tail.v + item := c.items[key] + return key, item.value, true +} + +// Len returns the current number of items in the cache. +func (c *Cache[K, V]) Len() int { + return len(c.items) +} + +// Peek retrieves a value from the cache, but does not mark the key as recently used. +func (c *Cache[K, V]) Peek(key K) (value V, ok bool) { + item, ok := c.items[key] + if !ok { + return value, false + } + return item.value, true +} + +// Purge empties the cache. +func (c *Cache[K, V]) Purge() { + c.list.init() + for k := range c.items { + delete(c.items, k) + } +} + +// Remove drops an item from the cache. Returns true if the key was present in cache. 
+func (c *Cache[K, V]) Remove(key K) bool { + item, ok := c.items[key] + if ok { + delete(c.items, key) + c.list.remove(item.node) + } + return ok +} + +// RemoveOldest drops the least recently used item. +func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) { + if c.list.tail == nil { + return key, value, false + } + key = c.list.tail.v + item := c.items[key] + delete(c.items, key) + c.list.remove(c.list.tail) + return key, item.value, true +} + +// dlist is a doubly-linked list holding items of type T. +type dlist[T any] struct { + head *dlistNode[T] + tail *dlistNode[T] +} + +type dlistNode[T any] struct { + v T + next *dlistNode[T] + prev *dlistNode[T] +} + +// init reinitializes the list, making it empty. +func (l *dlist[T]) init() { + l.head, l.tail = nil, nil +} + +// push adds a new item to the front of the list and returns the +func (l *dlist[T]) push(item T) *dlistNode[T] { + node := &dlistNode[T]{v: item} + l.pushNode(node) + return node +} + +func (l *dlist[T]) pushNode(node *dlistNode[T]) { + if l.head == nil { + // List is empty, new node is head and tail. + l.head = node + l.tail = node + } else { + node.next = l.head + l.head.prev = node + l.head = node + } +} + +// moveToFront makes 'node' the head of the list. +func (l *dlist[T]) moveToFront(node *dlistNode[T]) { + l.remove(node) + l.pushNode(node) +} + +// remove removes an element from the list. +func (l *dlist[T]) remove(node *dlistNode[T]) { + if node.next != nil { + node.next.prev = node.prev + } + if node.prev != nil { + node.prev.next = node.next + } + if l.head == node { + l.head = node.next + } + if l.tail == node { + l.tail = node.prev + } + node.next, node.prev = nil, nil +} + +// removeLast removes the last element of the list. +func (l *dlist[T]) removeLast() *dlistNode[T] { + last := l.tail + if last != nil { + l.remove(last) + } + return last +} diff --git a/common/lru/lru_test.go b/common/lru/lru_test.go new file mode 100644 index 00000000000..0fc3f88eb7b --- /dev/null +++ b/common/lru/lru_test.go @@ -0,0 +1,134 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package lru + +import ( + "testing" +) + +// keys returns all keys in the cache. 
+func (c *Cache[K, V]) keys() []K { + keys := make([]K, 0, len(c.items)) + for node := c.list.head; node != nil; node = node.next { + keys = append(keys, node.v) + } + return keys +} + +func TestCache(t *testing.T) { + cache := NewCache[int, int](128) + + for i := 0; i < 256; i++ { + cache.Add(i, i) + } + if cache.Len() != 128 { + t.Fatalf("bad len: %v", cache.Len()) + } + + for i := 0; i < 128; i++ { + _, ok := cache.Get(i) + if ok { + t.Fatalf("%d should be evicted", i) + } + } + for i := 128; i < 256; i++ { + _, ok := cache.Get(i) + if !ok { + t.Fatalf("%d should not be evicted", i) + } + } + + for i := 128; i < 192; i++ { + ok := cache.Remove(i) + if !ok { + t.Fatalf("%d should be in cache", i) + } + ok = cache.Remove(i) + if ok { + t.Fatalf("%d should not be in cache", i) + } + _, ok = cache.Get(i) + if ok { + t.Fatalf("%d should be deleted", i) + } + } + + cache.Purge() + if cache.Len() != 0 { + t.Fatalf("bad len: %v", cache.Len()) + } + if _, ok := cache.Get(200); ok { + t.Fatalf("should contain nothing") + } +} + +// This test checks GetOldest and RemoveOldest. +func TestCacheGetOldest(t *testing.T) { + cache := NewCache[int, int](128) + for i := 0; i < 256; i++ { + cache.Add(i, i) + } + + k, _, ok := cache.GetOldest() + if !ok { + t.Fatalf("missing") + } + if k != 128 { + t.Fatalf("bad: %v", k) + } + + k, _, ok = cache.RemoveOldest() + if !ok { + t.Fatalf("missing") + } + if k != 128 { + t.Fatalf("bad: %v", k) + } + + k, _, ok = cache.RemoveOldest() + if !ok { + t.Fatalf("missing oldest item") + } + if k != 129 { + t.Fatalf("wrong oldest item: %v", k) + } +} + +// Test that Add returns true/false if an eviction occurred +func TestCacheAddReturnValue(t *testing.T) { + cache := NewCache[int, int](1) + if cache.Add(1, 1) { + t.Errorf("first add shouldn't have evicted") + } + if !cache.Add(2, 2) { + t.Errorf("second add should have evicted") + } +} + +// This test verifies that Contains doesn't change item recency. +func TestCacheContains(t *testing.T) { + cache := NewCache[int, int](2) + cache.Add(1, 1) + cache.Add(2, 2) + if !cache.Contains(1) { + t.Errorf("1 should be in the cache") + } + cache.Add(3, 3) + if cache.Contains(1) { + t.Errorf("Contains should not have updated recency of 1") + } +} From 3681d46746276fa33c58282e5ae6ae9cf3c89ff4 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 16:23:59 +0100 Subject: [PATCH 02/23] common/lru: use new LRU implementation in SizeConstrainedLRU --- common/lru/blob_lru.go | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/common/lru/blob_lru.go b/common/lru/blob_lru.go index b24684256c8..ddcc1f3e945 100644 --- a/common/lru/blob_lru.go +++ b/common/lru/blob_lru.go @@ -21,7 +21,6 @@ import ( "sync" "github.com/ethereum/go-ethereum/common" - "github.com/hashicorp/golang-lru/simplelru" ) // SizeConstrainedLRU is a wrapper around simplelru.LRU. The simplelru.LRU is capable @@ -32,20 +31,16 @@ import ( type SizeConstrainedLRU struct { size uint64 maxSize uint64 - lru *simplelru.LRU + lru Cache[common.Hash, []byte] lock sync.Mutex } // NewSizeConstrainedLRU creates a new SizeConstrainedLRU. func NewSizeConstrainedLRU(max uint64) *SizeConstrainedLRU { - lru, err := simplelru.NewLRU(math.MaxInt, nil) - if err != nil { - panic(err) - } return &SizeConstrainedLRU{ size: 0, maxSize: max, - lru: lru, + lru: NewCache[common.Hash, []byte](math.MaxInt), } } @@ -68,7 +63,7 @@ func (c *SizeConstrainedLRU) Add(key common.Hash, value []byte) (evicted bool) { // list is now empty. 
Break break } - targetSize -= uint64(len(v.([]byte))) + targetSize -= uint64(len(v)) } c.size = targetSize } @@ -82,7 +77,7 @@ func (c *SizeConstrainedLRU) Get(key common.Hash) []byte { defer c.lock.Unlock() if v, ok := c.lru.Get(key); ok { - return v.([]byte) + return v } return nil } From a05a7ab6c8bfd7f83894ce83ce8930e1586c92e6 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 19:00:43 +0100 Subject: [PATCH 03/23] common/lru: update comment for SizeConstrainedLRU --- common/lru/blob_lru.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/common/lru/blob_lru.go b/common/lru/blob_lru.go index ddcc1f3e945..85e9a9b330e 100644 --- a/common/lru/blob_lru.go +++ b/common/lru/blob_lru.go @@ -23,9 +23,10 @@ import ( "github.com/ethereum/go-ethereum/common" ) -// SizeConstrainedLRU is a wrapper around simplelru.LRU. The simplelru.LRU is capable -// of item-count constraints, but is not capable of enforcing a byte-size constraint, -// hence this wrapper. +// SizeConstrainedLRU is a LRU cache where capacity is in bytes (instead of item count). +// When the cache is at capacity, and a new item is added, the older items are evicted +// until the size constraint can be met. +// // OBS: This cache assumes that items are content-addressed: keys are unique per content. // In other words: two Add(..) with the same key K, will always have the same value V. type SizeConstrainedLRU struct { From ac86b9ea26b3f346fd2057cb10285711c6d98569 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 19:44:16 +0100 Subject: [PATCH 04/23] common/lru: rename new types --- common/lru/basiclru.go | 206 +++++++++++++++++++ common/lru/{lru_test.go => basiclru_test.go} | 10 +- common/lru/blob_lru.go | 4 +- common/lru/locked.go | 85 -------- common/lru/lru.go | 201 ++++-------------- 5 files changed, 253 insertions(+), 253 deletions(-) create mode 100644 common/lru/basiclru.go rename common/lru/{lru_test.go => basiclru_test.go} (94%) delete mode 100644 common/lru/locked.go diff --git a/common/lru/basiclru.go b/common/lru/basiclru.go new file mode 100644 index 00000000000..87263f04cc5 --- /dev/null +++ b/common/lru/basiclru.go @@ -0,0 +1,206 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package lru implements generically-typed LRU caches. +package lru + +// BasicLRU is a simple LRU cache. +// +// This type is not safe for concurrent use. +// The zero value is not valid, instances must be created using NewCache. +type BasicLRU[K comparable, V any] struct { + list dlist[K] + items map[K]cacheItem[K, V] + cap int +} + +type cacheItem[K any, V any] struct { + value V + node *dlistNode[K] +} + +// NewBasicLRU creates a new LRU cache. 
+func NewBasicLRU[K comparable, V any](capacity int) BasicLRU[K, V] { + if capacity < 0 { + capacity = 1 + } + return BasicLRU[K, V]{ + items: make(map[K]cacheItem[K, V]), + cap: capacity, + } +} + +// Add adds a value to the cache. Returns true if an item was evicted to store the new item. +func (c *BasicLRU[K, V]) Add(key K, value V) (evicted bool) { + item, ok := c.items[key] + if ok { + // Already exists in cache. + item.value = value + c.list.moveToFront(item.node) + return false + } + + if c.Len() >= c.cap { + // Evict an item. + node := c.list.removeLast() + delete(c.items, node.v) + evicted = true + } + + // Store the new item. + item = cacheItem[K, V]{value: value, node: c.list.push(key)} + c.items[key] = item + return evicted +} + +// Contains reports whether the given key exists in the cache. +func (c *BasicLRU[K, V]) Contains(key K) bool { + _, ok := c.items[key] + return ok +} + +// Get retrieves a value from the cache. This marks the key as recently used. +func (c *BasicLRU[K, V]) Get(key K) (value V, ok bool) { + item, ok := c.items[key] + if !ok { + return value, false + } + c.list.moveToFront(item.node) + return item.value, true +} + +// GetOldest retrieves the least-recently-used item. +// Note that this does not update the item's recency. +func (c *BasicLRU[K, V]) GetOldest() (key K, value V, ok bool) { + if c.list.tail == nil { + return key, value, false + } + key = c.list.tail.v + item := c.items[key] + return key, item.value, true +} + +// Len returns the current number of items in the cache. +func (c *BasicLRU[K, V]) Len() int { + return len(c.items) +} + +// Peek retrieves a value from the cache, but does not mark the key as recently used. +func (c *BasicLRU[K, V]) Peek(key K) (value V, ok bool) { + item, ok := c.items[key] + if !ok { + return value, false + } + return item.value, true +} + +// Purge empties the cache. +func (c *BasicLRU[K, V]) Purge() { + c.list.init() + for k := range c.items { + delete(c.items, k) + } +} + +// Remove drops an item from the cache. Returns true if the key was present in cache. +func (c *BasicLRU[K, V]) Remove(key K) bool { + item, ok := c.items[key] + if ok { + delete(c.items, key) + c.list.remove(item.node) + } + return ok +} + +// RemoveOldest drops the least recently used item. +func (c *BasicLRU[K, V]) RemoveOldest() (key K, value V, ok bool) { + if c.list.tail == nil { + return key, value, false + } + key = c.list.tail.v + item := c.items[key] + delete(c.items, key) + c.list.remove(c.list.tail) + return key, item.value, true +} + +// dlist is a doubly-linked list holding items of type T. +type dlist[T any] struct { + head *dlistNode[T] + tail *dlistNode[T] +} + +type dlistNode[T any] struct { + v T + next *dlistNode[T] + prev *dlistNode[T] +} + +// init reinitializes the list, making it empty. +func (l *dlist[T]) init() { + l.head, l.tail = nil, nil +} + +// push adds a new item to the front of the list and returns the +func (l *dlist[T]) push(item T) *dlistNode[T] { + node := &dlistNode[T]{v: item} + l.pushNode(node) + return node +} + +func (l *dlist[T]) pushNode(node *dlistNode[T]) { + if l.head == nil { + // List is empty, new node is head and tail. + l.head = node + l.tail = node + } else { + node.next = l.head + l.head.prev = node + l.head = node + } +} + +// moveToFront makes 'node' the head of the list. +func (l *dlist[T]) moveToFront(node *dlistNode[T]) { + l.remove(node) + l.pushNode(node) +} + +// remove removes an element from the list. 
+func (l *dlist[T]) remove(node *dlistNode[T]) { + if node.next != nil { + node.next.prev = node.prev + } + if node.prev != nil { + node.prev.next = node.next + } + if l.head == node { + l.head = node.next + } + if l.tail == node { + l.tail = node.prev + } + node.next, node.prev = nil, nil +} + +// removeLast removes the last element of the list. +func (l *dlist[T]) removeLast() *dlistNode[T] { + last := l.tail + if last != nil { + l.remove(last) + } + return last +} diff --git a/common/lru/lru_test.go b/common/lru/basiclru_test.go similarity index 94% rename from common/lru/lru_test.go rename to common/lru/basiclru_test.go index 0fc3f88eb7b..0bc5d0f82cf 100644 --- a/common/lru/lru_test.go +++ b/common/lru/basiclru_test.go @@ -21,7 +21,7 @@ import ( ) // keys returns all keys in the cache. -func (c *Cache[K, V]) keys() []K { +func (c *BasicLRU[K, V]) keys() []K { keys := make([]K, 0, len(c.items)) for node := c.list.head; node != nil; node = node.next { keys = append(keys, node.v) @@ -30,7 +30,7 @@ func (c *Cache[K, V]) keys() []K { } func TestCache(t *testing.T) { - cache := NewCache[int, int](128) + cache := New[int, int](128) for i := 0; i < 256; i++ { cache.Add(i, i) @@ -78,7 +78,7 @@ func TestCache(t *testing.T) { // This test checks GetOldest and RemoveOldest. func TestCacheGetOldest(t *testing.T) { - cache := NewCache[int, int](128) + cache := New[int, int](128) for i := 0; i < 256; i++ { cache.Add(i, i) } @@ -110,7 +110,7 @@ func TestCacheGetOldest(t *testing.T) { // Test that Add returns true/false if an eviction occurred func TestCacheAddReturnValue(t *testing.T) { - cache := NewCache[int, int](1) + cache := New[int, int](1) if cache.Add(1, 1) { t.Errorf("first add shouldn't have evicted") } @@ -121,7 +121,7 @@ func TestCacheAddReturnValue(t *testing.T) { // This test verifies that Contains doesn't change item recency. func TestCacheContains(t *testing.T) { - cache := NewCache[int, int](2) + cache := New[int, int](2) cache.Add(1, 1) cache.Add(2, 2) if !cache.Contains(1) { diff --git a/common/lru/blob_lru.go b/common/lru/blob_lru.go index 85e9a9b330e..d853d2efd01 100644 --- a/common/lru/blob_lru.go +++ b/common/lru/blob_lru.go @@ -32,7 +32,7 @@ import ( type SizeConstrainedLRU struct { size uint64 maxSize uint64 - lru Cache[common.Hash, []byte] + lru BasicLRU[common.Hash, []byte] lock sync.Mutex } @@ -41,7 +41,7 @@ func NewSizeConstrainedLRU(max uint64) *SizeConstrainedLRU { return &SizeConstrainedLRU{ size: 0, maxSize: max, - lru: NewCache[common.Hash, []byte](math.MaxInt), + lru: NewBasicLRU[common.Hash, []byte](math.MaxInt), } } diff --git a/common/lru/locked.go b/common/lru/locked.go deleted file mode 100644 index 7e6ca2ad6be..00000000000 --- a/common/lru/locked.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package lru - -import "sync" - -// LockedCache is an LRU cache protected by a mutex. -type LockedCache[K comparable, V any] struct { - cache Cache[K, V] - mu sync.Mutex -} - -func NewLockedCache[K comparable, V any](capacity int) *LockedCache[K, V] { - return &LockedCache[K, V]{cache: NewCache[K, V](capacity)} -} - -// Add adds a value to the cache. Returns true if an item was evicted to store the new item. -func (c *LockedCache[K, V]) Add(key K, value V) (evicted bool) { - c.mu.Lock() - defer c.mu.Unlock() - - return c.cache.Add(key, value) -} - -// Contains reports whether the given key exists in the cache. -func (c *LockedCache[K, V]) Contains(key K) bool { - c.mu.Lock() - defer c.mu.Unlock() - - return c.cache.Contains(key) -} - -// Get retrieves a value from the cache. This marks the key as recently used. -func (c *LockedCache[K, V]) Get(key K) (value V, ok bool) { - c.mu.Lock() - defer c.mu.Unlock() - - return c.cache.Get(key) -} - -// Len returns the current number of items in the cache. -func (c *LockedCache[K, V]) Len() int { - c.mu.Lock() - defer c.mu.Unlock() - - return c.cache.Len() -} - -// Peek retrieves a value from the cache, but does not mark the key as recently used. -func (c *LockedCache[K, V]) Peek(key K) (value V, ok bool) { - c.mu.Lock() - defer c.mu.Unlock() - - return c.cache.Peek(key) -} - -// Purge empties the cache. -func (c *LockedCache[K, V]) Purge() { - c.mu.Lock() - defer c.mu.Unlock() - - c.cache.Purge() -} - -// Remove drops an item from the cache. Returns true if the key was present in cache. -func (c *LockedCache[K, V]) Remove(key K) bool { - c.mu.Lock() - defer c.mu.Unlock() - - return c.cache.Remove(key) -} diff --git a/common/lru/lru.go b/common/lru/lru.go index cb108cb4d9d..225709c9634 100644 --- a/common/lru/lru.go +++ b/common/lru/lru.go @@ -14,193 +14,72 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// Package lru implements generically-typed LRU caches. package lru -// Cache is a simple LRU cache. -// -// This type is not safe for concurrent use. -// The zero value is not valid, instances must be created using NewCache. -type Cache[K comparable, V any] struct { - list dlist[K] - items map[K]cacheItem[K, V] - cap int -} +import "sync" -type cacheItem[K any, V any] struct { - value V - node *dlistNode[K] +// LRU is an LRU cache protected by a mutex. +type LRU[K comparable, V any] struct { + cache BasicLRU[K, V] + mu sync.Mutex } -// NewCache creates a new LRU cache. -func NewCache[K comparable, V any](capacity int) Cache[K, V] { - if capacity < 0 { - capacity = 1 - } - return Cache[K, V]{ - items: make(map[K]cacheItem[K, V]), - cap: capacity, - } +func NewLRU[K comparable, V any](capacity int) *LRU[K, V] { + return &LRU[K, V]{cache: NewBasicLRU[K, V](capacity)} } // Add adds a value to the cache. Returns true if an item was evicted to store the new item. -func (c *Cache[K, V]) Add(key K, value V) (evicted bool) { - item, ok := c.items[key] - if ok { - // Already exists in cache. - item.value = value - c.list.moveToFront(item.node) - return false - } - - if c.Len() >= c.cap { - // Evict an item. - node := c.list.removeLast() - delete(c.items, node.v) - evicted = true - } - - // Store the new item. 
- item = cacheItem[K, V]{value: value, node: c.list.push(key)} - c.items[key] = item - return evicted +func (c *LRU[K, V]) Add(key K, value V) (evicted bool) { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Add(key, value) } // Contains reports whether the given key exists in the cache. -func (c *Cache[K, V]) Contains(key K) bool { - _, ok := c.items[key] - return ok +func (c *LRU[K, V]) Contains(key K) bool { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Contains(key) } // Get retrieves a value from the cache. This marks the key as recently used. -func (c *Cache[K, V]) Get(key K) (value V, ok bool) { - item, ok := c.items[key] - if !ok { - return value, false - } - c.list.moveToFront(item.node) - return item.value, true -} +func (c *LRU[K, V]) Get(key K) (value V, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() -// GetOldest retrieves the least-recently-used item. -// Note that this does not update the item's recency. -func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) { - if c.list.tail == nil { - return key, value, false - } - key = c.list.tail.v - item := c.items[key] - return key, item.value, true + return c.cache.Get(key) } // Len returns the current number of items in the cache. -func (c *Cache[K, V]) Len() int { - return len(c.items) -} +func (c *LRU[K, V]) Len() int { + c.mu.Lock() + defer c.mu.Unlock() -// Peek retrieves a value from the cache, but does not mark the key as recently used. -func (c *Cache[K, V]) Peek(key K) (value V, ok bool) { - item, ok := c.items[key] - if !ok { - return value, false - } - return item.value, true -} - -// Purge empties the cache. -func (c *Cache[K, V]) Purge() { - c.list.init() - for k := range c.items { - delete(c.items, k) - } -} - -// Remove drops an item from the cache. Returns true if the key was present in cache. -func (c *Cache[K, V]) Remove(key K) bool { - item, ok := c.items[key] - if ok { - delete(c.items, key) - c.list.remove(item.node) - } - return ok + return c.cache.Len() } -// RemoveOldest drops the least recently used item. -func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) { - if c.list.tail == nil { - return key, value, false - } - key = c.list.tail.v - item := c.items[key] - delete(c.items, key) - c.list.remove(c.list.tail) - return key, item.value, true -} - -// dlist is a doubly-linked list holding items of type T. -type dlist[T any] struct { - head *dlistNode[T] - tail *dlistNode[T] -} - -type dlistNode[T any] struct { - v T - next *dlistNode[T] - prev *dlistNode[T] -} - -// init reinitializes the list, making it empty. -func (l *dlist[T]) init() { - l.head, l.tail = nil, nil -} +// Peek retrieves a value from the cache, but does not mark the key as recently used. +func (c *LRU[K, V]) Peek(key K) (value V, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() -// push adds a new item to the front of the list and returns the -func (l *dlist[T]) push(item T) *dlistNode[T] { - node := &dlistNode[T]{v: item} - l.pushNode(node) - return node + return c.cache.Peek(key) } -func (l *dlist[T]) pushNode(node *dlistNode[T]) { - if l.head == nil { - // List is empty, new node is head and tail. - l.head = node - l.tail = node - } else { - node.next = l.head - l.head.prev = node - l.head = node - } -} +// Purge empties the cache. +func (c *LRU[K, V]) Purge() { + c.mu.Lock() + defer c.mu.Unlock() -// moveToFront makes 'node' the head of the list. -func (l *dlist[T]) moveToFront(node *dlistNode[T]) { - l.remove(node) - l.pushNode(node) + c.cache.Purge() } -// remove removes an element from the list. 
-func (l *dlist[T]) remove(node *dlistNode[T]) { - if node.next != nil { - node.next.prev = node.prev - } - if node.prev != nil { - node.prev.next = node.next - } - if l.head == node { - l.head = node.next - } - if l.tail == node { - l.tail = node.prev - } - node.next, node.prev = nil, nil -} +// Remove drops an item from the cache. Returns true if the key was present in cache. +func (c *LRU[K, V]) Remove(key K) bool { + c.mu.Lock() + defer c.mu.Unlock() -// removeLast removes the last element of the list. -func (l *dlist[T]) removeLast() *dlistNode[T] { - last := l.tail - if last != nil { - l.remove(last) - } - return last + return c.cache.Remove(key) } From 0a721ebc41f9a3a2de563271b75ab4c8679a2812 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 19:53:18 +0100 Subject: [PATCH 05/23] common/lru: parameterize BlobLRU --- common/lru/blob_lru.go | 30 +++++++++++++++--------------- common/lru/blob_lru_test.go | 6 +++--- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/common/lru/blob_lru.go b/common/lru/blob_lru.go index d853d2efd01..14f86ca6cec 100644 --- a/common/lru/blob_lru.go +++ b/common/lru/blob_lru.go @@ -19,29 +19,32 @@ package lru import ( "math" "sync" - - "github.com/ethereum/go-ethereum/common" ) -// SizeConstrainedLRU is a LRU cache where capacity is in bytes (instead of item count). +// BlobType is the type constraint for values stored in BlobLRU. +type BlobType interface { + []byte | string +} + +// BlobLRU is a LRU cache where capacity is in bytes (instead of item count). // When the cache is at capacity, and a new item is added, the older items are evicted // until the size constraint can be met. // // OBS: This cache assumes that items are content-addressed: keys are unique per content. // In other words: two Add(..) with the same key K, will always have the same value V. -type SizeConstrainedLRU struct { +type BlobLRU[K comparable, V BlobType] struct { size uint64 maxSize uint64 - lru BasicLRU[common.Hash, []byte] + lru BasicLRU[K, V] lock sync.Mutex } -// NewSizeConstrainedLRU creates a new SizeConstrainedLRU. -func NewSizeConstrainedLRU(max uint64) *SizeConstrainedLRU { - return &SizeConstrainedLRU{ +// NewBlobLRU creates a new SizeConstrainedLRU. +func NewBlobLRU[K comparable, V BlobType](max uint64) *BlobLRU[K, V] { + return &BlobLRU[K, V]{ size: 0, maxSize: max, - lru: NewBasicLRU[common.Hash, []byte](math.MaxInt), + lru: NewBasicLRU[K, V](math.MaxInt), } } @@ -49,7 +52,7 @@ func NewSizeConstrainedLRU(max uint64) *SizeConstrainedLRU { // OBS: This cache assumes that items are content-addressed: keys are unique per content. // In other words: two Add(..) with the same key K, will always have the same value V. // OBS: The value is _not_ copied on Add, so the caller must not modify it afterwards. -func (c *SizeConstrainedLRU) Add(key common.Hash, value []byte) (evicted bool) { +func (c *BlobLRU[K, V]) Add(key K, value V) (evicted bool) { c.lock.Lock() defer c.lock.Unlock() @@ -73,12 +76,9 @@ func (c *SizeConstrainedLRU) Add(key common.Hash, value []byte) (evicted bool) { } // Get looks up a key's value from the cache. 
-func (c *SizeConstrainedLRU) Get(key common.Hash) []byte { +func (c *BlobLRU[K, V]) Get(key K) (V, bool) { c.lock.Lock() defer c.lock.Unlock() - if v, ok := c.lru.Get(key); ok { - return v - } - return nil + return c.lru.Get(key) } diff --git a/common/lru/blob_lru_test.go b/common/lru/blob_lru_test.go index 4b5e6934022..2ac155f0580 100644 --- a/common/lru/blob_lru_test.go +++ b/common/lru/blob_lru_test.go @@ -31,7 +31,7 @@ func mkHash(i int) common.Hash { } func TestBlobLru(t *testing.T) { - lru := NewSizeConstrainedLRU(100) + lru := NewBlobLRU(100) var want uint64 // Add 11 items of 10 byte each. First item should be swapped out for i := 0; i < 11; i++ { @@ -70,7 +70,7 @@ func TestBlobLru(t *testing.T) { // TestBlobLruOverflow tests what happens when inserting an element exceeding // the max size func TestBlobLruOverflow(t *testing.T) { - lru := NewSizeConstrainedLRU(100) + lru := NewBlobLRU(100) // Add 10 items of 10 byte each, filling the cache for i := 0; i < 10; i++ { k := mkHash(i) @@ -108,7 +108,7 @@ func TestBlobLruOverflow(t *testing.T) { // TestBlobLruSameItem tests what happens when inserting the same k/v multiple times. func TestBlobLruSameItem(t *testing.T) { - lru := NewSizeConstrainedLRU(100) + lru := NewBlobLRU(100) // Add one 10 byte-item 10 times k := mkHash(0) v := fmt.Sprintf("value-%04d", 0) From a1d364fc2911473e25aef7a6111b18d60d68cc7a Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 19:53:31 +0100 Subject: [PATCH 06/23] core/state: port to BlobLRU --- core/state/database.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/core/state/database.go b/core/state/database.go index 9f270bf0f98..07956b2fd95 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -21,12 +21,11 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" - lru2 "github.com/ethereum/go-ethereum/common/lru" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie" - lru "github.com/hashicorp/golang-lru" ) const ( @@ -130,20 +129,19 @@ func NewDatabase(db ethdb.Database) Database { // is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a // large memory cache. func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { - csc, _ := lru.New(codeSizeCacheSize) return &cachingDB{ db: trie.NewDatabaseWithConfig(db, config), disk: db, - codeSizeCache: csc, - codeCache: lru2.NewSizeConstrainedLRU(codeCacheSize), + codeSizeCache: lru.NewLRU[common.Hash, int](codeSizeCacheSize), + codeCache: lru.NewBlobLRU[common.Hash, []byte](codeCacheSize), } } type cachingDB struct { db *trie.Database disk ethdb.KeyValueStore - codeSizeCache *lru.Cache - codeCache *lru2.SizeConstrainedLRU + codeSizeCache *lru.LRU[common.Hash, int] + codeCache *lru.BlobLRU[common.Hash, []byte] } // OpenTrie opens the main account trie at a specific root hash. @@ -176,10 +174,11 @@ func (db *cachingDB) CopyTrie(t Trie) Trie { // ContractCode retrieves a particular contract's code. 
func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) { - if code := db.codeCache.Get(codeHash); len(code) > 0 { + code, _ := db.codeCache.Get(codeHash) + if len(code) > 0 { return code, nil } - code := rawdb.ReadCode(db.disk, codeHash) + code = rawdb.ReadCode(db.disk, codeHash) if len(code) > 0 { db.codeCache.Add(codeHash, code) db.codeSizeCache.Add(codeHash, len(code)) @@ -192,10 +191,11 @@ func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error // code can't be found in the cache, then check the existence with **new** // db scheme. func (db *cachingDB) ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error) { - if code := db.codeCache.Get(codeHash); len(code) > 0 { + code, _ := db.codeCache.Get(codeHash) + if len(code) > 0 { return code, nil } - code := rawdb.ReadCodeWithPrefix(db.disk, codeHash) + code = rawdb.ReadCodeWithPrefix(db.disk, codeHash) if len(code) > 0 { db.codeCache.Add(codeHash, code) db.codeSizeCache.Add(codeHash, len(code)) @@ -207,7 +207,7 @@ func (db *cachingDB) ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]b // ContractCodeSize retrieves a particular contracts code's size. func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) { if cached, ok := db.codeSizeCache.Get(codeHash); ok { - return cached.(int), nil + return cached, nil } code, err := db.ContractCode(addrHash, codeHash) return len(code), err From 944b62cb4aa44c1e9843f3f7ac5af72ae3fbe605 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 20:05:46 +0100 Subject: [PATCH 07/23] common/lru: add Keys --- common/lru/basiclru.go | 9 +++++++++ common/lru/basiclru_test.go | 17 ++++------------- common/lru/lru.go | 8 ++++++++ 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/common/lru/basiclru.go b/common/lru/basiclru.go index 87263f04cc5..e526cc78e36 100644 --- a/common/lru/basiclru.go +++ b/common/lru/basiclru.go @@ -137,6 +137,15 @@ func (c *BasicLRU[K, V]) RemoveOldest() (key K, value V, ok bool) { return key, item.value, true } +// Keys returns all keys in the cache. +func (c *BasicLRU[K, V]) Keys() []K { + keys := make([]K, 0, len(c.items)) + for node := c.list.head; node != nil; node = node.next { + keys = append(keys, node.v) + } + return keys +} + // dlist is a doubly-linked list holding items of type T. type dlist[T any] struct { head *dlistNode[T] diff --git a/common/lru/basiclru_test.go b/common/lru/basiclru_test.go index 0bc5d0f82cf..4c542252434 100644 --- a/common/lru/basiclru_test.go +++ b/common/lru/basiclru_test.go @@ -20,17 +20,8 @@ import ( "testing" ) -// keys returns all keys in the cache. -func (c *BasicLRU[K, V]) keys() []K { - keys := make([]K, 0, len(c.items)) - for node := c.list.head; node != nil; node = node.next { - keys = append(keys, node.v) - } - return keys -} - func TestCache(t *testing.T) { - cache := New[int, int](128) + cache := NewBasicLRU[int, int](128) for i := 0; i < 256; i++ { cache.Add(i, i) @@ -78,7 +69,7 @@ func TestCache(t *testing.T) { // This test checks GetOldest and RemoveOldest. 
func TestCacheGetOldest(t *testing.T) { - cache := New[int, int](128) + cache := NewBasicLRU[int, int](128) for i := 0; i < 256; i++ { cache.Add(i, i) } @@ -110,7 +101,7 @@ func TestCacheGetOldest(t *testing.T) { // Test that Add returns true/false if an eviction occurred func TestCacheAddReturnValue(t *testing.T) { - cache := New[int, int](1) + cache := NewBasicLRU[int, int](1) if cache.Add(1, 1) { t.Errorf("first add shouldn't have evicted") } @@ -121,7 +112,7 @@ func TestCacheAddReturnValue(t *testing.T) { // This test verifies that Contains doesn't change item recency. func TestCacheContains(t *testing.T) { - cache := New[int, int](2) + cache := NewBasicLRU[int, int](2) cache.Add(1, 1) cache.Add(2, 2) if !cache.Contains(1) { diff --git a/common/lru/lru.go b/common/lru/lru.go index 225709c9634..40d2638c6fa 100644 --- a/common/lru/lru.go +++ b/common/lru/lru.go @@ -83,3 +83,11 @@ func (c *LRU[K, V]) Remove(key K) bool { return c.cache.Remove(key) } + +// Keys returns all keys of items currently in the LRU. +func (c *LRU[K, V]) Keys() []K { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Keys() +} From 816bbfa311a30ee1cb5fc475cb666893cb531d98 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 20:05:54 +0100 Subject: [PATCH 08/23] common/lru: allow named types in BlobType --- common/lru/blob_lru.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/lru/blob_lru.go b/common/lru/blob_lru.go index 14f86ca6cec..ba2830df589 100644 --- a/common/lru/blob_lru.go +++ b/common/lru/blob_lru.go @@ -23,7 +23,7 @@ import ( // BlobType is the type constraint for values stored in BlobLRU. type BlobType interface { - []byte | string + ~[]byte | ~string } // BlobLRU is a LRU cache where capacity is in bytes (instead of item count). From 3ffe71758a6ffd64e76e8e15309aa5c6af4af87d Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 20:06:27 +0100 Subject: [PATCH 09/23] core: use common/lru in BlockChain --- core/blockchain.go | 37 +++++++++++++++++-------------------- core/blockchain_reader.go | 11 +++++------ 2 files changed, 22 insertions(+), 26 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 80f8ba76f12..0b836cf20bb 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -30,6 +30,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/consensus" @@ -45,8 +46,8 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" - lru "github.com/hashicorp/golang-lru" ) var ( @@ -200,12 +201,14 @@ type BlockChain struct { currentSafeBlock atomic.Value // Current safe head stateCache state.Database // State database to reuse between imports (contains state cache) - bodyCache *lru.Cache // Cache for the most recent block bodies - bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format - receiptsCache *lru.Cache // Cache for the most recent receipts per block - blockCache *lru.Cache // Cache for the most recent entire blocks - txLookupCache *lru.Cache // Cache for the most recent transaction lookup data. 
- futureBlocks *lru.Cache // future blocks are blocks added for later processing + bodyCache *lru.LRU[common.Hash, *types.Body] + bodyRLPCache *lru.LRU[common.Hash, rlp.RawValue] + receiptsCache *lru.LRU[common.Hash, []*types.Receipt] + blockCache *lru.LRU[common.Hash, *types.Block] + txLookupCache *lru.LRU[common.Hash, *rawdb.LegacyTxLookupEntry] + + // future blocks are blocks added for later processing + futureBlocks *lru.LRU[common.Hash, *types.Block] wg sync.WaitGroup // quit chan struct{} // shutdown signal, closed in Stop. @@ -227,12 +230,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis if cacheConfig == nil { cacheConfig = defaultCacheConfig } - bodyCache, _ := lru.New(bodyCacheLimit) - bodyRLPCache, _ := lru.New(bodyCacheLimit) - receiptsCache, _ := lru.New(receiptsCacheLimit) - blockCache, _ := lru.New(blockCacheLimit) - txLookupCache, _ := lru.New(txLookupCacheLimit) - futureBlocks, _ := lru.New(maxFutureBlocks) // Setup the genesis block, commit the provided genesis specification // to database if the genesis block is not present yet, or load the @@ -261,12 +258,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis }), quit: make(chan struct{}), chainmu: syncx.NewClosableMutex(), - bodyCache: bodyCache, - bodyRLPCache: bodyRLPCache, - receiptsCache: receiptsCache, - blockCache: blockCache, - txLookupCache: txLookupCache, - futureBlocks: futureBlocks, + bodyCache: lru.NewLRU[common.Hash, *types.Body](bodyCacheLimit), + bodyRLPCache: lru.NewLRU[common.Hash, rlp.RawValue](bodyCacheLimit), + receiptsCache: lru.NewLRU[common.Hash, []*types.Receipt](receiptsCacheLimit), + blockCache: lru.NewLRU[common.Hash, *types.Block](blockCacheLimit), + txLookupCache: lru.NewLRU[common.Hash, *rawdb.LegacyTxLookupEntry](txLookupCacheLimit), + futureBlocks: lru.NewLRU[common.Hash, *types.Block](maxFutureBlocks), engine: engine, vmConfig: vmConfig, } @@ -957,7 +954,7 @@ func (bc *BlockChain) procFutureBlocks() { blocks := make([]*types.Block, 0, bc.futureBlocks.Len()) for _, hash := range bc.futureBlocks.Keys() { if block, exist := bc.futureBlocks.Peek(hash); exist { - blocks = append(blocks, block.(*types.Block)) + blocks = append(blocks, block) } } if len(blocks) > 0 { diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index 5814c8a0dae..da948029a13 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -96,8 +96,7 @@ func (bc *BlockChain) GetHeadersFrom(number, count uint64) []rlp.RawValue { func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { // Short circuit if the body's already in the cache, retrieve otherwise if cached, ok := bc.bodyCache.Get(hash); ok { - body := cached.(*types.Body) - return body + return cached } number := bc.hc.GetBlockNumber(hash) if number == nil { @@ -117,7 +116,7 @@ func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { // Short circuit if the body's already in the cache, retrieve otherwise if cached, ok := bc.bodyRLPCache.Get(hash); ok { - return cached.(rlp.RawValue) + return cached } number := bc.hc.GetBlockNumber(hash) if number == nil { @@ -159,7 +158,7 @@ func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool { func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { // Short circuit if the block's already in the cache, retrieve otherwise if block, ok := bc.blockCache.Get(hash); ok { - return block.(*types.Block) + return block } block := 
rawdb.ReadBlock(bc.db, hash, number) if block == nil { @@ -211,7 +210,7 @@ func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*type // GetReceiptsByHash retrieves the receipts for all transactions in a given block. func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { if receipts, ok := bc.receiptsCache.Get(hash); ok { - return receipts.(types.Receipts) + return receipts } number := rawdb.ReadHeaderNumber(bc.db, hash) if number == nil { @@ -255,7 +254,7 @@ func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, max func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry { // Short circuit if the txlookup already in the cache, retrieve otherwise if lookup, exist := bc.txLookupCache.Get(hash); exist { - return lookup.(*rawdb.LegacyTxLookupEntry) + return lookup } tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash) if tx == nil { From fd39b720a8d4ba965967845f576fae16b3ea6ec8 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 20:10:20 +0100 Subject: [PATCH 10/23] core: use common/lru in HeaderChain --- core/headerchain.go | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/core/headerchain.go b/core/headerchain.go index d8c415f336b..fb8ee31c0d4 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -27,6 +27,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" @@ -34,7 +35,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" - lru "github.com/hashicorp/golang-lru" ) const ( @@ -64,9 +64,9 @@ type HeaderChain struct { currentHeader atomic.Value // Current head of the header chain (may be above the block chain!) currentHeaderHash common.Hash // Hash of the current head of the header chain (prevent recomputing all the time) - headerCache *lru.Cache // Cache for the most recent block headers - tdCache *lru.Cache // Cache for the most recent block total difficulties - numberCache *lru.Cache // Cache for the most recent block numbers + headerCache *lru.LRU[common.Hash, *types.Header] + tdCache *lru.LRU[common.Hash, *big.Int] // most recent total difficulties + numberCache *lru.LRU[common.Hash, uint64] // most recent block numbers procInterrupt func() bool @@ -77,10 +77,6 @@ type HeaderChain struct { // NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points // to the parent's interrupt semaphore. 
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) { - headerCache, _ := lru.New(headerCacheLimit) - tdCache, _ := lru.New(tdCacheLimit) - numberCache, _ := lru.New(numberCacheLimit) - // Seed a fast but crypto originating random generator seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) if err != nil { @@ -89,9 +85,9 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c hc := &HeaderChain{ config: config, chainDb: chainDb, - headerCache: headerCache, - tdCache: tdCache, - numberCache: numberCache, + headerCache: lru.NewLRU[common.Hash, *types.Header](headerCacheLimit), + tdCache: lru.NewLRU[common.Hash, *big.Int](tdCacheLimit), + numberCache: lru.NewLRU[common.Hash, uint64](numberCacheLimit), procInterrupt: procInterrupt, rand: mrand.New(mrand.NewSource(seed.Int64())), engine: engine, @@ -115,8 +111,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c // from the cache or database func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 { if cached, ok := hc.numberCache.Get(hash); ok { - number := cached.(uint64) - return &number + return &cached } number := rawdb.ReadHeaderNumber(hc.chainDb, hash) if number != nil { @@ -442,7 +437,7 @@ func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, ma func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int { // Short circuit if the td's already in the cache, retrieve otherwise if cached, ok := hc.tdCache.Get(hash); ok { - return cached.(*big.Int) + return cached } td := rawdb.ReadTd(hc.chainDb, hash, number) if td == nil { @@ -458,7 +453,7 @@ func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int { func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header { // Short circuit if the header's already in the cache, retrieve otherwise if header, ok := hc.headerCache.Get(hash); ok { - return header.(*types.Header) + return header } header := rawdb.ReadHeader(hc.chainDb, hash, number) if header == nil { @@ -525,10 +520,9 @@ func (hc *HeaderChain) GetHeadersFrom(number, count uint64) []rlp.RawValue { if !ok { break } - h := header.(*types.Header) - rlpData, _ := rlp.EncodeToBytes(h) + rlpData, _ := rlp.EncodeToBytes(header) headers = append(headers, rlpData) - hash = h.ParentHash + hash = header.ParentHash count-- number-- } From 3003c573019adb1681201910f306130e3f234874 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 20:19:39 +0100 Subject: [PATCH 11/23] common/lru: fix blob LRU tests --- common/lru/blob_lru_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/common/lru/blob_lru_test.go b/common/lru/blob_lru_test.go index 2ac155f0580..58369c14223 100644 --- a/common/lru/blob_lru_test.go +++ b/common/lru/blob_lru_test.go @@ -31,7 +31,7 @@ func mkHash(i int) common.Hash { } func TestBlobLru(t *testing.T) { - lru := NewBlobLRU(100) + lru := NewBlobLRU[common.Hash, []byte](100) var want uint64 // Add 11 items of 10 byte each. 
First item should be swapped out for i := 0; i < 11; i++ { @@ -49,7 +49,7 @@ func TestBlobLru(t *testing.T) { // Zero:th should be evicted { k := mkHash(0) - if val := lru.Get(k); val != nil { + if _, ok := lru.Get(k); ok { t.Fatalf("should be evicted: %v", k) } } @@ -57,8 +57,8 @@ func TestBlobLru(t *testing.T) { for i := 1; i < 11; i++ { k := mkHash(i) want := fmt.Sprintf("value-%04d", i) - have := lru.Get(k) - if have == nil { + have, ok := lru.Get(k) + if !ok { t.Fatalf("missing key %v", k) } if string(have) != want { @@ -70,7 +70,7 @@ func TestBlobLru(t *testing.T) { // TestBlobLruOverflow tests what happens when inserting an element exceeding // the max size func TestBlobLruOverflow(t *testing.T) { - lru := NewBlobLRU(100) + lru := NewBlobLRU[common.Hash, []byte](100) // Add 10 items of 10 byte each, filling the cache for i := 0; i < 10; i++ { k := mkHash(i) @@ -86,7 +86,7 @@ func TestBlobLruOverflow(t *testing.T) { // Elems 0-9 should be missing for i := 1; i < 10; i++ { k := mkHash(i) - if val := lru.Get(k); val != nil { + if _, ok := lru.Get(k); ok { t.Fatalf("should be evicted: %v", k) } } @@ -108,7 +108,7 @@ func TestBlobLruOverflow(t *testing.T) { // TestBlobLruSameItem tests what happens when inserting the same k/v multiple times. func TestBlobLruSameItem(t *testing.T) { - lru := NewBlobLRU(100) + lru := NewBlobLRU[common.Hash, []byte](100) // Add one 10 byte-item 10 times k := mkHash(0) v := fmt.Sprintf("value-%04d", 0) From f9e87bf3ca3d9fd2852d153242f5803b78483d9d Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 20:28:17 +0100 Subject: [PATCH 12/23] p2p/dnsdisc: use common/lru --- p2p/dnsdisc/client.go | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/p2p/dnsdisc/client.go b/p2p/dnsdisc/client.go index 3f914d6e941..53189e9312d 100644 --- a/p2p/dnsdisc/client.go +++ b/p2p/dnsdisc/client.go @@ -27,12 +27,12 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" - lru "github.com/hashicorp/golang-lru" "golang.org/x/sync/singleflight" "golang.org/x/time/rate" ) @@ -41,7 +41,7 @@ import ( type Client struct { cfg Config clock mclock.Clock - entries *lru.Cache + entries *lru.LRU[string, entry] ratelimit *rate.Limiter singleflight singleflight.Group } @@ -96,14 +96,10 @@ func (cfg Config) withDefaults() Config { // NewClient creates a client. 
func NewClient(cfg Config) *Client { cfg = cfg.withDefaults() - cache, err := lru.New(cfg.CacheLimit) - if err != nil { - panic(err) - } rlimit := rate.NewLimiter(rate.Limit(cfg.RateLimit), 10) return &Client{ cfg: cfg, - entries: cache, + entries: lru.NewLRU[string, entry](cfg.CacheLimit), clock: mclock.System{}, ratelimit: rlimit, } @@ -176,7 +172,7 @@ func (c *Client) resolveEntry(ctx context.Context, domain, hash string) (entry, } cacheKey := truncateHash(hash) if e, ok := c.entries.Get(cacheKey); ok { - return e.(entry), nil + return e, nil } ei, err, _ := c.singleflight.Do(cacheKey, func() (interface{}, error) { From bc96a3ed41b7c791ca3bdaa19fe433aa258d4340 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 20:31:23 +0100 Subject: [PATCH 13/23] p2p/discover/v5wire: use common/lru --- p2p/discover/v5wire/session.go | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/p2p/discover/v5wire/session.go b/p2p/discover/v5wire/session.go index d52b5c11810..862c21fcee9 100644 --- a/p2p/discover/v5wire/session.go +++ b/p2p/discover/v5wire/session.go @@ -22,10 +22,10 @@ import ( "encoding/binary" "time" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/hashicorp/golang-lru/simplelru" ) const handshakeTimeout = time.Second @@ -33,7 +33,7 @@ const handshakeTimeout = time.Second // The SessionCache keeps negotiated encryption keys and // state for in-progress handshakes in the Discovery v5 wire protocol. type SessionCache struct { - sessions *simplelru.LRU + sessions lru.BasicLRU[sessionID, *session] handshakes map[sessionID]*Whoareyou clock mclock.Clock @@ -62,12 +62,8 @@ func (s *session) keysFlipped() *session { } func NewSessionCache(maxItems int, clock mclock.Clock) *SessionCache { - cache, err := simplelru.NewLRU(maxItems, nil) - if err != nil { - panic("can't create session cache") - } return &SessionCache{ - sessions: cache, + sessions: lru.NewBasicLRU[sessionID, *session](maxItems), handshakes: make(map[sessionID]*Whoareyou), clock: clock, nonceGen: generateNonce, @@ -95,11 +91,8 @@ func (sc *SessionCache) nextNonce(s *session) (Nonce, error) { // session returns the current session for the given node, if any. func (sc *SessionCache) session(id enode.ID, addr string) *session { - item, ok := sc.sessions.Get(sessionID{id, addr}) - if !ok { - return nil - } - return item.(*session) + item, _ := sc.sessions.Get(sessionID{id, addr}) + return item } // readKey returns the current read key for the given node. 
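The ports in this series — core/state, core, p2p/dnsdisc and p2p/discover/v5wire above, eth/filters in the next commit — all apply the same mechanical change: the untyped hashicorp cache, whose Get hands back an interface{} that must be type-asserted, is swapped for the generic common/lru type instantiated with concrete key and value types. Below is a minimal sketch of that before/after shape; it is not taken from the patch, the helper names (lookupOld, lookupNew, newCaches) and the *types.Header value type are purely illustrative, while the cache APIs are the ones added and removed in the diffs above.

    package example

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/common/lru"
        "github.com/ethereum/go-ethereum/core/types"
        hashlru "github.com/hashicorp/golang-lru"
    )

    // lookupOld is a cache hit against the untyped hashicorp cache:
    // Get returns an interface{} that must be asserted to the stored type.
    func lookupOld(cache *hashlru.Cache, hash common.Hash) *types.Header {
        if v, ok := cache.Get(hash); ok {
            return v.(*types.Header)
        }
        return nil
    }

    // lookupNew is the same hit against the generic replacement: the value
    // comes back as *types.Header directly, with no assertion.
    func lookupNew(cache *lru.LRU[common.Hash, *types.Header], hash common.Hash) *types.Header {
        if h, ok := cache.Get(hash); ok {
            return h
        }
        return nil
    }

    // newCaches shows the two constructors side by side: the old one returns
    // an error that callers discarded, the new one cannot fail.
    func newCaches(limit int) (*hashlru.Cache, *lru.LRU[common.Hash, *types.Header]) {
        untyped, _ := hashlru.New(limit)
        typed := lru.NewLRU[common.Hash, *types.Header](limit)
        return untyped, typed
    }

This is what the "drop-in replacement" claim in the first commit message amounts to at the call sites: the method names (Add, Get, Contains, Peek, Len, Keys, Purge, Remove) carry over, so each port only touches the field type, the constructor, and the now-redundant type assertions.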
From 4fdb2fed34590b0c7860a04ba44ce0ab6fcab370 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 22:27:13 +0100 Subject: [PATCH 14/23] eth/filters: use common/lru --- eth/filters/filter_system.go | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index e86a67abfda..cd4e10607a5 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/bloombits" "github.com/ethereum/go-ethereum/core/rawdb" @@ -34,7 +35,6 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" - lru "github.com/hashicorp/golang-lru" ) // Config represents the configuration of the filter system. @@ -74,21 +74,16 @@ type Backend interface { // FilterSystem holds resources shared by all filters. type FilterSystem struct { backend Backend - logsCache *lru.Cache + logsCache *lru.LRU[common.Hash, [][]*types.Log] cfg *Config } // NewFilterSystem creates a filter system. func NewFilterSystem(backend Backend, config Config) *FilterSystem { config = config.withDefaults() - - cache, err := lru.New(config.LogCacheSize) - if err != nil { - panic(err) - } return &FilterSystem{ backend: backend, - logsCache: cache, + logsCache: lru.NewLRU[common.Hash, [][]*types.Log](config.LogCacheSize), cfg: &config, } } @@ -97,7 +92,7 @@ func NewFilterSystem(backend Backend, config Config) *FilterSystem { func (sys *FilterSystem) cachedGetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) { cached, ok := sys.logsCache.Get(blockHash) if ok { - return cached.([][]*types.Log), nil + return cached, nil } logs, err := sys.backend.GetLogs(ctx, blockHash, number) From 85483145a7b8a538c9b8206714c0a3b52a768a85 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 11 Nov 2022 22:40:47 +0100 Subject: [PATCH 15/23] common/lru: fix bug with Add for existing key --- common/lru/basiclru.go | 4 ++-- common/lru/basiclru_test.go | 12 ++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/common/lru/basiclru.go b/common/lru/basiclru.go index e526cc78e36..89a0f9169e1 100644 --- a/common/lru/basiclru.go +++ b/common/lru/basiclru.go @@ -49,6 +49,7 @@ func (c *BasicLRU[K, V]) Add(key K, value V) (evicted bool) { if ok { // Already exists in cache. item.value = value + c.items[key] = item c.list.moveToFront(item.node) return false } @@ -61,8 +62,7 @@ func (c *BasicLRU[K, V]) Add(key K, value V) (evicted bool) { } // Store the new item. - item = cacheItem[K, V]{value: value, node: c.list.push(key)} - c.items[key] = item + c.items[key] = cacheItem[K, V]{value, c.list.push(key)} return evicted } diff --git a/common/lru/basiclru_test.go b/common/lru/basiclru_test.go index 4c542252434..21f9cec78a0 100644 --- a/common/lru/basiclru_test.go +++ b/common/lru/basiclru_test.go @@ -67,6 +67,18 @@ func TestCache(t *testing.T) { } } +func TestBasicLRUAddExistingKey(t *testing.T) { + cache := NewBasicLRU[int, int](1) + + cache.Add(1, 1) + cache.Add(1, 2) + + v, _ := cache.Get(1) + if v != 2 { + t.Fatal("wrong value:", v) + } +} + // This test checks GetOldest and RemoveOldest. 
func TestCacheGetOldest(t *testing.T) { cache := NewBasicLRU[int, int](128) From 5808a0393f9c1270a384a467eb55c77a80dfd59a Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Sat, 12 Nov 2022 01:33:42 +0100 Subject: [PATCH 16/23] common/lru: add some list optimizations Two optimizations here: - Code to add/remove elements is simplified because the list is now 'anchored' in a root element. - In case of eviction, the list element used by the evicted item is re-used, and no allocations are performed by Add. --- common/lru/basiclru.go | 142 ++++++++++++++++++++++------------------- 1 file changed, 75 insertions(+), 67 deletions(-) diff --git a/common/lru/basiclru.go b/common/lru/basiclru.go index 89a0f9169e1..644bb70bd07 100644 --- a/common/lru/basiclru.go +++ b/common/lru/basiclru.go @@ -22,14 +22,14 @@ package lru // This type is not safe for concurrent use. // The zero value is not valid, instances must be created using NewCache. type BasicLRU[K comparable, V any] struct { - list dlist[K] + list *list[K] items map[K]cacheItem[K, V] cap int } type cacheItem[K any, V any] struct { value V - node *dlistNode[K] + elem *listElem[K] } // NewBasicLRU creates a new LRU cache. @@ -37,10 +37,12 @@ func NewBasicLRU[K comparable, V any](capacity int) BasicLRU[K, V] { if capacity < 0 { capacity = 1 } - return BasicLRU[K, V]{ + c := BasicLRU[K, V]{ items: make(map[K]cacheItem[K, V]), + list: newList[K](), cap: capacity, } + return c } // Add adds a value to the cache. Returns true if an item was evicted to store the new item. @@ -50,19 +52,24 @@ func (c *BasicLRU[K, V]) Add(key K, value V) (evicted bool) { // Already exists in cache. item.value = value c.items[key] = item - c.list.moveToFront(item.node) + c.list.moveToFront(item.elem) return false } + var elem *listElem[K] if c.Len() >= c.cap { - // Evict an item. - node := c.list.removeLast() - delete(c.items, node.v) + elem = c.list.removeLast() + delete(c.items, elem.v) evicted = true + } else { + elem = new(listElem[K]) } // Store the new item. - c.items[key] = cacheItem[K, V]{value, c.list.push(key)} + // Note that, if another item was evicted, we re-use its list element here. + elem.v = key + c.items[key] = cacheItem[K, V]{value, elem} + c.list.pushElem(elem) return evicted } @@ -78,17 +85,18 @@ func (c *BasicLRU[K, V]) Get(key K) (value V, ok bool) { if !ok { return value, false } - c.list.moveToFront(item.node) + c.list.moveToFront(item.elem) return item.value, true } // GetOldest retrieves the least-recently-used item. // Note that this does not update the item's recency. func (c *BasicLRU[K, V]) GetOldest() (key K, value V, ok bool) { - if c.list.tail == nil { + lastElem := c.list.last() + if lastElem == nil { return key, value, false } - key = c.list.tail.v + key = lastElem.v item := c.items[key] return key, item.value, true } @@ -101,10 +109,7 @@ func (c *BasicLRU[K, V]) Len() int { // Peek retrieves a value from the cache, but does not mark the key as recently used. func (c *BasicLRU[K, V]) Peek(key K) (value V, ok bool) { item, ok := c.items[key] - if !ok { - return value, false - } - return item.value, true + return item.value, ok } // Purge empties the cache. @@ -120,96 +125,99 @@ func (c *BasicLRU[K, V]) Remove(key K) bool { item, ok := c.items[key] if ok { delete(c.items, key) - c.list.remove(item.node) + c.list.remove(item.elem) } return ok } // RemoveOldest drops the least recently used item. 
 func (c *BasicLRU[K, V]) RemoveOldest() (key K, value V, ok bool) {
-	if c.list.tail == nil {
+	lastElem := c.list.last()
+	if lastElem == nil {
 		return key, value, false
 	}
-	key = c.list.tail.v
+
+	key = lastElem.v
 	item := c.items[key]
 	delete(c.items, key)
-	c.list.remove(c.list.tail)
+	c.list.remove(lastElem)
 	return key, item.value, true
 }

 // Keys returns all keys in the cache.
 func (c *BasicLRU[K, V]) Keys() []K {
 	keys := make([]K, 0, len(c.items))
-	for node := c.list.head; node != nil; node = node.next {
-		keys = append(keys, node.v)
-	}
-	return keys
+	return c.list.appendTo(keys)
 }

-// dlist is a doubly-linked list holding items of type T.
-type dlist[T any] struct {
-	head *dlistNode[T]
-	tail *dlistNode[T]
+// list is a doubly-linked list holding items of type T.
+// The zero value is not valid, use newList to create lists.
+type list[T any] struct {
+	root listElem[T]
 }

-type dlistNode[T any] struct {
+type listElem[T any] struct {
+	next *listElem[T]
+	prev *listElem[T]
 	v T
-	next *dlistNode[T]
-	prev *dlistNode[T]
 }

-// init reinitializes the list, making it empty.
-func (l *dlist[T]) init() {
-	l.head, l.tail = nil, nil
+func newList[T any]() *list[T] {
+	l := new(list[T])
+	l.init()
+	return l
 }

-// push adds a new item to the front of the list and returns the
-func (l *dlist[T]) push(item T) *dlistNode[T] {
-	node := &dlistNode[T]{v: item}
-	l.pushNode(node)
-	return node
+// init reinitializes the list, making it empty.
+func (l *list[T]) init() {
+	l.root.next = &l.root
+	l.root.prev = &l.root
 }

-func (l *dlist[T]) pushNode(node *dlistNode[T]) {
-	if l.head == nil {
-		// List is empty, new node is head and tail.
-		l.head = node
-		l.tail = node
-	} else {
-		node.next = l.head
-		l.head.prev = node
-		l.head = node
-	}
+// push adds an element to the front of the list.
+func (l *list[T]) pushElem(e *listElem[T]) {
+	e.prev = &l.root
+	e.next = l.root.next
+	l.root.next = e
+	e.next.prev = e
 }

 // moveToFront makes 'node' the head of the list.
-func (l *dlist[T]) moveToFront(node *dlistNode[T]) {
-	l.remove(node)
-	l.pushNode(node)
+func (l *list[T]) moveToFront(e *listElem[T]) {
+	e.prev.next = e.next
+	e.next.prev = e.prev
+	l.pushElem(e)
 }

 // remove removes an element from the list.
-func (l *dlist[T]) remove(node *dlistNode[T]) {
-	if node.next != nil {
-		node.next.prev = node.prev
-	}
-	if node.prev != nil {
-		node.prev.next = node.next
-	}
-	if l.head == node {
-		l.head = node.next
-	}
-	if l.tail == node {
-		l.tail = node.prev
-	}
-	node.next, node.prev = nil, nil
+func (l *list[T]) remove(e *listElem[T]) {
+	e.prev.next = e.next
+	e.next.prev = e.prev
+	e.next, e.prev = nil, nil
 }

 // removeLast removes the last element of the list.
-func (l *dlist[T]) removeLast() *dlistNode[T] {
-	last := l.tail
+func (l *list[T]) removeLast() *listElem[T] {
+	last := l.last()
 	if last != nil {
 		l.remove(last)
 	}
 	return last
 }
+
+// last returns the last element of the list, or nil if the list is empty.
+func (l *list[T]) last() *listElem[T] {
+	e := l.root.prev
+	if e == &l.root {
+		return nil
+	}
+	return e
+}
+
+// appendTo appends all list elements to a slice.
+func (l *list[T]) appendTo(slice []T) []T { + for e := l.root.next; e != &l.root; e = e.next { + slice = append(slice, e.v) + } + return slice +} From 6f9f9fb07e9f4d55bbcf3486e563a8b56e4f779c Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Sat, 12 Nov 2022 12:51:35 +0100 Subject: [PATCH 17/23] common/lru: add benchmark --- common/lru/basiclru_test.go | 73 +++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/common/lru/basiclru_test.go b/common/lru/basiclru_test.go index 21f9cec78a0..2317f12df17 100644 --- a/common/lru/basiclru_test.go +++ b/common/lru/basiclru_test.go @@ -17,6 +17,9 @@ package lru import ( + "fmt" + "io" + "math/rand" "testing" ) @@ -135,3 +138,73 @@ func TestCacheContains(t *testing.T) { t.Errorf("Contains should not have updated recency of 1") } } + +func BenchmarkLRU(b *testing.B) { + var ( + capacity = 1000 + indexes = make([]int, capacity*20) + keys = make([]string, capacity) + values = make([][]byte, capacity) + ) + for i := range indexes { + indexes[i] = rand.Intn(capacity) + } + for i := range keys { + b := make([]byte, 32) + rand.Read(b) + keys[i] = string(b) + rand.Read(b) + values[i] = b + } + + var sink []byte + + b.Run("Add/BasicLRU", func(b *testing.B) { + cache := NewBasicLRU[int, int](capacity) + for i := 0; i < b.N; i++ { + cache.Add(i, i) + } + }) + b.Run("Get/BasicLRU", func(b *testing.B) { + cache := NewBasicLRU[string, []byte](capacity) + for i := 0; i < capacity; i++ { + index := indexes[i] + cache.Add(keys[index], values[index]) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + k := keys[indexes[i%len(indexes)]] + v, ok := cache.Get(k) + if ok { + sink = v + } + } + }) + + // // vs. github.com/hashicorp/golang-lru/simplelru + // b.Run("Add/simplelru.LRU", func(b *testing.B) { + // cache, _ := simplelru.NewLRU(capacity, nil) + // for i := 0; i < b.N; i++ { + // cache.Add(i, i) + // } + // }) + // b.Run("Get/simplelru.LRU", func(b *testing.B) { + // cache, _ := simplelru.NewLRU(capacity, nil) + // for i := 0; i < capacity; i++ { + // index := indexes[i] + // cache.Add(keys[index], values[index]) + // } + // + // b.ResetTimer() + // for i := 0; i < b.N; i++ { + // k := keys[indexes[i%len(indexes)]] + // v, ok := cache.Get(k) + // if ok { + // sink = v.([]byte) + // } + // } + // }) + + fmt.Fprintln(io.Discard, sink) +} From 2a35446c4334cd83963e1e65d6d47710cf793d3e Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Sat, 12 Nov 2022 13:00:33 +0100 Subject: [PATCH 18/23] common/lru: change Keys() order to match simplelru --- common/lru/basiclru.go | 2 +- common/lru/basiclru_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/common/lru/basiclru.go b/common/lru/basiclru.go index 644bb70bd07..d3059e2fa9b 100644 --- a/common/lru/basiclru.go +++ b/common/lru/basiclru.go @@ -216,7 +216,7 @@ func (l *list[T]) last() *listElem[T] { // appendTo appends all list elements to a slice. func (l *list[T]) appendTo(slice []T) []T { - for e := l.root.next; e != &l.root; e = e.next { + for e := l.root.prev; e != &l.root; e = e.prev { slice = append(slice, e.v) } return slice diff --git a/common/lru/basiclru_test.go b/common/lru/basiclru_test.go index 2317f12df17..784e0f63ee9 100644 --- a/common/lru/basiclru_test.go +++ b/common/lru/basiclru_test.go @@ -33,6 +33,24 @@ func TestCache(t *testing.T) { t.Fatalf("bad len: %v", cache.Len()) } + // Check that Keys returns least-recent key first. 
+ keys := cache.Keys() + if len(keys) != 128 { + t.Fatal("wrong Keys() length", len(keys)) + } + for i, k := range keys { + v, ok := cache.Peek(k) + if !ok { + t.Fatalf("expected key %d be present", i) + } + if v != k { + t.Fatalf("expected %d == %d", k, v) + } + if v != i+128 { + t.Fatalf("wrong value at key %d: %d, want %d", i, v, i+128) + } + } + for i := 0; i < 128; i++ { _, ok := cache.Get(i) if ok { @@ -61,6 +79,15 @@ func TestCache(t *testing.T) { } } + // Request item 192. + cache.Get(192) + // It should be the last item returned by Keys(). + for i, k := range cache.Keys() { + if (i < 63 && k != i+193) || (i == 63 && k != 192) { + t.Fatalf("out of order key: %v", k) + } + } + cache.Purge() if cache.Len() != 0 { t.Fatalf("bad len: %v", cache.Len()) From f58508059dfe2e5a516c1c3cedd5b76bb811c4a9 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Sat, 12 Nov 2022 13:02:37 +0100 Subject: [PATCH 19/23] common/lru: add note about test origin --- common/lru/basiclru_test.go | 45 ++++++++++++++++++++----------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/common/lru/basiclru_test.go b/common/lru/basiclru_test.go index 784e0f63ee9..68e13bfc92e 100644 --- a/common/lru/basiclru_test.go +++ b/common/lru/basiclru_test.go @@ -23,7 +23,10 @@ import ( "testing" ) -func TestCache(t *testing.T) { +// Some of these test cases were adapted +// from https://github.com/hashicorp/golang-lru/blob/master/simplelru/lru_test.go + +func TestBasicLRU(t *testing.T) { cache := NewBasicLRU[int, int](128) for i := 0; i < 256; i++ { @@ -110,7 +113,7 @@ func TestBasicLRUAddExistingKey(t *testing.T) { } // This test checks GetOldest and RemoveOldest. -func TestCacheGetOldest(t *testing.T) { +func TestBasicLRUGetOldest(t *testing.T) { cache := NewBasicLRU[int, int](128) for i := 0; i < 256; i++ { cache.Add(i, i) @@ -142,7 +145,7 @@ func TestCacheGetOldest(t *testing.T) { } // Test that Add returns true/false if an eviction occurred -func TestCacheAddReturnValue(t *testing.T) { +func TestBasicLRUAddReturnValue(t *testing.T) { cache := NewBasicLRU[int, int](1) if cache.Add(1, 1) { t.Errorf("first add shouldn't have evicted") @@ -153,7 +156,7 @@ func TestCacheAddReturnValue(t *testing.T) { } // This test verifies that Contains doesn't change item recency. -func TestCacheContains(t *testing.T) { +func TestBasicLRUContains(t *testing.T) { cache := NewBasicLRU[int, int](2) cache.Add(1, 1) cache.Add(2, 2) @@ -211,26 +214,26 @@ func BenchmarkLRU(b *testing.B) { // // vs. 
github.com/hashicorp/golang-lru/simplelru // b.Run("Add/simplelru.LRU", func(b *testing.B) { - // cache, _ := simplelru.NewLRU(capacity, nil) - // for i := 0; i < b.N; i++ { - // cache.Add(i, i) - // } + // cache, _ := simplelru.NewLRU(capacity, nil) + // for i := 0; i < b.N; i++ { + // cache.Add(i, i) + // } // }) // b.Run("Get/simplelru.LRU", func(b *testing.B) { - // cache, _ := simplelru.NewLRU(capacity, nil) - // for i := 0; i < capacity; i++ { - // index := indexes[i] - // cache.Add(keys[index], values[index]) - // } + // cache, _ := simplelru.NewLRU(capacity, nil) + // for i := 0; i < capacity; i++ { + // index := indexes[i] + // cache.Add(keys[index], values[index]) + // } // - // b.ResetTimer() - // for i := 0; i < b.N; i++ { - // k := keys[indexes[i%len(indexes)]] - // v, ok := cache.Get(k) - // if ok { - // sink = v.([]byte) - // } - // } + // b.ResetTimer() + // for i := 0; i < b.N; i++ { + // k := keys[indexes[i%len(indexes)]] + // v, ok := cache.Get(k) + // if ok { + // sink = v.([]byte) + // } + // } // }) fmt.Fprintln(io.Discard, sink) From e6f75307e958d831ff1d07055d579948186f227c Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Sat, 12 Nov 2022 13:14:43 +0100 Subject: [PATCH 20/23] common/lru: move item field This can save some space when type V is smaller than a machine word. --- common/lru/basiclru.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/lru/basiclru.go b/common/lru/basiclru.go index d3059e2fa9b..b3369cf1f25 100644 --- a/common/lru/basiclru.go +++ b/common/lru/basiclru.go @@ -28,8 +28,8 @@ type BasicLRU[K comparable, V any] struct { } type cacheItem[K any, V any] struct { - value V elem *listElem[K] + value V } // NewBasicLRU creates a new LRU cache. @@ -68,7 +68,7 @@ func (c *BasicLRU[K, V]) Add(key K, value V) (evicted bool) { // Store the new item. // Note that, if another item was evicted, we re-use its list element here. elem.v = key - c.items[key] = cacheItem[K, V]{value, elem} + c.items[key] = cacheItem[K, V]{elem, value} c.list.pushElem(elem) return evicted } From 43ff874ba81a310643a7b69de40b23aecb937cef Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Sun, 13 Nov 2022 23:52:34 +0100 Subject: [PATCH 21/23] common/lru: rename types again --- common/lru/blob_lru.go | 24 ++++++++++++------------ common/lru/blob_lru_test.go | 6 +++--- common/lru/lru.go | 26 ++++++++++++++------------ core/blockchain.go | 24 ++++++++++++------------ core/headerchain.go | 12 ++++++------ core/state/database.go | 8 ++++---- eth/filters/filter_system.go | 4 ++-- p2p/dnsdisc/client.go | 4 ++-- 8 files changed, 55 insertions(+), 53 deletions(-) diff --git a/common/lru/blob_lru.go b/common/lru/blob_lru.go index ba2830df589..c9b33985032 100644 --- a/common/lru/blob_lru.go +++ b/common/lru/blob_lru.go @@ -21,29 +21,29 @@ import ( "sync" ) -// BlobType is the type constraint for values stored in BlobLRU. -type BlobType interface { +// blobType is the type constraint for values stored in SizeConstrainedCache. +type blobType interface { ~[]byte | ~string } -// BlobLRU is a LRU cache where capacity is in bytes (instead of item count). -// When the cache is at capacity, and a new item is added, the older items are evicted -// until the size constraint can be met. +// SizeConstrainedCache is a cache where capacity is in bytes (instead of item count). When the cache +// is at capacity, and a new item is added, older items are evicted until the size +// constraint is met. 
// // OBS: This cache assumes that items are content-addressed: keys are unique per content. // In other words: two Add(..) with the same key K, will always have the same value V. -type BlobLRU[K comparable, V BlobType] struct { +type SizeConstrainedCache[K comparable, V blobType] struct { size uint64 maxSize uint64 lru BasicLRU[K, V] lock sync.Mutex } -// NewBlobLRU creates a new SizeConstrainedLRU. -func NewBlobLRU[K comparable, V BlobType](max uint64) *BlobLRU[K, V] { - return &BlobLRU[K, V]{ +// NewSizeConstrainedCache creates a new size-constrained LRU cache. +func NewSizeConstrainedCache[K comparable, V blobType](maxSize uint64) *SizeConstrainedCache[K, V] { + return &SizeConstrainedCache[K, V]{ size: 0, - maxSize: max, + maxSize: maxSize, lru: NewBasicLRU[K, V](math.MaxInt), } } @@ -52,7 +52,7 @@ func NewBlobLRU[K comparable, V BlobType](max uint64) *BlobLRU[K, V] { // OBS: This cache assumes that items are content-addressed: keys are unique per content. // In other words: two Add(..) with the same key K, will always have the same value V. // OBS: The value is _not_ copied on Add, so the caller must not modify it afterwards. -func (c *BlobLRU[K, V]) Add(key K, value V) (evicted bool) { +func (c *SizeConstrainedCache[K, V]) Add(key K, value V) (evicted bool) { c.lock.Lock() defer c.lock.Unlock() @@ -76,7 +76,7 @@ func (c *BlobLRU[K, V]) Add(key K, value V) (evicted bool) { } // Get looks up a key's value from the cache. -func (c *BlobLRU[K, V]) Get(key K) (V, bool) { +func (c *SizeConstrainedCache[K, V]) Get(key K) (V, bool) { c.lock.Lock() defer c.lock.Unlock() diff --git a/common/lru/blob_lru_test.go b/common/lru/blob_lru_test.go index 58369c14223..f3960be79be 100644 --- a/common/lru/blob_lru_test.go +++ b/common/lru/blob_lru_test.go @@ -31,7 +31,7 @@ func mkHash(i int) common.Hash { } func TestBlobLru(t *testing.T) { - lru := NewBlobLRU[common.Hash, []byte](100) + lru := NewSizeConstrainedCache[common.Hash, []byte](100) var want uint64 // Add 11 items of 10 byte each. First item should be swapped out for i := 0; i < 11; i++ { @@ -70,7 +70,7 @@ func TestBlobLru(t *testing.T) { // TestBlobLruOverflow tests what happens when inserting an element exceeding // the max size func TestBlobLruOverflow(t *testing.T) { - lru := NewBlobLRU[common.Hash, []byte](100) + lru := NewSizeConstrainedCache[common.Hash, []byte](100) // Add 10 items of 10 byte each, filling the cache for i := 0; i < 10; i++ { k := mkHash(i) @@ -108,7 +108,7 @@ func TestBlobLruOverflow(t *testing.T) { // TestBlobLruSameItem tests what happens when inserting the same k/v multiple times. func TestBlobLruSameItem(t *testing.T) { - lru := NewBlobLRU[common.Hash, []byte](100) + lru := NewSizeConstrainedCache[common.Hash, []byte](100) // Add one 10 byte-item 10 times k := mkHash(0) v := fmt.Sprintf("value-%04d", 0) diff --git a/common/lru/lru.go b/common/lru/lru.go index 40d2638c6fa..45965adb0df 100644 --- a/common/lru/lru.go +++ b/common/lru/lru.go @@ -18,18 +18,20 @@ package lru import "sync" -// LRU is an LRU cache protected by a mutex. -type LRU[K comparable, V any] struct { +// Cache is a LRU cache. +// This type is safe for concurrent use. +type Cache[K comparable, V any] struct { cache BasicLRU[K, V] mu sync.Mutex } -func NewLRU[K comparable, V any](capacity int) *LRU[K, V] { - return &LRU[K, V]{cache: NewBasicLRU[K, V](capacity)} +// NewCache creates an LRU cache. 
+func NewCache[K comparable, V any](capacity int) *Cache[K, V] { + return &Cache[K, V]{cache: NewBasicLRU[K, V](capacity)} } // Add adds a value to the cache. Returns true if an item was evicted to store the new item. -func (c *LRU[K, V]) Add(key K, value V) (evicted bool) { +func (c *Cache[K, V]) Add(key K, value V) (evicted bool) { c.mu.Lock() defer c.mu.Unlock() @@ -37,7 +39,7 @@ func (c *LRU[K, V]) Add(key K, value V) (evicted bool) { } // Contains reports whether the given key exists in the cache. -func (c *LRU[K, V]) Contains(key K) bool { +func (c *Cache[K, V]) Contains(key K) bool { c.mu.Lock() defer c.mu.Unlock() @@ -45,7 +47,7 @@ func (c *LRU[K, V]) Contains(key K) bool { } // Get retrieves a value from the cache. This marks the key as recently used. -func (c *LRU[K, V]) Get(key K) (value V, ok bool) { +func (c *Cache[K, V]) Get(key K) (value V, ok bool) { c.mu.Lock() defer c.mu.Unlock() @@ -53,7 +55,7 @@ func (c *LRU[K, V]) Get(key K) (value V, ok bool) { } // Len returns the current number of items in the cache. -func (c *LRU[K, V]) Len() int { +func (c *Cache[K, V]) Len() int { c.mu.Lock() defer c.mu.Unlock() @@ -61,7 +63,7 @@ func (c *LRU[K, V]) Len() int { } // Peek retrieves a value from the cache, but does not mark the key as recently used. -func (c *LRU[K, V]) Peek(key K) (value V, ok bool) { +func (c *Cache[K, V]) Peek(key K) (value V, ok bool) { c.mu.Lock() defer c.mu.Unlock() @@ -69,7 +71,7 @@ func (c *LRU[K, V]) Peek(key K) (value V, ok bool) { } // Purge empties the cache. -func (c *LRU[K, V]) Purge() { +func (c *Cache[K, V]) Purge() { c.mu.Lock() defer c.mu.Unlock() @@ -77,7 +79,7 @@ func (c *LRU[K, V]) Purge() { } // Remove drops an item from the cache. Returns true if the key was present in cache. -func (c *LRU[K, V]) Remove(key K) bool { +func (c *Cache[K, V]) Remove(key K) bool { c.mu.Lock() defer c.mu.Unlock() @@ -85,7 +87,7 @@ func (c *LRU[K, V]) Remove(key K) bool { } // Keys returns all keys of items currently in the LRU. -func (c *LRU[K, V]) Keys() []K { +func (c *Cache[K, V]) Keys() []K { c.mu.Lock() defer c.mu.Unlock() diff --git a/core/blockchain.go b/core/blockchain.go index 0b836cf20bb..863e9424259 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -201,14 +201,14 @@ type BlockChain struct { currentSafeBlock atomic.Value // Current safe head stateCache state.Database // State database to reuse between imports (contains state cache) - bodyCache *lru.LRU[common.Hash, *types.Body] - bodyRLPCache *lru.LRU[common.Hash, rlp.RawValue] - receiptsCache *lru.LRU[common.Hash, []*types.Receipt] - blockCache *lru.LRU[common.Hash, *types.Block] - txLookupCache *lru.LRU[common.Hash, *rawdb.LegacyTxLookupEntry] + bodyCache *lru.Cache[common.Hash, *types.Body] + bodyRLPCache *lru.Cache[common.Hash, rlp.RawValue] + receiptsCache *lru.Cache[common.Hash, []*types.Receipt] + blockCache *lru.Cache[common.Hash, *types.Block] + txLookupCache *lru.Cache[common.Hash, *rawdb.LegacyTxLookupEntry] // future blocks are blocks added for later processing - futureBlocks *lru.LRU[common.Hash, *types.Block] + futureBlocks *lru.Cache[common.Hash, *types.Block] wg sync.WaitGroup // quit chan struct{} // shutdown signal, closed in Stop. 
@@ -258,12 +258,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis }), quit: make(chan struct{}), chainmu: syncx.NewClosableMutex(), - bodyCache: lru.NewLRU[common.Hash, *types.Body](bodyCacheLimit), - bodyRLPCache: lru.NewLRU[common.Hash, rlp.RawValue](bodyCacheLimit), - receiptsCache: lru.NewLRU[common.Hash, []*types.Receipt](receiptsCacheLimit), - blockCache: lru.NewLRU[common.Hash, *types.Block](blockCacheLimit), - txLookupCache: lru.NewLRU[common.Hash, *rawdb.LegacyTxLookupEntry](txLookupCacheLimit), - futureBlocks: lru.NewLRU[common.Hash, *types.Block](maxFutureBlocks), + bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), + bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit), + receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), + blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), + txLookupCache: lru.NewCache[common.Hash, *rawdb.LegacyTxLookupEntry](txLookupCacheLimit), + futureBlocks: lru.NewCache[common.Hash, *types.Block](maxFutureBlocks), engine: engine, vmConfig: vmConfig, } diff --git a/core/headerchain.go b/core/headerchain.go index fb8ee31c0d4..482b5f6fbe9 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -64,9 +64,9 @@ type HeaderChain struct { currentHeader atomic.Value // Current head of the header chain (may be above the block chain!) currentHeaderHash common.Hash // Hash of the current head of the header chain (prevent recomputing all the time) - headerCache *lru.LRU[common.Hash, *types.Header] - tdCache *lru.LRU[common.Hash, *big.Int] // most recent total difficulties - numberCache *lru.LRU[common.Hash, uint64] // most recent block numbers + headerCache *lru.Cache[common.Hash, *types.Header] + tdCache *lru.Cache[common.Hash, *big.Int] // most recent total difficulties + numberCache *lru.Cache[common.Hash, uint64] // most recent block numbers procInterrupt func() bool @@ -85,9 +85,9 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c hc := &HeaderChain{ config: config, chainDb: chainDb, - headerCache: lru.NewLRU[common.Hash, *types.Header](headerCacheLimit), - tdCache: lru.NewLRU[common.Hash, *big.Int](tdCacheLimit), - numberCache: lru.NewLRU[common.Hash, uint64](numberCacheLimit), + headerCache: lru.NewCache[common.Hash, *types.Header](headerCacheLimit), + tdCache: lru.NewCache[common.Hash, *big.Int](tdCacheLimit), + numberCache: lru.NewCache[common.Hash, uint64](numberCacheLimit), procInterrupt: procInterrupt, rand: mrand.New(mrand.NewSource(seed.Int64())), engine: engine, diff --git a/core/state/database.go b/core/state/database.go index 07956b2fd95..2de0650df89 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -132,16 +132,16 @@ func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { return &cachingDB{ db: trie.NewDatabaseWithConfig(db, config), disk: db, - codeSizeCache: lru.NewLRU[common.Hash, int](codeSizeCacheSize), - codeCache: lru.NewBlobLRU[common.Hash, []byte](codeCacheSize), + codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), + codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), } } type cachingDB struct { db *trie.Database disk ethdb.KeyValueStore - codeSizeCache *lru.LRU[common.Hash, int] - codeCache *lru.BlobLRU[common.Hash, []byte] + codeSizeCache *lru.Cache[common.Hash, int] + codeCache *lru.SizeConstrainedCache[common.Hash, []byte] } // OpenTrie opens the main account trie at a specific root hash. 
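The cachingDB hunk above pairs the two flavours: a count-bounded lru.Cache for small fixed-size entries (code sizes) and a byte-bounded lru.SizeConstrainedCache for the variable-length code blobs themselves. A rough, self-contained sketch of that caller-side pattern, assuming the constructors shown in this series (the string keys and 64 KiB budget are illustrative only):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common/lru"
)

func main() {
    // Count-bounded: holds at most 1000 entries, regardless of their size.
    codeSizes := lru.NewCache[string, int](1000)

    // Byte-bounded: evicts old entries once the stored values exceed 64 KiB.
    code := lru.NewSizeConstrainedCache[string, []byte](64 * 1024)

    blob := []byte{0x60, 0x80, 0x60, 0x40, 0x52}
    code.Add("contract-A", blob)
    codeSizes.Add("contract-A", len(blob))

    if v, ok := code.Get("contract-A"); ok {
        fmt.Printf("cached %d bytes\n", len(v))
    }
}

Bounding the blob cache by bytes rather than by item count keeps a handful of unusually large values from blowing past the intended memory budget, while the size index can remain a cheap count-bounded cache.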
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index cd4e10607a5..a0fcc7e0858 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -74,7 +74,7 @@ type Backend interface { // FilterSystem holds resources shared by all filters. type FilterSystem struct { backend Backend - logsCache *lru.LRU[common.Hash, [][]*types.Log] + logsCache *lru.Cache[common.Hash, [][]*types.Log] cfg *Config } @@ -83,7 +83,7 @@ func NewFilterSystem(backend Backend, config Config) *FilterSystem { config = config.withDefaults() return &FilterSystem{ backend: backend, - logsCache: lru.NewLRU[common.Hash, [][]*types.Log](config.LogCacheSize), + logsCache: lru.NewCache[common.Hash, [][]*types.Log](config.LogCacheSize), cfg: &config, } } diff --git a/p2p/dnsdisc/client.go b/p2p/dnsdisc/client.go index 53189e9312d..8f1c221b803 100644 --- a/p2p/dnsdisc/client.go +++ b/p2p/dnsdisc/client.go @@ -41,7 +41,7 @@ import ( type Client struct { cfg Config clock mclock.Clock - entries *lru.LRU[string, entry] + entries *lru.Cache[string, entry] ratelimit *rate.Limiter singleflight singleflight.Group } @@ -99,7 +99,7 @@ func NewClient(cfg Config) *Client { rlimit := rate.NewLimiter(rate.Limit(cfg.RateLimit), 10) return &Client{ cfg: cfg, - entries: lru.NewLRU[string, entry](cfg.CacheLimit), + entries: lru.NewCache[string, entry](cfg.CacheLimit), clock: mclock.System{}, ratelimit: rlimit, } From f6dc359f7c3d0dbb0407bc13501f06be84fc0b0f Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Sun, 13 Nov 2022 23:55:47 +0100 Subject: [PATCH 22/23] eth/gasprice: use common/lru --- eth/gasprice/feehistory.go | 14 ++++++++------ eth/gasprice/gasprice.go | 7 ++++--- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 6028ef03cf1..47cc31999e0 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -56,7 +56,12 @@ type blockFees struct { err error } -// processedFees contains the results of a processed block and is also used for caching +type cacheKey struct { + number uint64 + percentiles string +} + +// processedFees contains the results of a processed block. 
type processedFees struct { reward []*big.Int baseFee, nextBaseFee *big.Int @@ -270,13 +275,10 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast oracle.processBlock(fees, rewardPercentiles) results <- fees } else { - cacheKey := struct { - number uint64 - percentiles string - }{blockNumber, string(percentileKey)} + cacheKey := cacheKey{number: blockNumber, percentiles: string(percentileKey)} if p, ok := oracle.historyCache.Get(cacheKey); ok { - fees.results = p.(processedFees) + fees.results = p results <- fees } else { if len(rewardPercentiles) != 0 { diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go index 00128a5dc85..604ad5e1043 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -23,13 +23,13 @@ import ( "sync" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" - lru "github.com/hashicorp/golang-lru" ) const sampleNumber = 3 // Number of transactions sampled in a block @@ -72,7 +72,8 @@ type Oracle struct { checkBlocks, percentile int maxHeaderHistory, maxBlockHistory int - historyCache *lru.Cache + + historyCache *lru.Cache[cacheKey, processedFees] } // NewOracle returns a new gasprice oracle which can recommend suitable @@ -114,7 +115,7 @@ func NewOracle(backend OracleBackend, params Config) *Oracle { log.Warn("Sanitizing invalid gasprice oracle max block history", "provided", params.MaxBlockHistory, "updated", maxBlockHistory) } - cache, _ := lru.New(2048) + cache := lru.NewCache[cacheKey, processedFees](2048) headEvent := make(chan core.ChainHeadEvent, 1) backend.SubscribeChainHeadEvent(headEvent) go func() { From 1e2d0f367914ed991af4d2e7dda6b62b1b1d3cae Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Mon, 14 Nov 2022 10:43:32 +0100 Subject: [PATCH 23/23] common/lru: rename SizeConstrainedCache tests and add test for nil --- common/lru/blob_lru_test.go | 83 ++++++++++++++++++++++++++----------- 1 file changed, 58 insertions(+), 25 deletions(-) diff --git a/common/lru/blob_lru_test.go b/common/lru/blob_lru_test.go index f3960be79be..ca1b0ddd742 100644 --- a/common/lru/blob_lru_test.go +++ b/common/lru/blob_lru_test.go @@ -20,22 +20,21 @@ import ( "encoding/binary" "fmt" "testing" - - "github.com/ethereum/go-ethereum/common" ) -func mkHash(i int) common.Hash { - h := make([]byte, 32) - binary.LittleEndian.PutUint64(h, uint64(i)) - return common.BytesToHash(h) +type testKey [8]byte + +func mkKey(i int) (key testKey) { + binary.LittleEndian.PutUint64(key[:], uint64(i)) + return key } -func TestBlobLru(t *testing.T) { - lru := NewSizeConstrainedCache[common.Hash, []byte](100) +func TestSizeConstrainedCache(t *testing.T) { + lru := NewSizeConstrainedCache[testKey, []byte](100) var want uint64 // Add 11 items of 10 byte each. 
First item should be swapped out for i := 0; i < 11; i++ { - k := mkHash(i) + k := mkKey(i) v := fmt.Sprintf("value-%04d", i) lru.Add(k, []byte(v)) want += uint64(len(v)) @@ -48,14 +47,14 @@ func TestBlobLru(t *testing.T) { } // Zero:th should be evicted { - k := mkHash(0) + k := mkKey(0) if _, ok := lru.Get(k); ok { t.Fatalf("should be evicted: %v", k) } } // Elems 1-11 should be present for i := 1; i < 11; i++ { - k := mkHash(i) + k := mkKey(i) want := fmt.Sprintf("value-%04d", i) have, ok := lru.Get(k) if !ok { @@ -67,25 +66,25 @@ func TestBlobLru(t *testing.T) { } } -// TestBlobLruOverflow tests what happens when inserting an element exceeding -// the max size -func TestBlobLruOverflow(t *testing.T) { - lru := NewSizeConstrainedCache[common.Hash, []byte](100) +// This test adds inserting an element exceeding the max size. +func TestSizeConstrainedCacheOverflow(t *testing.T) { + lru := NewSizeConstrainedCache[testKey, []byte](100) + // Add 10 items of 10 byte each, filling the cache for i := 0; i < 10; i++ { - k := mkHash(i) + k := mkKey(i) v := fmt.Sprintf("value-%04d", i) lru.Add(k, []byte(v)) } // Add one single large elem. We expect it to swap out all entries. { - k := mkHash(1337) + k := mkKey(1337) v := make([]byte, 200) lru.Add(k, v) } // Elems 0-9 should be missing for i := 1; i < 10; i++ { - k := mkHash(i) + k := mkKey(i) if _, ok := lru.Get(k); ok { t.Fatalf("should be evicted: %v", k) } @@ -97,7 +96,7 @@ func TestBlobLruOverflow(t *testing.T) { // Adding one small item should swap out the large one { i := 0 - k := mkHash(i) + k := mkKey(i) v := fmt.Sprintf("value-%04d", i) lru.Add(k, []byte(v)) if have, want := lru.size, uint64(10); have != want { @@ -106,17 +105,51 @@ func TestBlobLruOverflow(t *testing.T) { } } -// TestBlobLruSameItem tests what happens when inserting the same k/v multiple times. -func TestBlobLruSameItem(t *testing.T) { - lru := NewSizeConstrainedCache[common.Hash, []byte](100) - // Add one 10 byte-item 10 times - k := mkHash(0) +// This checks what happens when inserting the same k/v multiple times. +func TestSizeConstrainedCacheSameItem(t *testing.T) { + lru := NewSizeConstrainedCache[testKey, []byte](100) + + // Add one 10 byte-item 10 times. + k := mkKey(0) v := fmt.Sprintf("value-%04d", 0) for i := 0; i < 10; i++ { lru.Add(k, []byte(v)) } - // The size should be accurate + + // The size should be accurate. if have, want := lru.size, uint64(10); have != want { t.Fatalf("size wrong, have %d want %d", have, want) } } + +// This tests that empty/nil values are handled correctly. +func TestSizeConstrainedCacheEmpties(t *testing.T) { + lru := NewSizeConstrainedCache[testKey, []byte](100) + + // This test abuses the lru a bit, using different keys for identical value(s). + for i := 0; i < 10; i++ { + lru.Add(testKey{byte(i)}, []byte{}) + lru.Add(testKey{byte(255 - i)}, nil) + } + + // The size should not count, only the values count. So this could be a DoS + // since it basically has no cap, and it is intentionally overloaded with + // different-keyed 0-length values. + if have, want := lru.size, uint64(0); have != want { + t.Fatalf("size wrong, have %d want %d", have, want) + } + + for i := 0; i < 10; i++ { + if v, ok := lru.Get(testKey{byte(i)}); !ok { + t.Fatalf("test %d: expected presence", i) + } else if v == nil { + t.Fatalf("test %d, v is nil", i) + } + + if v, ok := lru.Get(testKey{byte(255 - i)}); !ok { + t.Fatalf("test %d: expected presence", i) + } else if v != nil { + t.Fatalf("test %d, v is not nil", i) + } + } +}
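Taken together, these tests pin down the size accounting: the capacity is a byte budget over the stored values only, repeated Adds of the same (content-addressed) key do not grow the total, and zero-length values cost nothing. A compact illustration of the eviction behaviour they describe, assuming the NewSizeConstrainedCache constructor from this series (the string keys are made up for the example):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common/lru"
)

func main() {
    // A 100-byte budget fits at most ten of the 10-byte values added below.
    cache := lru.NewSizeConstrainedCache[string, []byte](100)

    // The eleventh Add pushes the total to 110 bytes, so the least recently
    // added entry ("key-00") is evicted to get back under the budget.
    for i := 0; i < 11; i++ {
        k := fmt.Sprintf("key-%02d", i)
        cache.Add(k, []byte(fmt.Sprintf("value-%04d", i)))
    }

    if _, ok := cache.Get("key-00"); !ok {
        fmt.Println("key-00 was evicted")
    }
    if v, ok := cache.Get("key-10"); ok {
        fmt.Println("key-10 ->", string(v)) // value-0010
    }
}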