@@ -82,11 +82,6 @@ var Defaults = &Config{
82
82
// Database is an intermediate write layer between the trie data structures and
83
83
// the disk database. The aim is to accumulate trie writes in-memory and only
84
84
// periodically flush a couple tries to disk, garbage collecting the remainder.
85
- //
86
- // Note, the trie Database is **not** thread safe in its mutations, but it **is**
87
- // thread safe in providing individual, independent node access. The rationale
88
- // behind this split design is to provide read access to RPC handlers and sync
89
- // servers even while the trie is executing expensive garbage collection.
90
85
type Database struct {
91
86
diskdb ethdb.Database // Persistent storage for matured trie nodes
92
87
resolver ChildResolver // The handler to resolve children of nodes
@@ -113,7 +108,7 @@ type Database struct {
113
108
// cachedNode is all the information we know about a single cached trie node
114
109
// in the memory database write layer.
115
110
type cachedNode struct {
116
- node []byte // Encoded node blob
111
+ node []byte // Encoded node blob, immutable
117
112
parents uint32 // Number of live nodes referencing this one
118
113
external map [common.Hash ]struct {} // The set of external children
119
114
flushPrev common.Hash // Previous node in the flush-list
@@ -152,9 +147,9 @@ func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Databas
152
147
}
153
148
}
154
149
155
- // insert inserts a simplified trie node into the memory database.
156
- // All nodes inserted by this function will be reference tracked
157
- // and in theory should only used for **trie nodes** insertion .
150
+ // insert inserts a trie node into the memory database. All nodes inserted by
151
+ // this function will be reference tracked. This function assumes the lock is
152
+ // already held .
158
153
func (db * Database ) insert (hash common.Hash , node []byte ) {
159
154
// If the node's already cached, skip
160
155
if _ , ok := db .dirties [hash ]; ok {
@@ -183,9 +178,9 @@ func (db *Database) insert(hash common.Hash, node []byte) {
183
178
db .dirtiesSize += common .StorageSize (common .HashLength + len (node ))
184
179
}
185
180
186
- // Node retrieves an encoded cached trie node from memory. If it cannot be found
181
+ // node retrieves an encoded cached trie node from memory. If it cannot be found
187
182
// cached, the method queries the persistent database for the content.
188
- func (db * Database ) Node (hash common.Hash ) ([]byte , error ) {
183
+ func (db * Database ) node (hash common.Hash ) ([]byte , error ) {
189
184
// It doesn't make sense to retrieve the metaroot
190
185
if hash == (common.Hash {}) {
191
186
return nil , errors .New ("not found" )
@@ -198,11 +193,14 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
198
193
return enc , nil
199
194
}
200
195
}
201
- // Retrieve the node from the dirty cache if available
196
+ // Retrieve the node from the dirty cache if available.
202
197
db .lock .RLock ()
203
198
dirty := db .dirties [hash ]
204
199
db .lock .RUnlock ()
205
200
201
+ // Return the cached node if it's found in the dirty set.
202
+ // The dirty.node field is immutable and is safe to read
203
+ // even without holding the lock.
206
204
if dirty != nil {
207
205
memcacheDirtyHitMeter .Mark (1 )
208
206
memcacheDirtyReadMeter .Mark (int64 (len (dirty .node )))
@@ -223,18 +221,9 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
223
221
return nil , errors .New ("not found" )
224
222
}
225
223
226
- // Nodes retrieves the hashes of all the nodes cached within the memory database.
227
- // This method is extremely expensive and should only be used to validate internal
228
- // states in test code.
229
- func (db * Database ) Nodes () []common.Hash {
230
- db .lock .RLock ()
231
- defer db .lock .RUnlock ()
232
-
233
- var hashes = make ([]common.Hash , 0 , len (db .dirties ))
234
- for hash := range db .dirties {
235
- hashes = append (hashes , hash )
236
- }
237
- return hashes
224
+ // arbitrum: exposing hashdb.Database.Node for triedb.Database.Node currently used by arbitrum.RecordingKV.Get
225
+ func (db * Database ) Node (hash common.Hash ) ([]byte , error ) {
226
+ return db .node (hash )
238
227
}
239
228
240
229
// Reference adds a new reference from a parent node to a child node.
@@ -344,33 +333,28 @@ func (db *Database) dereference(hash common.Hash) {
344
333
345
334
// Cap iteratively flushes old but still referenced trie nodes until the total
346
335
// memory usage goes below the given threshold.
347
- //
348
- // Note, this method is a non-synchronized mutator. It is unsafe to call this
349
- // concurrently with other mutators.
350
336
func (db * Database ) Cap (limit common.StorageSize ) error {
337
+ db .lock .Lock ()
338
+ defer db .lock .Unlock ()
339
+
351
340
// Create a database batch to flush persistent data out. It is important that
352
341
// outside code doesn't see an inconsistent state (referenced data removed from
353
342
// memory cache during commit but not yet in persistent storage). This is ensured
354
343
// by only uncaching existing data when the database write finalizes.
355
- start := time .Now ()
356
344
batch := db .diskdb .NewBatch ()
357
- db .lock .RLock ()
358
- nodes , storage := len (db .dirties ), db .dirtiesSize
345
+ nodes , storage , start := len (db .dirties ), db .dirtiesSize , time .Now ()
359
346
360
347
// db.dirtiesSize only contains the useful data in the cache, but when reporting
361
348
// the total memory consumption, the maintenance metadata is also needed to be
362
349
// counted.
363
350
size := db .dirtiesSize + common .StorageSize (len (db .dirties )* cachedNodeSize )
364
351
size += db .childrenSize
365
- db .lock .RUnlock ()
366
352
367
353
// Keep committing nodes from the flush-list until we're below allowance
368
354
oldest := db .oldest
369
355
for size > limit && oldest != (common.Hash {}) {
370
356
// Fetch the oldest referenced node and push into the batch
371
- db .lock .RLock ()
372
357
node := db .dirties [oldest ]
373
- db .lock .RUnlock ()
374
358
rawdb .WriteLegacyTrieNode (batch , oldest , node .node )
375
359
376
360
// If we exceeded the ideal batch size, commit and reset
@@ -396,9 +380,6 @@ func (db *Database) Cap(limit common.StorageSize) error {
396
380
return err
397
381
}
398
382
// Write successful, clear out the flushed data
399
- db .lock .Lock ()
400
- defer db .lock .Unlock ()
401
-
402
383
for db .oldest != oldest {
403
384
node := db .dirties [db .oldest ]
404
385
delete (db .dirties , db .oldest )
@@ -429,14 +410,13 @@ func (db *Database) Cap(limit common.StorageSize) error {
429
410
// Commit iterates over all the children of a particular node, writes them out
430
411
// to disk, forcefully tearing down all references in both directions. As a side
431
412
// effect, all pre-images accumulated up to this point are also written.
432
- //
433
- // Note, this method is a non-synchronized mutator. It is unsafe to call this
434
- // concurrently with other mutators.
435
413
func (db * Database ) Commit (node common.Hash , report bool ) error {
436
414
if node == (common.Hash {}) {
437
415
// There's no data to commit in this node
438
416
return nil
439
417
}
418
+ db .lock .Lock ()
419
+ defer db .lock .Unlock ()
440
420
441
421
// Create a database batch to flush persistent data out. It is important that
442
422
// outside code doesn't see an inconsistent state (referenced data removed from
@@ -446,9 +426,7 @@ func (db *Database) Commit(node common.Hash, report bool) error {
446
426
batch := db .diskdb .NewBatch ()
447
427
448
428
// Move the trie itself into the batch, flushing if enough data is accumulated
449
- db .lock .RLock ()
450
429
nodes , storage := len (db .dirties ), db .dirtiesSize
451
- db .lock .RUnlock ()
452
430
453
431
uncacher := & cleaner {db }
454
432
if err := db .commit (node , batch , uncacher ); err != nil {
@@ -461,8 +439,6 @@ func (db *Database) Commit(node common.Hash, report bool) error {
461
439
return err
462
440
}
463
441
// Uncache any leftovers in the last batch
464
- db .lock .Lock ()
465
- defer db .lock .Unlock ()
466
442
if err := batch .Replay (uncacher ); err != nil {
467
443
return err
468
444
}
@@ -490,9 +466,7 @@ func (db *Database) Commit(node common.Hash, report bool) error {
490
466
// commit is the private locked version of Commit.
491
467
func (db * Database ) commit (hash common.Hash , batch ethdb.Batch , uncacher * cleaner ) error {
492
468
// If the node does not exist, it's a previously committed node
493
- db .lock .RLock ()
494
469
node , ok := db .dirties [hash ]
495
- db .lock .RUnlock ()
496
470
if ! ok {
497
471
return nil
498
472
}
@@ -513,13 +487,11 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane
513
487
if err := batch .Write (); err != nil {
514
488
return err
515
489
}
516
- db .lock .Lock ()
517
490
err := batch .Replay (uncacher )
518
- batch .Reset ()
519
- db .lock .Unlock ()
520
491
if err != nil {
521
492
return err
522
493
}
494
+ batch .Reset ()
523
495
}
524
496
return nil
525
497
}
@@ -588,7 +560,7 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool {
588
560
func (db * Database ) Update (root common.Hash , parent common.Hash , block uint64 , nodes * trienode.MergedNodeSet , states * triestate.Set ) error {
589
561
// Ensure the parent state is present and signal a warning if not.
590
562
if parent != types .EmptyRootHash {
591
- if blob , _ := db .Node (parent ); len (blob ) == 0 {
563
+ if blob , _ := db .node (parent ); len (blob ) == 0 {
592
564
log .Error ("parent state is not present" )
593
565
}
594
566
}
@@ -669,7 +641,7 @@ func (db *Database) Scheme() string {
669
641
// Reader retrieves a node reader belonging to the given state root.
670
642
// An error will be returned if the requested state is not available.
671
643
func (db * Database ) Reader (root common.Hash ) (* reader , error ) {
672
- if _ , err := db .Node (root ); err != nil {
644
+ if _ , err := db .node (root ); err != nil {
673
645
return nil , fmt .Errorf ("state %#x is not available, %v" , root , err )
674
646
}
675
647
return & reader {db : db }, nil
@@ -680,9 +652,9 @@ type reader struct {
680
652
db * Database
681
653
}
682
654
683
- // Node retrieves the trie node with the given node hash.
684
- // No error will be returned if the node is not found.
655
+ // Node retrieves the trie node with the given node hash. No error will be
656
+ // returned if the node is not found.
685
657
func (reader * reader ) Node (owner common.Hash , path []byte , hash common.Hash ) ([]byte , error ) {
686
- blob , _ := reader .db .Node (hash )
658
+ blob , _ := reader .db .node (hash )
687
659
return blob , nil
688
660
}
0 commit comments