@@ -82,11 +82,6 @@ var Defaults = &Config{
 // Database is an intermediate write layer between the trie data structures and
 // the disk database. The aim is to accumulate trie writes in-memory and only
 // periodically flush a couple tries to disk, garbage collecting the remainder.
-//
-// Note, the trie Database is **not** thread safe in its mutations, but it **is**
-// thread safe in providing individual, independent node access. The rationale
-// behind this split design is to provide read access to RPC handlers and sync
-// servers even while the trie is executing expensive garbage collection.
 type Database struct {
 	diskdb   ethdb.Database // Persistent storage for matured trie nodes
 	resolver ChildResolver  // The handler to resolve children of nodes
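
The note removed above documented the old contract: concurrent node reads were safe, concurrent mutations were not. The hunks below shift that burden into the database itself by taking db.lock inside the mutators (Cap and Commit). A minimal sketch of what that buys a caller, assuming db is a *Database returned by New (its backing ethdb.Database and ChildResolver are not shown here), root is the hash of a trie whose nodes were inserted earlier, and hashes are node hashes being served to readers; the function name is illustrative only:

func commitWhileServingReads(db *Database, root common.Hash, hashes []common.Hash) error {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for _, h := range hashes {
			_, _ = db.Node(h) // Node takes the read lock internally
		}
	}()
	// With this change Commit acquires db.lock itself, so no caller-side
	// synchronization against the reads above is required.
	err := db.Commit(root, false)
	wg.Wait()
	return err
}
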
@@ -113,7 +108,7 @@ type Database struct {
 // cachedNode is all the information we know about a single cached trie node
 // in the memory database write layer.
 type cachedNode struct {
-	node      []byte                   // Encoded node blob
+	node      []byte                   // Encoded node blob, immutable
 	parents   uint32                   // Number of live nodes referencing this one
 	external  map[common.Hash]struct{} // The set of external children
 	flushPrev common.Hash              // Previous node in the flush-list
@@ -152,9 +147,9 @@ func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Databas
 	}
 }
 
-// insert inserts a simplified trie node into the memory database.
-// All nodes inserted by this function will be reference tracked
-// and in theory should only used for **trie nodes** insertion.
+// insert inserts a trie node into the memory database. All nodes inserted by
+// this function will be reference tracked. This function assumes the lock is
+// already held.
 func (db *Database) insert(hash common.Hash, node []byte) {
 	// If the node's already cached, skip
 	if _, ok := db.dirties[hash]; ok {
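
The rewritten comment turns insert into a lock-assumed helper rather than a self-locking one. A hypothetical wrapper (not part of this diff; the real call sites live elsewhere in this package) showing the calling convention it now expects, with the exported boundary doing the locking:

func (db *Database) insertBatch(nodes map[common.Hash][]byte) {
	db.lock.Lock()
	defer db.lock.Unlock()
	for hash, blob := range nodes {
		// insert requires db.lock to be held; holding it across the whole
		// batch also keeps the flush-list ordering consistent.
		db.insert(hash, blob)
	}
}
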
@@ -183,7 +178,7 @@ func (db *Database) insert(hash common.Hash, node []byte) {
 	db.dirtiesSize += common.StorageSize(common.HashLength + len(node))
 }
 
-// Node retrieves an encoded cached trie node from memory. If it cannot be found
+// node retrieves an encoded cached trie node from memory. If it cannot be found
 // cached, the method queries the persistent database for the content.
 func (db *Database) Node(hash common.Hash) ([]byte, error) {
 	// It doesn't make sense to retrieve the metaroot
@@ -198,11 +193,14 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
 			return enc, nil
 		}
 	}
-	// Retrieve the node from the dirty cache if available
+	// Retrieve the node from the dirty cache if available.
 	db.lock.RLock()
 	dirty := db.dirties[hash]
 	db.lock.RUnlock()
 
+	// Return the cached node if it's found in the dirty set.
+	// The dirty.node field is immutable and safe to read
+	// even without holding the lock.
 	if dirty != nil {
 		memcacheDirtyHitMeter.Mark(1)
 		memcacheDirtyReadMeter.Mark(int64(len(dirty.node)))
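
The added comment relies on a standard Go idiom: guard only the map lookup with the RWMutex and read the returned value lock-free, which is sound only because the value is never mutated after insertion. A self-contained illustration of that idiom (all names here are invented for the example, not part of the trie package):

package main

import (
	"fmt"
	"sync"
)

// cache mirrors the pattern above: a map guarded by an RWMutex whose values
// are immutable byte slices, so only the map access needs the lock.
type cache struct {
	lock  sync.RWMutex
	items map[string][]byte
}

func (c *cache) get(key string) []byte {
	c.lock.RLock()
	blob := c.items[key]
	c.lock.RUnlock()
	// blob is never written after insertion, so reading it here without the
	// lock is race-free.
	return blob
}

func (c *cache) put(key string, blob []byte) {
	c.lock.Lock()
	defer c.lock.Unlock()
	// Store a private copy so the caller cannot mutate the cached value later.
	c.items[key] = append([]byte(nil), blob...)
}

func main() {
	c := &cache{items: make(map[string][]byte)}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			c.put(fmt.Sprintf("k%d", i), []byte{byte(i)})
			_ = c.get("k0")
		}(i)
	}
	wg.Wait()
	fmt.Println(len(c.items), "entries")
}

Run under the race detector (go run -race) this stays quiet, which is exactly the property the comment above claims for dirty.node.
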
@@ -223,20 +221,6 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
 	return nil, errors.New("not found")
 }
 
-// Nodes retrieves the hashes of all the nodes cached within the memory database.
-// This method is extremely expensive and should only be used to validate internal
-// states in test code.
-func (db *Database) Nodes() []common.Hash {
-	db.lock.RLock()
-	defer db.lock.RUnlock()
-
-	var hashes = make([]common.Hash, 0, len(db.dirties))
-	for hash := range db.dirties {
-		hashes = append(hashes, hash)
-	}
-	return hashes
-}
-
 // Reference adds a new reference from a parent node to a child node.
 // This function is used to add reference between internal trie node
 // and external node(e.g. storage trie root), all internal trie nodes
@@ -344,33 +328,28 @@ func (db *Database) dereference(hash common.Hash) {
 
 // Cap iteratively flushes old but still referenced trie nodes until the total
 // memory usage goes below the given threshold.
-//
-// Note, this method is a non-synchronized mutator. It is unsafe to call this
-// concurrently with other mutators.
 func (db *Database) Cap(limit common.StorageSize) error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
 	// Create a database batch to flush persistent data out. It is important that
 	// outside code doesn't see an inconsistent state (referenced data removed from
 	// memory cache during commit but not yet in persistent storage). This is ensured
 	// by only uncaching existing data when the database write finalizes.
-	start := time.Now()
 	batch := db.diskdb.NewBatch()
-	db.lock.RLock()
-	nodes, storage := len(db.dirties), db.dirtiesSize
+	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
 
 	// db.dirtiesSize only contains the useful data in the cache, but when reporting
 	// the total memory consumption, the maintenance metadata is also needed to be
 	// counted.
 	size := db.dirtiesSize + common.StorageSize(len(db.dirties)*cachedNodeSize)
 	size += db.childrenSize
-	db.lock.RUnlock()
 
 	// Keep committing nodes from the flush-list until we're below allowance
 	oldest := db.oldest
 	for size > limit && oldest != (common.Hash{}) {
 		// Fetch the oldest referenced node and push into the batch
-		db.lock.RLock()
 		node := db.dirties[oldest]
-		db.lock.RUnlock()
 		rawdb.WriteLegacyTrieNode(batch, oldest, node.node)
 
 		// If we exceeded the ideal batch size, commit and reset
@@ -396,9 +375,6 @@ func (db *Database) Cap(limit common.StorageSize) error {
 		return err
 	}
 	// Write successful, clear out the flushed data
-	db.lock.Lock()
-	defer db.lock.Unlock()
-
 	for db.oldest != oldest {
 		node := db.dirties[db.oldest]
 		delete(db.dirties, db.oldest)
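
With the two hunks above, Cap holds the write lock for its entire run instead of toggling RLock/Lock around individual map accesses, so a background flusher can call it without coordinating with Node readers or with Commit. A sketch of such a loop, assuming db is a *Database from New; the 30-second period and the 16 MiB allowance are arbitrary example values and the function name is illustrative:

func flushLoop(db *Database, stop <-chan struct{}) error {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// Cap locks db.lock for its whole run, so this goroutine needs no
			// extra coordination with readers or other mutators.
			if err := db.Cap(16 * 1024 * 1024); err != nil {
				return err
			}
		case <-stop:
			return nil
		}
	}
}
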
@@ -429,14 +405,13 @@ func (db *Database) Cap(limit common.StorageSize) error {
 // Commit iterates over all the children of a particular node, writes them out
 // to disk, forcefully tearing down all references in both directions. As a side
 // effect, all pre-images accumulated up to this point are also written.
-//
-// Note, this method is a non-synchronized mutator. It is unsafe to call this
-// concurrently with other mutators.
 func (db *Database) Commit(node common.Hash, report bool) error {
 	if node == (common.Hash{}) {
 		// There's no data to commit in this node
 		return nil
 	}
+	db.lock.Lock()
+	defer db.lock.Unlock()
 
 	// Create a database batch to flush persistent data out. It is important that
 	// outside code doesn't see an inconsistent state (referenced data removed from
@@ -446,9 +421,7 @@ func (db *Database) Commit(node common.Hash, report bool) error {
 	batch := db.diskdb.NewBatch()
 
 	// Move the trie itself into the batch, flushing if enough data is accumulated
-	db.lock.RLock()
 	nodes, storage := len(db.dirties), db.dirtiesSize
-	db.lock.RUnlock()
 
 	uncacher := &cleaner{db}
 	if err := db.commit(node, batch, uncacher); err != nil {
@@ -461,8 +434,6 @@ func (db *Database) Commit(node common.Hash, report bool) error {
 		return err
 	}
 	// Uncache any leftovers in the last batch
-	db.lock.Lock()
-	defer db.lock.Unlock()
 	if err := batch.Replay(uncacher); err != nil {
 		return err
 	}
@@ -490,9 +461,7 @@ func (db *Database) Commit(node common.Hash, report bool) error {
 // commit is the private locked version of Commit.
 func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
 	// If the node does not exist, it's a previously committed node
-	db.lock.RLock()
 	node, ok := db.dirties[hash]
-	db.lock.RUnlock()
 	if !ok {
 		return nil
 	}
@@ -513,13 +482,11 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane
 		if err := batch.Write(); err != nil {
 			return err
 		}
-		db.lock.Lock()
 		err := batch.Replay(uncacher)
-		batch.Reset()
-		db.lock.Unlock()
 		if err != nil {
 			return err
 		}
+		batch.Reset()
 	}
 	return nil
 }
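
Commit follows the same shape as Cap: it takes db.lock once at the top, and every helper beneath it, including the commit helper above, now runs with the lock already held. That is why the per-call RLock/RUnlock pairs and the inner Lock around batch.Replay are deleted in these hunks; sync.RWMutex is not reentrant, so re-acquiring it from the same goroutine while a writer is waiting can deadlock. Moving batch.Reset after the error check also keeps a failed replay from silently clearing the batch. A usage sketch under the new contract, assuming db and the current trie root are available; the helper name is illustrative:

func flushOnShutdown(db *Database, root common.Hash) error {
	// Commit is self-locking now, so no caller-side mutex is needed even if
	// readers are still serving Node requests concurrently.
	if err := db.Commit(root, true); err != nil {
		return err
	}
	// A zero allowance asks Cap to keep flushing until the flush-list drains.
	return db.Cap(0)
}
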
@@ -680,8 +647,8 @@ type reader struct {
 	db *Database
 }
 
-// Node retrieves the trie node with the given node hash.
-// No error will be returned if the node is not found.
+// Node retrieves the trie node with the given node hash. No error will be
+// returned if the node is not found.
 func (reader *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
 	blob, _ := reader.db.Node(hash)
 	return blob, nil