@@ -161,7 +161,7 @@ type backfiller interface {
 	// on initial startup.
 	//
 	// The method should return the last block header that has been successfully
-	// backfilled, or nil if the backfiller was not resumed.
+	// backfilled (in the current or a previous run), falling back to the genesis.
 	suspend() *types.Header

 	// resume requests the backfiller to start running fill or snap sync based on
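For illustration, a minimal no-op backfiller satisfying the contract above could look like the sketch below. The noopFiller type and its fields are hypothetical, and resume is assumed to take no arguments (its signature is outside this hunk); the only behaviour taken from the diff is that suspend must report the last successfully filled header, falling back to the genesis header instead of returning nil.

package downloader

import "github.com/ethereum/go-ethereum/core/types"

// noopFiller is a hypothetical backfiller stub that never fills anything. It
// keeps the genesis header around so suspend can honor the contract of always
// returning a non-nil "last filled" header.
type noopFiller struct {
	genesis *types.Header // fallback when nothing has been backfilled yet
	filled  *types.Header // last header successfully backfilled, if any
}

// suspend reports the last successfully backfilled header, falling back to
// the genesis header rather than returning nil.
func (f *noopFiller) suspend() *types.Header {
	if f.filled == nil {
		return f.genesis
	}
	return f.filled
}

// resume is a no-op since this stub never performs any backfilling.
func (f *noopFiller) resume() {}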
@@ -382,14 +382,17 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) {
 	done := make(chan struct{})
 	go func() {
 		defer close(done)
-		if filled := s.filler.suspend(); filled != nil {
-			// If something was filled, try to delete stale sync helpers. If
-			// unsuccessful, warn the user, but not much else we can do (it's
-			// a programming error, just let users report an issue and don't
-			// choke in the meantime).
-			if err := s.cleanStales(filled); err != nil {
-				log.Error("Failed to clean stale beacon headers", "err", err)
-			}
+		filled := s.filler.suspend()
+		if filled == nil {
+			log.Error("Latest filled block is not available")
+			return
+		}
+		// If something was filled, try to delete stale sync helpers. If
+		// unsuccessful, warn the user, but not much else we can do (it's
+		// a programming error, just let users report an issue and don't
+		// choke in the meantime).
+		if err := s.cleanStales(filled); err != nil {
+			log.Error("Failed to clean stale beacon headers", "err", err)
 		}
 	}()
 	// Wait for the suspend to finish, consuming head events in the meantime
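The goroutine above reports completion by closing done; the loop whose lead comment ends this hunk (unchanged by the diff) then keeps consuming incoming head events so their sender is never blocked while the backfiller winds down. A simplified, self-contained sketch of that select pattern, where drainUntilDone, headEvents and the uint64 payload are hypothetical names chosen for illustration:

// drainUntilDone blocks until done is closed, discarding any head events that
// arrive in the meantime so the producer of those events never blocks on us.
func drainUntilDone(done <-chan struct{}, headEvents <-chan uint64) {
	for {
		select {
		case <-done:
			return // suspend finished, the caller can continue syncing
		case ev := <-headEvents:
			_ = ev // drop head events received mid-suspend on the floor
		}
	}
}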
@@ -1120,33 +1123,46 @@ func (s *skeleton) cleanStales(filled *types.Header) error {
 	number := filled.Number.Uint64()
 	log.Trace("Cleaning stale beacon headers", "filled", number, "hash", filled.Hash())

-	// If the filled header is below the linked subchain, something's
-	// corrupted internally. Report and error and refuse to do anything.
-	if number < s.progress.Subchains[0].Tail {
+	// If the filled header is below the linked subchain, something's corrupted
+	// internally. Report an error and refuse to do anything.
+	if number+1 < s.progress.Subchains[0].Tail {
 		return fmt.Errorf("filled header below beacon header tail: %d < %d", number, s.progress.Subchains[0].Tail)
 	}
-	// Subchain seems trimmable, push the tail forward up to the last
-	// filled header and delete everything before it - if available. In
-	// case we filled past the head, recreate the subchain with a new
-	// head to keep it consistent with the data on disk.
+	// If nothing in the subchain is filled, don't bother to do cleanup.
+	if number+1 == s.progress.Subchains[0].Tail {
+		return nil
+	}
 	var (
-		start = s.progress.Subchains[0].Tail // start deleting from the first known header
-		end   = number                       // delete until the requested threshold
+		start uint64
+		end   uint64
 		batch = s.db.NewBatch()
 	)
-	s.progress.Subchains[0].Tail = number
-	s.progress.Subchains[0].Next = filled.ParentHash
-
-	if s.progress.Subchains[0].Head < number {
-		// If more headers were filled than available, push the entire
-		// subchain forward to keep tracking the node's block imports
-		end = s.progress.Subchains[0].Head + 1 // delete the entire original range, including the head
-		s.progress.Subchains[0].Head = number  // assign a new head (tail is already assigned to this)
-
-		// The entire original skeleton chain was deleted and a new one
-		// defined. Make sure the new single-header chain gets pushed to
-		// disk to keep internal state consistent.
-		rawdb.WriteSkeletonHeader(batch, filled)
+	if number < s.progress.Subchains[0].Head {
+		// The skeleton chain is partially consumed, set the new tail as filled+1.
+		tail := rawdb.ReadSkeletonHeader(s.db, number+1)
+		if tail.ParentHash != filled.Hash() {
+			return fmt.Errorf("filled header is discontinuous with subchain: %d %s, please file an issue", number, filled.Hash())
+		}
+		start, end = s.progress.Subchains[0].Tail, number+1 // remove headers in [tail, filled]
+		s.progress.Subchains[0].Tail = tail.Number.Uint64()
+		s.progress.Subchains[0].Next = tail.ParentHash
+	} else {
+		// The skeleton chain is fully consumed, set both head and tail as filled.
+		start, end = s.progress.Subchains[0].Tail, filled.Number.Uint64() // remove headers in [tail, filled)
+		s.progress.Subchains[0].Tail = filled.Number.Uint64()
+		s.progress.Subchains[0].Next = filled.ParentHash
+
+		// If more headers were filled than available, push the entire subchain
+		// forward to keep tracking the node's block imports.
+		if number > s.progress.Subchains[0].Head {
+			end = s.progress.Subchains[0].Head + 1 // delete the entire original range, including the head
+			s.progress.Subchains[0].Head = number  // assign a new head (tail is already assigned to this)
+
+			// The entire original skeleton chain was deleted and a new one
+			// defined. Make sure the new single-header chain gets pushed to
+			// disk to keep internal state consistent.
+			rawdb.WriteSkeletonHeader(batch, filled)
+		}
 	}
 	// Execute the trimming and the potential rewiring of the progress
 	s.saveSyncStatus(batch)
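To make the new boundary arithmetic concrete, the case analysis in cleanStales can be exercised with the standalone sketch below. The subchain struct and the trim helper are hypothetical simplifications carrying only block numbers; the case boundaries themselves mirror the diff above (error below the tail, no-op just below the tail, partial trim while unfilled skeleton headers remain, full reset once the head has been consumed or overrun).

package main

import "fmt"

// subchain is a simplified stand-in for s.progress.Subchains[0], carrying only
// the numeric bounds needed to illustrate the trimming cases.
type subchain struct {
	Tail, Head uint64
}

// trim mirrors the case analysis in cleanStales for a header filled at number:
// it reports the new tail and the [delStart, delEnd) range of skeleton headers
// that would be deleted, or an error if the filled header is below the tail.
func trim(sc subchain, number uint64) (newTail, delStart, delEnd uint64, err error) {
	switch {
	case number+1 < sc.Tail:
		return 0, 0, 0, fmt.Errorf("filled header below tail: %d < %d", number, sc.Tail)
	case number+1 == sc.Tail:
		return sc.Tail, 0, 0, nil // nothing filled yet, nothing to clean up
	case number < sc.Head:
		return number + 1, sc.Tail, number + 1, nil // partial: delete [tail, filled], tail becomes filled+1
	case number == sc.Head:
		return number, sc.Tail, number, nil // fully consumed: delete [tail, filled), tail and head become filled
	default:
		return number, sc.Tail, sc.Head + 1, nil // filled past the head: drop the whole old range, head moves to filled
	}
}

func main() {
	sc := subchain{Tail: 100, Head: 200}
	for _, filled := range []uint64{98, 99, 150, 200, 250} {
		tail, delStart, delEnd, err := trim(sc, filled)
		fmt.Println("filled", filled, "-> tail", tail, "delete", delStart, delEnd, "err", err)
	}
}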