Skip to content

Commit 5c67066

Browse files
authored
eth/downloader: fix skeleton cleanup (#28581)
* eth/downloader: fix skeleton cleanup * eth/downloader: short circuit if nothing to delete * eth/downloader: polish the logic in cleanup * eth/downloader: address comments
1 parent 3adf1ce commit 5c67066

File tree

3 files changed

+50
-32
lines changed

3 files changed

+50
-32
lines changed

eth/downloader/beaconsync.go

+2-1
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,8 @@ func newBeaconBackfiller(dl *Downloader, success func()) backfiller {
5050
}
5151

5252
// suspend cancels any background downloader threads and returns the last header
53-
// that has been successfully backfilled.
53+
// that has been successfully backfilled (potentially in a previous run), or the
54+
// genesis.
5455
func (b *beaconBackfiller) suspend() *types.Header {
5556
// If no filling is running, don't waste cycles
5657
b.lock.Lock()

eth/downloader/downloader.go

+1
Original file line numberDiff line numberDiff line change
@@ -611,6 +611,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *
611611
if err := d.lightchain.SetHead(origin); err != nil {
612612
return err
613613
}
614+
log.Info("Truncated excess ancient chain segment", "oldhead", frozen-1, "newhead", origin)
614615
}
615616
}
616617
// Initiate the sync using a concurrent header and content retrieval algorithm

eth/downloader/skeleton.go

+47-31
Original file line numberDiff line numberDiff line change
@@ -161,7 +161,7 @@ type backfiller interface {
161161
// on initial startup.
162162
//
163163
// The method should return the last block header that has been successfully
164-
// backfilled, or nil if the backfiller was not resumed.
164+
// backfilled (in the current or a previous run), falling back to the genesis.
165165
suspend() *types.Header
166166

167167
// resume requests the backfiller to start running fill or snap sync based on
@@ -382,14 +382,17 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) {
382382
done := make(chan struct{})
383383
go func() {
384384
defer close(done)
385-
if filled := s.filler.suspend(); filled != nil {
386-
// If something was filled, try to delete stale sync helpers. If
387-
// unsuccessful, warn the user, but not much else we can do (it's
388-
// a programming error, just let users report an issue and don't
389-
// choke in the meantime).
390-
if err := s.cleanStales(filled); err != nil {
391-
log.Error("Failed to clean stale beacon headers", "err", err)
392-
}
385+
filled := s.filler.suspend()
386+
if filled == nil {
387+
log.Error("Latest filled block is not available")
388+
return
389+
}
390+
// If something was filled, try to delete stale sync helpers. If
391+
// unsuccessful, warn the user, but not much else we can do (it's
392+
// a programming error, just let users report an issue and don't
393+
// choke in the meantime).
394+
if err := s.cleanStales(filled); err != nil {
395+
log.Error("Failed to clean stale beacon headers", "err", err)
393396
}
394397
}()
395398
// Wait for the suspend to finish, consuming head events in the meantime
@@ -1120,33 +1123,46 @@ func (s *skeleton) cleanStales(filled *types.Header) error {
11201123
number := filled.Number.Uint64()
11211124
log.Trace("Cleaning stale beacon headers", "filled", number, "hash", filled.Hash())
11221125

1123-
// If the filled header is below the linked subchain, something's
1124-
// corrupted internally. Report and error and refuse to do anything.
1125-
if number < s.progress.Subchains[0].Tail {
1126+
// If the filled header is below the linked subchain, something's corrupted
1127+
// internally. Report an error and refuse to do anything.
1128+
if number+1 < s.progress.Subchains[0].Tail {
11261129
return fmt.Errorf("filled header below beacon header tail: %d < %d", number, s.progress.Subchains[0].Tail)
11271130
}
1128-
// Subchain seems trimmable, push the tail forward up to the last
1129-
// filled header and delete everything before it - if available. In
1130-
// case we filled past the head, recreate the subchain with a new
1131-
// head to keep it consistent with the data on disk.
1131+
// If nothing in subchain is filled, don't bother to do cleanup.
1132+
if number+1 == s.progress.Subchains[0].Tail {
1133+
return nil
1134+
}
11321135
var (
1133-
start = s.progress.Subchains[0].Tail // start deleting from the first known header
1134-
end = number // delete until the requested threshold
1136+
start uint64
1137+
end uint64
11351138
batch = s.db.NewBatch()
11361139
)
1137-
s.progress.Subchains[0].Tail = number
1138-
s.progress.Subchains[0].Next = filled.ParentHash
1139-
1140-
if s.progress.Subchains[0].Head < number {
1141-
// If more headers were filled than available, push the entire
1142-
// subchain forward to keep tracking the node's block imports
1143-
end = s.progress.Subchains[0].Head + 1 // delete the entire original range, including the head
1144-
s.progress.Subchains[0].Head = number // assign a new head (tail is already assigned to this)
1145-
1146-
// The entire original skeleton chain was deleted and a new one
1147-
// defined. Make sure the new single-header chain gets pushed to
1148-
// disk to keep internal state consistent.
1149-
rawdb.WriteSkeletonHeader(batch, filled)
1140+
if number < s.progress.Subchains[0].Head {
1141+
// The skeleton chain is partially consumed, set the new tail as filled+1.
1142+
tail := rawdb.ReadSkeletonHeader(s.db, number+1)
1143+
if tail.ParentHash != filled.Hash() {
1144+
return fmt.Errorf("filled header is discontinuous with subchain: %d %s, please file an issue", number, filled.Hash())
1145+
}
1146+
start, end = s.progress.Subchains[0].Tail, number+1 // remove headers in [tail, filled]
1147+
s.progress.Subchains[0].Tail = tail.Number.Uint64()
1148+
s.progress.Subchains[0].Next = tail.ParentHash
1149+
} else {
1150+
// The skeleton chain is fully consumed, set both head and tail as filled.
1151+
start, end = s.progress.Subchains[0].Tail, filled.Number.Uint64() // remove headers in [tail, filled)
1152+
s.progress.Subchains[0].Tail = filled.Number.Uint64()
1153+
s.progress.Subchains[0].Next = filled.ParentHash
1154+
1155+
// If more headers were filled than available, push the entire subchain
1156+
// forward to keep tracking the node's block imports.
1157+
if number > s.progress.Subchains[0].Head {
1158+
end = s.progress.Subchains[0].Head + 1 // delete the entire original range, including the head
1159+
s.progress.Subchains[0].Head = number // assign a new head (tail is already assigned to this)
1160+
1161+
// The entire original skeleton chain was deleted and a new one
1162+
// defined. Make sure the new single-header chain gets pushed to
1163+
// disk to keep internal state consistent.
1164+
rawdb.WriteSkeletonHeader(batch, filled)
1165+
}
11501166
}
11511167
// Execute the trimming and the potential rewiring of the progress
11521168
s.saveSyncStatus(batch)

0 commit comments

Comments
 (0)