@@ -20,6 +20,7 @@ import (
 	"context"
 	"encoding/hex"
 	"errors"
+	"fmt"
 	"sync"
 	"time"
 
@@ -102,11 +103,11 @@ func (b *BzzEth) handleMsg(p *Peer) func(context.Context, interface{}) error {
 	p.logger.Trace("bzzeth.handleMsg")
 	switch msg := msg.(type) {
 	case *NewBlockHeaders:
-		go b.handleNewBlockHeaders(ctx, p, msg)
+		return b.handleNewBlockHeaders(ctx, p, msg)
 	case *BlockHeaders:
-		go b.handleBlockHeaders(ctx, p, msg)
+		return b.handleBlockHeaders(ctx, p, msg)
 	case *GetBlockHeaders:
-		go b.handleGetBlockHeaders(ctx, p, msg)
+		return b.handleGetBlockHeaders(ctx, p, msg)
 	}
 	return nil
 }
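
With the handlers above returning errors instead of being spawned as goroutines, the loop that invokes handleMsg can treat a non-nil error as fatal for the peer. A minimal sketch, reusing the package's existing Peer.Drop and logger seen elsewhere in this diff; runLoop and the recv channel are illustrative names, not part of this change:

// Illustrative only: a dispatch loop that consumes the errors now returned
// by the bzzeth handlers. runLoop and recv are assumed names.
func runLoop(ctx context.Context, p *Peer, recv <-chan interface{}, handle func(context.Context, interface{}) error) {
	for msg := range recv {
		if err := handle(ctx, msg); err != nil {
			p.logger.Error("bzzeth: handler error", "err", err)
			p.Drop(err.Error()) // a handler error disconnects the peer
			return
		}
	}
}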
@@ -123,7 +124,7 @@ func (b *BzzEth) handleMsgFromSwarmNode(p *Peer) func(context.Context, interface
 
 // handleNewBlockHeaders handles new header hashes
 // only request headers that are in Kad Nearest Neighbourhood
-func (b *BzzEth) handleNewBlockHeaders(ctx context.Context, p *Peer, msg *NewBlockHeaders) {
+func (b *BzzEth) handleNewBlockHeaders(ctx context.Context, p *Peer, msg *NewBlockHeaders) error {
 	p.logger.Trace("bzzeth.handleNewBlockHeaders")
 
 	// collect the addresses of blocks that are not in our localstore
@@ -135,7 +136,7 @@ func (b *BzzEth) handleNewBlockHeaders(ctx context.Context, p *Peer, msg *NewBlo
 	yes, err := b.netStore.Store.HasMulti(ctx, addresses...)
 	if err != nil {
 		log.Error("Error checking hashesh in store", "Reason", err)
-		return
+		return nil
 	}
 
 	// collect the hashes of block headers we want
@@ -160,7 +161,7 @@ func (b *BzzEth) handleNewBlockHeaders(ctx context.Context, p *Peer, msg *NewBlo
 	req, err := p.getBlockHeaders(ctx, hashes, deliveries)
 	if err != nil {
 		p.logger.Error("Error sending GetBlockHeader message", "Reason", err)
-		return
+		return nil
 	}
 	defer req.cancel()
 
@@ -172,19 +173,24 @@ func (b *BzzEth) handleNewBlockHeaders(ctx context.Context, p *Peer, msg *NewBlo
 		case hdr, ok := <-deliveries:
 			if !ok {
 				p.logger.Debug("bzzeth.handleNewBlockHeaders", "delivered", deliveredCnt)
-				return
+				// todo: introduce better errors
+				return nil
 			}
 			ch := newChunk(hdr)
 			deliveredCnt++
 			p.logger.Trace("bzzeth.handleNewBlockHeaders", "hash", ch.Address().Hex(), "delivered", deliveredCnt)
+
+			req.lock.RLock()
 			if deliveredCnt == len(req.hashes) {
 				p.logger.Debug("all headers delivered", "count", deliveredCnt)
 				finishDeliveryFunc(req.hashes)
-				return
+				req.lock.RUnlock()
+				return nil
 			}
+			req.lock.RUnlock()
 		case <-ctx.Done():
 			p.logger.Debug("bzzeth.handleNewBlockHeaders", "delivered", deliveredCnt, "err", err)
-			return
+			return nil
 		}
 	}
 }
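
The RLock/RUnlock pair added around the len(req.hashes) check implies req.hashes can be written concurrently elsewhere. The request struct itself is outside this diff; its assumed shape, inferred only from the fields used above, for reference:

// Assumed shape of request, inferred from req.lock, req.hashes and req.cancel
// in this diff; the real definition lives elsewhere in the package.
type request struct {
	lock   sync.RWMutex    // guards hashes against concurrent read/write
	hashes map[string]bool // header hashes requested from the peer
	cancel func()          // cancels the outstanding GetBlockHeaders request
}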
@@ -220,15 +226,14 @@ func finishDelivery(hashes map[string]bool) {
 }
 
 // handleBlockHeaders handles block headers message
-func (b *BzzEth) handleBlockHeaders(ctx context.Context, p *Peer, msg *BlockHeaders) {
+func (b *BzzEth) handleBlockHeaders(ctx context.Context, p *Peer, msg *BlockHeaders) error {
 	p.logger.Debug("bzzeth.handleBlockHeaders", "id", msg.Rid)
 
 	// retrieve the request for this id
 	req, ok := p.requests.get(msg.Rid)
 	if !ok {
-		p.logger.Warn("bzzeth.handleBlockHeaders: nonexisting request id", "id", msg.Rid)
-		p.Drop("nonexisting request id")
-		return
+		return fmt.Errorf("bzzeth.handleBlockHeaders: nonexisting request id %d", msg.Rid)
+
 	}
 
 	// convert rlp.RawValue to bytes
@@ -237,11 +242,7 @@ func (b *BzzEth) handleBlockHeaders(ctx context.Context, p *Peer, msg *BlockHead
 		headers[i] = h
 	}
 
-	err := b.deliverAndStoreAll(ctx, req, headers)
-	if err != nil {
-		p.logger.Warn("bzzeth.handleBlockHeaders: fatal dropping peer", "id", msg.Rid, "err", err)
-		p.Drop("error on deliverAndStoreAll")
-	}
+	return b.deliverAndStoreAll(ctx, req, headers)
 }
 
 // Validates and headers asynchronously and stores the valid chunks in one go
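
Returning fmt.Errorf from handleBlockHeaders (and the error from deliverAndStoreAll) pushes the drop decision up to handleMsg's caller. In line with the "todo: introduce better errors" note earlier in the diff, a hedged sketch of how sentinel errors could let that caller distinguish fatal protocol violations; the names below are hypothetical and not part of this change:

// Hypothetical sentinel, not present in this change.
var errUnsolicitedHeaders = errors.New("bzzeth: nonexisting request id")

// In handleBlockHeaders (sketch):
//	if !ok {
//		return fmt.Errorf("%w: %d", errUnsolicitedHeaders, msg.Rid)
//	}
//
// In the caller (sketch):
//	if err := handle(ctx, msg); errors.Is(err, errUnsolicitedHeaders) {
//		p.Drop(err.Error())
//	}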
@@ -263,13 +264,14 @@ func (b *BzzEth) deliverAndStoreAll(ctx context.Context, req *request, headers [
 			return nil
 		})
 	}
-	// finish storage is used mostly in testing
-	// in normal scenario.. it just logs Trace
-	defer finishStorageFunc(chunks)
 
 	// wait for all validations to get over and close the channels
 	err := wg.Wait()
 
+	// finish storage is used mostly in testing
+	// in normal scenario.. it just logs Trace
+	defer finishStorageFunc(chunks)
+
 	// We want to store even if there is any validation error.
 	// since some headers may be valid in the batch.
 	// Store all the valid header chunks in one shot
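
For readers following deliverAndStoreAll: wg is used like an errgroup-style group, so wg.Wait returns the first validation error while the valid chunks are still stored afterwards. A small standalone sketch of that pattern, assuming golang.org/x/sync/errgroup; the header data and the validation rule are invented for illustration:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	headers := [][]byte{[]byte("h1"), nil, []byte("h3")}
	valid := make([][]byte, len(headers)) // one slot per goroutine, so no extra locking needed

	var wg errgroup.Group
	for i, h := range headers {
		i, h := i, h // capture loop variables for the closures
		wg.Go(func() error {
			if len(h) == 0 { // stand-in for header validation
				return fmt.Errorf("invalid header at index %d", i)
			}
			valid[i] = h
			return nil
		})
	}

	err := wg.Wait() // first validation error, if any
	stored := 0
	for _, v := range valid {
		if v != nil {
			stored++ // store the valid headers even when err != nil
		}
	}
	fmt.Println("stored:", stored, "validation error:", err)
}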
@@ -354,7 +356,7 @@ func arrangeHeader(hashes []chunk.Address, headers []chunk.Address) []chunk.Addr
 
 // handles GetBlockHeader requests, in the protocol handler this call is asynchronous
 // so it is safe to have it run until delivery is finished
-func (b *BzzEth) handleGetBlockHeaders(ctx context.Context, p *Peer, msg *GetBlockHeaders) {
+func (b *BzzEth) handleGetBlockHeaders(ctx context.Context, p *Peer, msg *GetBlockHeaders) error {
 	p.logger.Debug("bzzeth.handleGetBlockHeaders", "id", msg.Rid)
 	total := len(msg.Hashes)
 	ctx, osp := spancontext.StartSpan(ctx, "bzzeth.handleGetBlockHeaders")
@@ -420,6 +422,8 @@ DELIVERY:
 		}
 	}
 	p.logger.Debug("bzzeth.handleGetBlockHeaders: sent all headers", "id", msg.Rid)
+
+	return nil
 }
 
 var batchWait = 100 * time.Millisecond // time to wait for collecting headers in a batch