@@ -30,18 +30,19 @@ def __init__(self, header=None):
class SyncTask(object):

    """
-    synchronizes a the chain starting from a given blockhash
-    blockchain hash is fetched from a single peer (which led to the unknown blockhash)
-    blocks are fetched from the best peers
-
-    with missing block:
-        fetch headers
-            until known block
-    for headers
-        fetch block bodies
-        for each block body
-            construct block
-            chainservice.add_blocks() # blocks if queue is full
+    Block header syncing.
+
+    Starting from the latest block head of the chain, the missing blocks are
+    divided into N sections of 128 header batches, with 192 headers per
+    batch. A skeleton made of the first header of each batch is downloaded
+    from the original peer; each available idle peer then downloads one
+    header batch in parallel. For every batch, the first and last headers
+    are matched against the respective skeleton headers, the header order is
+    verified, the batch is saved into a header cache, and the headers
+    downloaded so far are delivered in ascending order to a queue that
+    schedules body downloads.
+    When one section's download is complete, the starting header moves to
+    the start of the next section; if the download is interrupted, syncing
+    restarts from the latest block head of the current chain.
    """
    initial_blockheaders_per_request = 32
    max_blockheaders_per_request = 192
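
The section/batch arithmetic described in the new docstring can be illustrated with a small sketch; the batch and section sizes mirror the constants above, but the function and variable names are illustrative only and not part of this patch:

# Hypothetical helper: block numbers at which the skeleton headers of one
# section would be requested (the first header of each 192-header batch).
BATCH_SIZE = 192           # headers per batch (max_blockheaders_per_request)
BATCHES_PER_SECTION = 128  # header batches per section

def skeleton_block_numbers(start_number, section_index=0):
    section_start = start_number + section_index * BATCH_SIZE * BATCHES_PER_SECTION
    return [section_start + i * BATCH_SIZE for i in range(BATCHES_PER_SECTION)]

# Example: syncing from block 1000, the skeleton of the first section is
# fetched at 1000, 1192, 1384, ... and idle peers fill the 192-header
# batches between consecutive skeleton headers in parallel.
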
@@ -359,9 +360,17 @@ def receive_blockheaders(self, proto, blockheaders):
        self.requests[proto].set(blockheaders)


-class SyncBody(object):
-    max_blocks_per_request = 128
+class SyncBody(object):
+    """
+    Handles body syncing.
+
+    For each available peer, block bodies are fetched in parallel from the
+    task queue in batches of up to 128. Every body in a fetch response is
+    matched against the headers in the body-fetch task queue; on a match the
+    body is stored in a body result cache and the corresponding task is
+    removed from the queue. Cached block bodies are then imported into the
+    chain and removed from the body cache.
+    """
    max_blocks_process = 2048
    blocks_request_timeout = 19.
    max_retries = 5
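
The body-matching step described in the docstring can be sketched roughly as follows; the attribute and container names (pending_tasks, body_cache, tx_list_root, uncles_hash) are assumptions for illustration, not the class's actual fields:

# Hypothetical sketch of matching fetched bodies to queued header tasks.
def body_matches(header, body):
    # A body belongs to a header when the root of its transaction list and
    # the hash of its uncle list equal the header's tx_list_root and
    # uncles_hash; here both sides are assumed to expose those values.
    return (header.tx_list_root == body.tx_list_root
            and header.uncles_hash == body.uncles_hash)

def deliver_bodies(pending_tasks, body_cache, bodies):
    # pending_tasks: {block_number: header} awaiting a body
    # body_cache:    {block_number: (header, body)} ready for chain import
    for body in bodies:
        for number, header in sorted(pending_tasks.items()):
            if body_matches(header, body):
                body_cache[number] = (header, body)
                del pending_tasks[number]
                break
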
@@ -412,7 +421,12 @@ def run(self):
            print(traceback.format_exc())
            self.exit(success=False)

-
+
+    # Body fetch scheduler:
+    # reads headers from the downloaded-header queue, divides them into
+    # batches of up to 2048, and adds each batch to the task queue, where
+    # every queue item is a task of up to 128 body fetches; the body fetcher
+    # is then activated.
    def schedule_block_fetch(self):
        batch_header = []
        log_st.debug('start sheduleing blocks')
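
A rough sketch of the scheduling loop outlined in the comment above, draining downloaded headers and enqueuing them as body-fetch tasks; the queue names and helper are placeholders rather than the synchronizer's real attributes:

# Hypothetical scheduling helper mirroring the 2048/128 batching described
# in the comment; header_queue and task_queue are assumed Queue-like objects.
MAX_BLOCKS_PROCESS = 2048    # headers taken per scheduling round
MAX_BODIES_PER_FETCH = 128   # headers per body-fetch task

def schedule_bodies(header_queue, task_queue):
    batch = []
    while len(batch) < MAX_BLOCKS_PROCESS and not header_queue.empty():
        batch.append(header_queue.get())
    # split the drained headers into body-fetch tasks of up to 128 each
    for i in range(0, len(batch), MAX_BODIES_PER_FETCH):
        task_queue.put(batch[i:i + MAX_BODIES_PER_FETCH])
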