diff --git a/.gitignore b/.gitignore index b68143af2..648f2dedb 100644 --- a/.gitignore +++ b/.gitignore @@ -16,9 +16,8 @@ cover.out coverage.* .DS_Store resource/zoobc_* -resource/snapshots +resource/snapshots* resource_cluster/zoobc_* cmd/*.new accounts.txt .manual -core/service/testdata/snapshots/* \ No newline at end of file diff --git a/cmd/block/blockGenerator.go b/cmd/block/blockGenerator.go index 4ea6395a8..01a7bfee3 100644 --- a/cmd/block/blockGenerator.go +++ b/cmd/block/blockGenerator.go @@ -22,8 +22,8 @@ import ( ) type ( - mockBlockTypeStatusService struct { - service.BlockTypeStatusService + mockBlockchainStatusService struct { + service.BlockchainStatusService } ) @@ -58,11 +58,11 @@ var ( } ) -func (*mockBlockTypeStatusService) IsFirstDownloadFinished(ct chaintype.ChainType) bool { +func (*mockBlockchainStatusService) IsFirstDownloadFinished(ct chaintype.ChainType) bool { return true } -func (*mockBlockTypeStatusService) IsDownloading(ct chaintype.ChainType) bool { +func (*mockBlockchainStatusService) IsDownloading(ct chaintype.ChainType) bool { return true } @@ -209,7 +209,7 @@ func generateBlocks(numberOfBlocks int, blocksmithSecretPhrase, outputPath strin blocksmith, blockService, log.New(), - &mockBlockTypeStatusService{}, + &mockBlockchainStatusService{}, ) startTime := time.Now().UnixNano() / 1e6 fmt.Printf("generating %d blocks\n", numberOfBlocks) diff --git a/common/constant/p2p.go b/common/constant/p2p.go index 4c2f50522..4be9fedb1 100644 --- a/common/constant/p2p.go +++ b/common/constant/p2p.go @@ -30,9 +30,8 @@ const ( // BlockchainsyncWaitingTime time, in seconds, to wait before start syncing the blockchain BlockchainsyncWaitingTime time.Duration = 5 * time.Second // BlockchainsyncCheckInterval time, in seconds, between checks if spine blocks have finished to be downloaded - BlockchainsyncCheckInterval time.Duration = 10 * time.Second + BlockchainsyncCheckInterval time.Duration = 3 * time.Second // BlockchainsyncSpineTimeout timeout, in seconds, for spine blocks to be downloaded from the network - // FIXME: this is for debugging purposes only and must higher on production, - // where downloading the spine blocks could take longer than 30 minutes - BlockchainsyncSpineTimeout time.Duration = 1800 * time.Second + // download spine blocks and snapshot (if present) timeout + BlockchainsyncSpineTimeout time.Duration = 3600 * time.Second ) diff --git a/common/constant/smith.go b/common/constant/smith.go index 2906ca748..0b4269368 100644 --- a/common/constant/smith.go +++ b/common/constant/smith.go @@ -18,7 +18,8 @@ var ( // CheckTimedOutBlock to use in scheduler to check timedout block while waiting transaction CheckTimedOutBlock = 30 * time.Second SpineChainSmithIdlePeriod = 500 * time.Millisecond - // SpineChainSmithingPeriod one spine block every 5 min + // SpineChainSmithingPeriod one spine block every 5 min (300 seconds) + // @iltoga reduce to 60 for testing locally SpineChainSmithingPeriod = int64(300) MainChainSmithIdlePeriod = 500 * time.Millisecond // MainChainSmithingPeriod one main block every 15 seconds + block pool delay (max +30 seconds) diff --git a/common/constant/snapshot.go b/common/constant/snapshot.go index 8971c00e2..48ac2bf00 100644 --- a/common/constant/snapshot.go +++ b/common/constant/snapshot.go @@ -4,8 +4,13 @@ import "time" const ( // SnapshotGenerationTimeout maximum time, in seconds, allowed for a node to generate a snapshot + // @iltoga reduce to 1 for testing locally MainchainSnapshotGenerationTimeout time.Duration = 10 * time.Minute // 
10 minutes before including in spine block // MainchainSnapshotInterval interval in mainchain blocks between snapshots - MainchainSnapshotInterval uint32 = 720 // 720 mainchain blocks (= MinRollbackHeight) - SnapshotChunkSize int = int(100 * 1024) // 100 KB + // @iltoga reduce to 5 for testing locally + MainchainSnapshotInterval uint32 = 720 // 720 mainchain blocks (= MinRollbackHeight) + // @iltoga reduce to 1 for testing locally + SnapshotChunkSize int = int(100 * 1024) // 10 KB + // DownloadSnapshotNumberOfRetries number of times to retry downloading failed snapshot file chunks from other peers + DownloadSnapshotNumberOfRetries uint32 = 3 ) diff --git a/common/model/fileDownload.pb.go b/common/model/fileDownload.pb.go new file mode 100644 index 000000000..c5d8f9e33 --- /dev/null +++ b/common/model/fileDownload.pb.go @@ -0,0 +1,131 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/fileDownload.proto + +package model + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type FileDownloadResponse struct { + FileChunks [][]byte `protobuf:"bytes,1,rep,name=FileChunks,proto3" json:"FileChunks,omitempty"` + Failed []string `protobuf:"bytes,2,rep,name=Failed,proto3" json:"Failed,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDownloadResponse) Reset() { *m = FileDownloadResponse{} } +func (m *FileDownloadResponse) String() string { return proto.CompactTextString(m) } +func (*FileDownloadResponse) ProtoMessage() {} +func (*FileDownloadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e7c4a80c0792f337, []int{0} +} + +func (m *FileDownloadResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDownloadResponse.Unmarshal(m, b) +} +func (m *FileDownloadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDownloadResponse.Marshal(b, m, deterministic) +} +func (m *FileDownloadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDownloadResponse.Merge(m, src) +} +func (m *FileDownloadResponse) XXX_Size() int { + return xxx_messageInfo_FileDownloadResponse.Size(m) +} +func (m *FileDownloadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FileDownloadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDownloadResponse proto.InternalMessageInfo + +func (m *FileDownloadResponse) GetFileChunks() [][]byte { + if m != nil { + return m.FileChunks + } + return nil +} + +func (m *FileDownloadResponse) GetFailed() []string { + if m != nil { + return m.Failed + } + return nil +} + +// RequestFilesRequest a model request of requesting files to download from a peer +type FileDownloadRequest struct { + FileChunkNames []string `protobuf:"bytes,1,rep,name=FileChunkNames,proto3" json:"FileChunkNames,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDownloadRequest) Reset() { *m = FileDownloadRequest{} } +func (m *FileDownloadRequest) 
String() string { return proto.CompactTextString(m) } +func (*FileDownloadRequest) ProtoMessage() {} +func (*FileDownloadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_e7c4a80c0792f337, []int{1} +} + +func (m *FileDownloadRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDownloadRequest.Unmarshal(m, b) +} +func (m *FileDownloadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDownloadRequest.Marshal(b, m, deterministic) +} +func (m *FileDownloadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDownloadRequest.Merge(m, src) +} +func (m *FileDownloadRequest) XXX_Size() int { + return xxx_messageInfo_FileDownloadRequest.Size(m) +} +func (m *FileDownloadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FileDownloadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDownloadRequest proto.InternalMessageInfo + +func (m *FileDownloadRequest) GetFileChunkNames() []string { + if m != nil { + return m.FileChunkNames + } + return nil +} + +func init() { + proto.RegisterType((*FileDownloadResponse)(nil), "model.FileDownloadResponse") + proto.RegisterType((*FileDownloadRequest)(nil), "model.FileDownloadRequest") +} + +func init() { proto.RegisterFile("model/fileDownload.proto", fileDescriptor_e7c4a80c0792f337) } + +var fileDescriptor_e7c4a80c0792f337 = []byte{ + // 178 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xc8, 0xcd, 0x4f, 0x49, + 0xcd, 0xd1, 0x4f, 0xcb, 0xcc, 0x49, 0x75, 0xc9, 0x2f, 0xcf, 0xcb, 0xc9, 0x4f, 0x4c, 0xd1, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x05, 0xcb, 0x28, 0xf9, 0x71, 0x89, 0xb8, 0x21, 0x49, 0x06, + 0xa5, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x0a, 0xc9, 0x71, 0x71, 0x81, 0xc4, 0x9d, 0x33, 0x4a, + 0xf3, 0xb2, 0x8b, 0x25, 0x18, 0x15, 0x98, 0x35, 0x78, 0x82, 0x90, 0x44, 0x84, 0xc4, 0xb8, 0xd8, + 0xdc, 0x12, 0x33, 0x73, 0x52, 0x53, 0x24, 0x98, 0x14, 0x98, 0x35, 0x38, 0x83, 0xa0, 0x3c, 0x25, + 0x5b, 0x2e, 0x61, 0x54, 0xf3, 0x0a, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0xd4, 0xb8, 0xf8, 0xe0, 0x9a, + 0xfd, 0x12, 0x73, 0x53, 0x21, 0x46, 0x72, 0x06, 0xa1, 0x89, 0x3a, 0x69, 0x45, 0x69, 0xa4, 0x67, + 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x57, 0xe5, 0xe7, 0x27, 0x25, 0x43, 0x48, + 0xdd, 0xe4, 0xfc, 0xa2, 0x54, 0xfd, 0xe4, 0xfc, 0xdc, 0xdc, 0xfc, 0x3c, 0x7d, 0xb0, 0xd3, 0x93, + 0xd8, 0xc0, 0x1e, 0x31, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x21, 0xf8, 0xd6, 0xb2, 0xe4, 0x00, + 0x00, 0x00, +} diff --git a/common/model/mapIntBool.go b/common/model/mapIntBool.go new file mode 100644 index 000000000..f3284e34e --- /dev/null +++ b/common/model/mapIntBool.go @@ -0,0 +1,53 @@ +package model + +import "sync" + +type MapIntBool struct { + sync.RWMutex + internal map[int32]bool +} + +func NewMapIntBool() *MapIntBool { + return &MapIntBool{ + internal: make(map[int32]bool), + } +} + +func (rm *MapIntBool) Load(key int32) (value, ok bool) { + rm.RLock() + result, ok := rm.internal[key] + rm.RUnlock() + return result, ok +} + +func (rm *MapIntBool) Delete(key int32) { + rm.Lock() + delete(rm.internal, key) + rm.Unlock() +} + +func (rm *MapIntBool) Store(key int32, value bool) { + rm.Lock() + rm.internal[key] = value + rm.Unlock() +} + +func (rm *MapIntBool) Count() int { + rm.RLock() + result := len(rm.internal) + rm.RUnlock() + return result +} + +func (rm *MapIntBool) Reset() { + rm.Lock() + rm.internal = NewMapIntBool().internal + rm.Unlock() +} + +func (rm *MapIntBool) GetMap() map[int32]bool { + rm.RLock() + result := 
rm.internal + rm.RUnlock() + return result +} diff --git a/common/model/mapStringInt.go b/common/model/mapStringInt.go new file mode 100644 index 000000000..b1c116c8a --- /dev/null +++ b/common/model/mapStringInt.go @@ -0,0 +1,53 @@ +package model + +import "sync" + +type MapStringInt struct { + sync.RWMutex + internal map[string]int64 +} + +func NewMapStringInt() *MapStringInt { + return &MapStringInt{ + internal: make(map[string]int64), + } +} + +func (rm *MapStringInt) Load(key string) (value int64, ok bool) { + rm.RLock() + result, ok := rm.internal[key] + rm.RUnlock() + return result, ok +} + +func (rm *MapStringInt) Delete(key string) { + rm.Lock() + delete(rm.internal, key) + rm.Unlock() +} + +func (rm *MapStringInt) Store(key string, value int64) { + rm.Lock() + rm.internal[key] = value + rm.Unlock() +} + +func (rm *MapStringInt) Count() int { + rm.RLock() + result := len(rm.internal) + rm.RUnlock() + return result +} + +func (rm *MapStringInt) Reset() { + rm.Lock() + rm.internal = NewMapStringInt().internal + rm.Unlock() +} + +func (rm *MapStringInt) GetMap() map[string]int64 { + rm.RLock() + result := rm.internal + rm.RUnlock() + return result +} diff --git a/common/monitoring/metricsMonitoring.go b/common/monitoring/metricsMonitoring.go index 274c48f66..d70a2108c 100644 --- a/common/monitoring/metricsMonitoring.go +++ b/common/monitoring/metricsMonitoring.go @@ -58,15 +58,21 @@ var ( goRoutineActivityCounters = make(map[string]prometheus.Gauge) goRoutineActivityCountersSync sync.Mutex + + snapshotDownloadRequestCounter prometheus.Counter + snapshotDownloadRequestFailedCounter prometheus.Counter + snapshotDownloadRequestCounterSync sync.Mutex ) const ( - P2pGetPeerInfoServer = "P2pGetPeerInfoServer" - P2pGetMorePeersServer = "P2pGetMorePeersServer" - P2pSendPeersServer = "P2pSendPeersServer" - P2pSendBlockServer = "P2pSendBlockServer" - P2pSendTransactionServer = "P2pSendTransactionServer" + P2pGetPeerInfoServer = "P2pGetPeerInfoServer" + P2pGetMorePeersServer = "P2pGetMorePeersServer" + P2pSendPeersServer = "P2pSendPeersServer" + P2pSendBlockServer = "P2pSendBlockServer" + P2pSendTransactionServer = "P2pSendTransactionServer" + // @iltoga for @ali this is unused. can we delete it? 
P2pRequestBlockTransactionsServer = "P2pRequestBlockTransactionsServer" + P2pRequestFileDownloadServer = "P2pRequestFileDownloadServer" P2pGetCumulativeDifficultyServer = "P2pGetCumulativeDifficultyServer" P2pGetCommonMilestoneBlockIDsServer = "P2pGetCommonMilestoneBlockIDsServer" P2pGetNextBlockIDsServer = "P2pGetNextBlockIDsServer" @@ -78,6 +84,7 @@ const ( P2pSendBlockClient = "P2pSendBlockClient" P2pSendTransactionClient = "P2pSendTransactionClient" P2pRequestBlockTransactionsClient = "P2pRequestBlockTransactionsClient" + P2pRequestFileDownloadClient = "P2pRequestFileDownloadClient" P2pGetCumulativeDifficultyClient = "P2pGetCumulativeDifficultyClient" P2pGetCommonMilestoneBlockIDsClient = "P2pGetCommonMilestoneBlockIDsClient" P2pGetNextBlockIDsClient = "P2pGetNextBlockIDsClient" @@ -182,7 +189,7 @@ func SetUnresolvedPriorityPeersCount(count int) { if unresolvedPriorityPeersCounter == nil { unresolvedPriorityPeersCounter = prometheus.NewGauge(prometheus.GaugeOpts{ Name: fmt.Sprintf("zoobc_unresolved_priority_peers"), - Help: fmt.Sprintf("priority resolvedPeers counter"), + Help: fmt.Sprintf("priority unresolvedPeers counter"), }) prometheus.MustRegister(unresolvedPriorityPeersCounter) } @@ -406,3 +413,34 @@ func DecrementGoRoutineActivity(activityName string) { } goRoutineActivityCounters[activityName].Dec() } + +func IncrementSnapshotDownloadCounter(succeeded, failed int32) { + if !isMonitoringActive { + return + } + + snapshotDownloadRequestCounterSync.Lock() + defer snapshotDownloadRequestCounterSync.Unlock() + if succeeded > 0 { + if snapshotDownloadRequestCounter == nil { + snapshotDownloadRequestCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: fmt.Sprintf("zoobc_snapshot_chunk_downloads"), + Help: fmt.Sprintf("snapshot file chunks succeeded to download"), + }) + prometheus.MustRegister(snapshotDownloadRequestCounter) + } + + snapshotDownloadRequestCounter.Add(float64(succeeded)) + } + if failed > 0 { + if snapshotDownloadRequestFailedCounter == nil { + snapshotDownloadRequestFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: fmt.Sprintf("zoobc_snapshot_chunk_downloads_failed"), + Help: fmt.Sprintf("snapshot file chunks failed to download"), + }) + prometheus.MustRegister(snapshotDownloadRequestFailedCounter) + } + + snapshotDownloadRequestFailedCounter.Add(float64(failed)) + } +} diff --git a/common/query/query.go b/common/query/query.go index 18863a7c7..00d82d496 100644 --- a/common/query/query.go +++ b/common/query/query.go @@ -34,6 +34,7 @@ func GetDerivedQuery(ct chaintype.ChainType) (derivedQuery []DerivedQuery) { NewPendingTransactionQuery(), NewPendingSignatureQuery(), NewMultisignatureInfoQuery(), + NewSpineBlockManifestQuery(), } derivedQuery = append(derivedQuery, mainchainDerivedQuery...) 
case *chaintype.SpineChain: diff --git a/common/query/query_test.go b/common/query/query_test.go index 82d8fb939..779f0567c 100644 --- a/common/query/query_test.go +++ b/common/query/query_test.go @@ -36,6 +36,7 @@ func TestGetDerivedQuery(t *testing.T) { NewPendingTransactionQuery(), NewPendingSignatureQuery(), NewMultisignatureInfoQuery(), + NewSpineBlockManifestQuery(), }, }, { diff --git a/common/query/magablockQuery.go b/common/query/spineBlockManifestQuery.go similarity index 75% rename from common/query/magablockQuery.go rename to common/query/spineBlockManifestQuery.go index ae4167cc2..062202749 100644 --- a/common/query/magablockQuery.go +++ b/common/query/spineBlockManifestQuery.go @@ -3,6 +3,7 @@ package query import ( "database/sql" "fmt" + "github.com/zoobc/zoobc-core/common/constant" "strings" "github.com/zoobc/zoobc-core/common/chaintype" @@ -13,7 +14,7 @@ import ( type ( SpineBlockManifestQueryInterface interface { InsertSpineBlockManifest(spineBlockManifest *model.SpineBlockManifest) (str string, args []interface{}) - GetSpineBlockManifestsInTimeInterval(fromTimestamp, toTimestamp int64) string + GetSpineBlockManifestTimeInterval(fromTimestamp, toTimestamp int64) string GetLastSpineBlockManifest(ct chaintype.ChainType, mbType model.SpineBlockManifestType) string ExtractModel(mb *model.SpineBlockManifest) []interface{} BuildModel(spineBlockManifests []*model.SpineBlockManifest, rows *sql.Rows) ([]*model.SpineBlockManifest, error) @@ -45,12 +46,15 @@ func (mbl *SpineBlockManifestQuery) getTableName() string { return mbl.TableName } -// InsertSpineBlockManifest +// InsertSpineBlockManifest insert new spine block manifest +// Note: a new one with same id will replace a previous one, if present. +// this is to allow blocks downloaded from peers to override spine block manifests created locally and insure that the correct +// snapshot is downloaded by the node when first joins the network func (mbl *SpineBlockManifestQuery) InsertSpineBlockManifest( spineBlockManifest *model.SpineBlockManifest, ) (str string, args []interface{}) { qryInsert := fmt.Sprintf( - "INSERT INTO %s (%s) VALUES(%s)", + "INSERT OR REPLACE INTO %s (%s) VALUES(%s)", mbl.getTableName(), strings.Join(mbl.Fields, ","), fmt.Sprintf("? %s", strings.Repeat(", ?", len(mbl.Fields)-1)), @@ -65,9 +69,9 @@ func (mbl *SpineBlockManifestQuery) GetLastSpineBlockManifest(ct chaintype.Chain return query } -// GetSpineBlockManifestsInTimeInterval retrieve all spineBlockManifests within a time frame +// GetSpineBlockManifestTimeInterval retrieve all spineBlockManifests within a time frame // Note: it is used to get all entities that have expired between spine blocks -func (mbl *SpineBlockManifestQuery) GetSpineBlockManifestsInTimeInterval(fromTimestamp, toTimestamp int64) string { +func (mbl *SpineBlockManifestQuery) GetSpineBlockManifestTimeInterval(fromTimestamp, toTimestamp int64) string { query := fmt.Sprintf("SELECT %s FROM %s WHERE manifest_timestamp > %d AND manifest_timestamp <= %d "+ "ORDER BY manifest_type, chain_type, manifest_reference_height", strings.Join(mbl.Fields, ", "), mbl.getTableName(), fromTimestamp, toTimestamp) @@ -131,3 +135,15 @@ func (mbl *SpineBlockManifestQuery) Scan(mb *model.SpineBlockManifest, row *sql. 
} return nil } + +// Rollback delete records `WHERE block_height > "height - constant.MinRollbackBlocks"` +// Note: we subtract constant.MinRollbackBlocks from height because that's the block height the snapshot is taken in respect of current +// block height +func (mbl *SpineBlockManifestQuery) Rollback(height uint32) (multiQueries [][]interface{}) { + return [][]interface{}{ + { + fmt.Sprintf("DELETE FROM %s WHERE manifest_reference_height > ?", mbl.getTableName()), + height - constant.MinRollbackBlocks, + }, + } +} diff --git a/common/query/magablockQuery_test.go b/common/query/spineBlockManifestQuery_test.go similarity index 89% rename from common/query/magablockQuery_test.go rename to common/query/spineBlockManifestQuery_test.go index d621fdd1c..7173f2d9c 100644 --- a/common/query/magablockQuery_test.go +++ b/common/query/spineBlockManifestQuery_test.go @@ -36,7 +36,8 @@ func TestSpineBlockManifestQuery_InsertSpineBlockManifest(t *testing.T) { args: args{ spineBlockManifest: mb1, }, - want: "INSERT INTO spine_block_manifest (id,full_file_hash,file_chunk_hashes,manifest_reference_height,chain_type,manifest_type," + + want: "INSERT OR REPLACE INTO spine_block_manifest (id,full_file_hash,file_chunk_hashes,manifest_reference_height," + + "chain_type,manifest_type," + "manifest_timestamp) VALUES(? , ?, ?, ?, ?, ?, ?)", }, } @@ -112,7 +113,7 @@ func TestSpineBlockManifestQuery_GetSpineBlockManifestsInTimeInterval(t *testing want string }{ { - name: "GetSpineBlockManifestsInTimeInterval:success", + name: "GetSpineBlockManifestTimeInterval:success", fields: fields{ Fields: NewSpineBlockManifestQuery().Fields, TableName: NewSpineBlockManifestQuery().TableName, @@ -132,8 +133,8 @@ func TestSpineBlockManifestQuery_GetSpineBlockManifestsInTimeInterval(t *testing Fields: tt.fields.Fields, TableName: tt.fields.TableName, } - if got := mbl.GetSpineBlockManifestsInTimeInterval(tt.args.fromTimestamp, tt.args.toTimestamp); got != tt.want { - t.Errorf("SpineBlockManifestQuery.GetSpineBlockManifestsInTimeInterval() = %v, want %v", got, tt.want) + if got := mbl.GetSpineBlockManifestTimeInterval(tt.args.fromTimestamp, tt.args.toTimestamp); got != tt.want { + t.Errorf("SpineBlockManifestQuery.GetSpineBlockManifestTimeInterval() = %v, want %v", got, tt.want) } }) } diff --git a/common/schema b/common/schema index 5ebdfba91..5dc0471a4 160000 --- a/common/schema +++ b/common/schema @@ -1 +1 @@ -Subproject commit 5ebdfba91c350cecc0a1b9b78f2c60d0d38e2b3f +Subproject commit 5dc0471a46937033d7bb031eee1e8bed2350b207 diff --git a/common/service/fileDownload.pb.go b/common/service/fileDownload.pb.go new file mode 100644 index 000000000..4c1bdaafd --- /dev/null +++ b/common/service/fileDownload.pb.go @@ -0,0 +1,126 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: service/fileDownload.proto + +package service + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + model "github.com/zoobc/zoobc-core/common/model" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("service/fileDownload.proto", fileDescriptor_fde5bac5b80ffd69) } + +var fileDescriptor_fde5bac5b80ffd69 = []byte{ + // 193 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2a, 0x4e, 0x2d, 0x2a, + 0xcb, 0x4c, 0x4e, 0xd5, 0x4f, 0xcb, 0xcc, 0x49, 0x75, 0xc9, 0x2f, 0xcf, 0xcb, 0xc9, 0x4f, 0x4c, + 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x87, 0xca, 0x49, 0x49, 0xe4, 0xe6, 0xa7, 0xa4, + 0xe6, 0x60, 0x51, 0x22, 0x25, 0x93, 0x9e, 0x9f, 0x9f, 0x9e, 0x93, 0xaa, 0x9f, 0x58, 0x90, 0xa9, + 0x9f, 0x98, 0x97, 0x97, 0x5f, 0x92, 0x58, 0x92, 0x99, 0x9f, 0x57, 0x0c, 0x91, 0x35, 0xaa, 0xe5, + 0x12, 0x76, 0x43, 0xd2, 0x13, 0x0c, 0x31, 0x4e, 0x28, 0x8d, 0x8b, 0x07, 0x59, 0x58, 0x48, 0x4a, + 0x0f, 0x6c, 0xbe, 0x1e, 0xb2, 0x60, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x94, 0x34, 0x56, + 0xb9, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0x25, 0xd9, 0xa6, 0xcb, 0x4f, 0x26, 0x33, 0x89, 0x0b, + 0x89, 0xea, 0x97, 0x19, 0x82, 0x5d, 0xa7, 0x8f, 0xac, 0xcc, 0x49, 0x27, 0x4a, 0x2b, 0x3d, 0xb3, + 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0xbf, 0x2a, 0x3f, 0x3f, 0x29, 0x19, 0x42, 0xea, + 0x26, 0xe7, 0x17, 0xa5, 0xea, 0x27, 0xe7, 0xe7, 0xe6, 0xe6, 0xe7, 0xe9, 0x43, 0x3d, 0x99, 0xc4, + 0x06, 0x76, 0xb3, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xfb, 0xaf, 0x05, 0xa4, 0x12, 0x01, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// FileDownloadServiceClient is the client API for FileDownloadService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type FileDownloadServiceClient interface { + FileDownload(ctx context.Context, in *model.FileDownloadRequest, opts ...grpc.CallOption) (*model.FileDownloadResponse, error) +} + +type fileDownloadServiceClient struct { + cc *grpc.ClientConn +} + +func NewFileDownloadServiceClient(cc *grpc.ClientConn) FileDownloadServiceClient { + return &fileDownloadServiceClient{cc} +} + +func (c *fileDownloadServiceClient) FileDownload(ctx context.Context, in *model.FileDownloadRequest, opts ...grpc.CallOption) (*model.FileDownloadResponse, error) { + out := new(model.FileDownloadResponse) + err := c.cc.Invoke(ctx, "/service.FileDownloadService/FileDownload", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// FileDownloadServiceServer is the server API for FileDownloadService service. +type FileDownloadServiceServer interface { + FileDownload(context.Context, *model.FileDownloadRequest) (*model.FileDownloadResponse, error) +} + +// UnimplementedFileDownloadServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedFileDownloadServiceServer struct { +} + +func (*UnimplementedFileDownloadServiceServer) FileDownload(ctx context.Context, req *model.FileDownloadRequest) (*model.FileDownloadResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FileDownload not implemented") +} + +func RegisterFileDownloadServiceServer(s *grpc.Server, srv FileDownloadServiceServer) { + s.RegisterService(&_FileDownloadService_serviceDesc, srv) +} + +func _FileDownloadService_FileDownload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(model.FileDownloadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FileDownloadServiceServer).FileDownload(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/service.FileDownloadService/FileDownload", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FileDownloadServiceServer).FileDownload(ctx, req.(*model.FileDownloadRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _FileDownloadService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "service.FileDownloadService", + HandlerType: (*FileDownloadServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "FileDownload", + Handler: _FileDownloadService_FileDownload_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "service/fileDownload.proto", +} diff --git a/common/service/fileDownload.pb.gw.go b/common/service/fileDownload.pb.gw.go new file mode 100644 index 000000000..d14a66ff9 --- /dev/null +++ b/common/service/fileDownload.pb.gw.go @@ -0,0 +1,119 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: service/fileDownload.proto + +/* +Package service is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package service + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "github.com/zoobc/zoobc-core/common/model" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +var ( + filter_FileDownloadService_FileDownload_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_FileDownloadService_FileDownload_0(ctx context.Context, marshaler runtime.Marshaler, client FileDownloadServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq model.FileDownloadRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_FileDownloadService_FileDownload_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.FileDownload(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterFileDownloadServiceHandlerFromEndpoint is same as RegisterFileDownloadServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
+func RegisterFileDownloadServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterFileDownloadServiceHandler(ctx, mux, conn) +} + +// RegisterFileDownloadServiceHandler registers the http handlers for service FileDownloadService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterFileDownloadServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterFileDownloadServiceHandlerClient(ctx, mux, NewFileDownloadServiceClient(conn)) +} + +// RegisterFileDownloadServiceHandlerClient registers the http handlers for service FileDownloadService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "FileDownloadServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "FileDownloadServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "FileDownloadServiceClient" to call the correct interceptors. +func RegisterFileDownloadServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client FileDownloadServiceClient) error { + + mux.Handle("GET", pattern_FileDownloadService_FileDownload_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_FileDownloadService_FileDownload_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_FileDownloadService_FileDownload_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_FileDownloadService_FileDownload_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "file", "FileDownload"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_FileDownloadService_FileDownload_0 = runtime.ForwardResponseMessage +) diff --git a/common/service/p2pCommunication.pb.go b/common/service/p2pCommunication.pb.go index 225f4df3b..29ff0e3e1 100644 --- a/common/service/p2pCommunication.pb.go +++ b/common/service/p2pCommunication.pb.go @@ -28,35 +28,37 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("service/p2pCommunication.proto", fileDescriptor_5d547fbc25d9babc) } var fileDescriptor_5d547fbc25d9babc = []byte{ - // 434 bytes of a gzipped FileDescriptorProto + // 470 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x4f, 0x6f, 0xd3, 0x30, - 0x14, 0xef, 0x69, 0x08, 0x6f, 0x68, 0x9b, 0x25, 0xe8, 0xc8, 0xb4, 0x1d, 0x0a, 0x4c, 0x80, 0x20, - 0x91, 0x0a, 0x12, 0x07, 0x2e, 0xb0, 0x15, 0x45, 0x15, 0x6a, 0x55, 0x15, 0x4e, 0xdc, 0x12, 0xe7, - 0x95, 0x1a, 0x62, 0xbf, 0x10, 0x3b, 0x15, 0xe5, 0xcb, 0x83, 0x92, 0x38, 0x8e, 0x43, 0x53, 0xd8, - 0xa5, 0x87, 0xdf, 0x7f, 0xbb, 0x49, 0xc8, 0xa5, 0x82, 0x7c, 0xc3, 0x19, 0x04, 0xd9, 0x38, 0xbb, - 0x41, 0x21, 0x0a, 0xc9, 0x59, 0xa4, 0x39, 0x4a, 0x3f, 0xcb, 0x51, 0x23, 0xbd, 0x63, 0x78, 0xef, - 0x44, 0x60, 0x02, 0x69, 0x90, 0x01, 0xe4, 0x35, 0xd5, 0x20, 0x12, 0x13, 0x30, 0xc8, 0x69, 0x8d, - 0x80, 0xc8, 0xf4, 0xb6, 0x0b, 0xc5, 0x29, 0xb2, 0xef, 0x06, 0x7a, 0xe0, 0x40, 0x6c, 0x1d, 0x71, - 0x53, 0xe5, 0x0d, 0x6b, 0x5c, 0xe7, 0x91, 0x54, 0x11, 0x6b, 0x37, 0x8c, 0x7f, 0x1f, 0x90, 0x93, - 0xc5, 0x78, 0xd1, 0x99, 0x47, 0xdf, 0x90, 0xc3, 0x10, 0xf4, 0x02, 0x20, 0x9f, 0xca, 0x15, 0xd2, - 0x87, 0x7e, 0xe5, 0xf6, 0x1d, 0x6c, 0x09, 0x3f, 0x0a, 0x50, 0xda, 0x3b, 0x34, 0xd4, 0x1c, 0x13, - 0x18, 0x0d, 0xe8, 0x5b, 0x72, 0x14, 0x82, 0x9e, 0x61, 0x0e, 0xa5, 0x50, 0xd1, 0x23, 0x43, 0x7f, - 0x28, 0x57, 0x7b, 0xe7, 0x6d, 0x8e, 0x95, 0x2c, 0x41, 0x65, 0x28, 0x55, 0x69, 0x7e, 0x4d, 0xee, - 0x7e, 0x02, 0x99, 0xd4, 0xce, 0xa1, 0xd1, 0x5a, 0xa4, 0x69, 0xec, 0x44, 0x8e, 0x06, 0xf4, 0x5d, - 0xed, 0xba, 0x2e, 0x4f, 0xdc, 0x71, 0x55, 0x48, 0xe3, 0x3a, 0xdb, 0x25, 0x6c, 0xef, 0x92, 0x1c, - 0x97, 0xf0, 0xe7, 0xf6, 0x6e, 0xe8, 0x85, 0x23, 0x77, 0xf0, 0x26, 0xed, 0x72, 0x1f, 0x6d, 0x33, - 0x13, 0x72, 0xdf, 0x56, 0x39, 0x0a, 0x45, 0x1f, 0xfd, 0x3d, 0xc4, 0x65, 0x9b, 0xfc, 0xc7, 0xff, - 0x16, 0x39, 0xcb, 0xcf, 0x8c, 0x65, 0xb7, 0xe8, 0xca, 0x64, 0xec, 0x13, 0xec, 0xbb, 0xcf, 0x6f, - 0x64, 0x18, 0x82, 0xbe, 0x29, 0x44, 0x91, 0x46, 0x9a, 0x6f, 0x60, 0xc2, 0x57, 0x2b, 0xce, 0x8a, - 0x54, 0x6f, 0xe9, 0x93, 0xf6, 0xff, 0xeb, 0xe3, 0x9b, 0xc4, 0xab, 0xff, 0xc9, 0xec, 0x7e, 0x45, - 0xbc, 0x52, 0x84, 0x42, 0xa0, 0x9c, 0xf1, 0x14, 0x94, 0x46, 0x09, 0xd5, 0xd2, 0xe9, 0x44, 0xd1, - 0xa7, 0x4e, 0x4e, 0x9f, 0x24, 0xb1, 0x67, 0x78, 0x76, 0x0b, 0xa5, 0x2d, 0xfd, 0x48, 0x8e, 0x43, - 0xd0, 0x73, 0xf8, 0xa9, 0x6d, 0xd3, 0x45, 0xeb, 0x6f, 0xf1, 0x36, 0xbe, 0x79, 0xaa, 0x7a, 0xc2, - 0xde, 0x93, 0x7b, 0xae, 0x49, 0xd1, 0xf3, 0x9e, 0x28, 0x1b, 0x74, 0xea, 0x06, 0xa9, 0x49, 0xa4, - 0xa3, 0xd1, 0xe0, 0xfa, 0xc5, 0x97, 0xe7, 0x5f, 0xb9, 0x5e, 0x17, 0xb1, 0xcf, 0x50, 0x04, 0xbf, - 0x10, 0x63, 0x56, 0xff, 0xbe, 0x64, 0x98, 0x43, 0xc0, 0xaa, 0x03, 0x05, 0xe6, 0x53, 0x11, 0x1f, - 0x54, 0xaf, 0xed, 0xab, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb9, 0xeb, 0x94, 0x68, 0x5c, 0x04, - 0x00, 0x00, + 0x14, 0xef, 0x89, 
0x09, 0x6f, 0x68, 0x9b, 0x11, 0xb4, 0xa4, 0xda, 0x0e, 0x05, 0x26, 0x40, 0xd0, + 0x48, 0x05, 0x89, 0x03, 0x17, 0xd8, 0x0a, 0xd5, 0x84, 0x36, 0x55, 0x85, 0x13, 0x37, 0xd7, 0x79, + 0x65, 0x86, 0xd8, 0x2f, 0xc4, 0xce, 0x60, 0x7c, 0x4b, 0xbe, 0x11, 0x4a, 0xfc, 0x27, 0x2e, 0x4b, + 0x81, 0x4b, 0x0e, 0xbf, 0x7f, 0xef, 0xe7, 0x97, 0xc4, 0xe4, 0x50, 0x43, 0x79, 0x29, 0x38, 0xa4, + 0xc5, 0xa4, 0x38, 0x41, 0x29, 0x2b, 0x25, 0x38, 0x33, 0x02, 0xd5, 0xb8, 0x28, 0xd1, 0x20, 0xdd, + 0x72, 0x7c, 0xb2, 0x27, 0x31, 0x83, 0x3c, 0x2d, 0x00, 0x4a, 0x4b, 0x79, 0x44, 0x61, 0x06, 0x0e, + 0xd9, 0xb7, 0x08, 0xc8, 0xc2, 0x5c, 0xad, 0x43, 0xcb, 0x1c, 0xf9, 0x57, 0x07, 0xdd, 0x8d, 0x20, + 0x7e, 0xc1, 0x84, 0x1b, 0x95, 0xf4, 0x2d, 0x6e, 0x4a, 0xa6, 0x34, 0xe3, 0x6d, 0x87, 0x64, 0x60, + 0x89, 0x95, 0xc8, 0x61, 0x8a, 0xdf, 0x55, 0x8e, 0x2c, 0xb3, 0xcc, 0xe4, 0xd7, 0x16, 0xd9, 0x9b, + 0x4f, 0xe6, 0x6b, 0xc5, 0xe9, 0x4b, 0xb2, 0x3d, 0x03, 0x33, 0x07, 0x28, 0x4f, 0xd5, 0x0a, 0xe9, + 0xbd, 0x71, 0x63, 0x1f, 0x47, 0xd8, 0x02, 0xbe, 0x55, 0xa0, 0x4d, 0xb2, 0xed, 0xa8, 0x73, 0xcc, + 0x60, 0xd4, 0xa3, 0xaf, 0xc8, 0xce, 0x0c, 0xcc, 0x19, 0x96, 0x50, 0x0b, 0x35, 0xdd, 0x71, 0xf4, + 0xdb, 0xfa, 0x3c, 0xc9, 0xb0, 0xcd, 0x09, 0x92, 0x05, 0xe8, 0x02, 0x95, 0xae, 0xcd, 0x2f, 0xc8, + 0xcd, 0x0f, 0xa0, 0x32, 0xeb, 0xec, 0x3b, 0x6d, 0x40, 0xfc, 0xc4, 0xb5, 0xc8, 0x51, 0x8f, 0xbe, + 0xb6, 0xae, 0xe3, 0x7a, 0x17, 0x6b, 0xae, 0x06, 0xf1, 0xae, 0xc1, 0x75, 0x22, 0xcc, 0x5d, 0x90, + 0xdd, 0x1a, 0xfe, 0xd8, 0x6e, 0x8d, 0x1e, 0x44, 0xf2, 0x08, 0xf7, 0x69, 0x87, 0x9b, 0xe8, 0x90, + 0x99, 0x91, 0x3b, 0x61, 0x54, 0xa4, 0xd0, 0xf4, 0xfe, 0x9f, 0x45, 0x62, 0xd6, 0xe7, 0x3f, 0xf8, + 0xbb, 0x28, 0x6a, 0x3e, 0x70, 0x96, 0xeb, 0x83, 0x8e, 0x5c, 0xc6, 0x26, 0xc1, 0xa6, 0x7d, 0x7e, + 0x21, 0xfd, 0x19, 0x98, 0x93, 0x4a, 0x56, 0x39, 0x33, 0xe2, 0x12, 0xa6, 0x62, 0xb5, 0x12, 0xbc, + 0xca, 0xcd, 0x15, 0x7d, 0xd8, 0xbe, 0xbf, 0x2e, 0xde, 0x27, 0x1e, 0xfd, 0x4b, 0x16, 0xfa, 0x6b, + 0x92, 0xd4, 0x22, 0x94, 0x12, 0xd5, 0x99, 0xc8, 0x41, 0x1b, 0x54, 0xd0, 0x34, 0x3d, 0x9d, 0x6a, + 0xfa, 0x28, 0xca, 0xe9, 0x92, 0x64, 0xe1, 0x0c, 0x8f, 0xff, 0x43, 0x19, 0x86, 0xbe, 0x27, 0xbb, + 0x33, 0x30, 0xe7, 0xf0, 0xc3, 0x84, 0x49, 0x07, 0xad, 0xbf, 0xc5, 0xdb, 0x78, 0xff, 0x55, 0x75, + 0x84, 0xbd, 0x21, 0xb7, 0x62, 0x93, 0xa6, 0xc3, 0x8e, 0xa8, 0x10, 0xb4, 0x1f, 0x07, 0xe9, 0x29, + 0x33, 0x6c, 0xd4, 0xa3, 0x73, 0x72, 0xdb, 0xf1, 0xef, 0xa2, 0xdf, 0x93, 0x26, 0x4e, 0x1b, 0x83, + 0x3e, 0x67, 0xd8, 0xc9, 0xf9, 0x52, 0xc7, 0x4f, 0x3f, 0x3d, 0xf9, 0x2c, 0xcc, 0x45, 0xb5, 0x1c, + 0x73, 0x94, 0xe9, 0x4f, 0xc4, 0x25, 0xb7, 0xcf, 0x67, 0x1c, 0x4b, 0x48, 0x79, 0xb3, 0xa2, 0xd4, + 0x5d, 0x4b, 0xcb, 0x1b, 0xcd, 0x45, 0xf0, 0xfc, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x77, 0x97, + 0x9d, 0xeb, 0xc8, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
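For orientation, here is a minimal client-side sketch (not part of the diff) of how the newly generated FileDownloadService stub could be invoked to pull snapshot chunks from a peer. The peer address and chunk name are placeholders; the real values would come from the resolved-peers list and the spine block manifest.

package main

import (
	"context"
	"log"

	"github.com/zoobc/zoobc-core/common/model"
	"github.com/zoobc/zoobc-core/common/service"
	"google.golang.org/grpc"
)

func main() {
	// hypothetical peer address; in the node this comes from the peer explorer
	conn, err := grpc.Dial("127.0.0.1:8001", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := service.NewFileDownloadServiceClient(conn)
	res, err := client.FileDownload(context.Background(), &model.FileDownloadRequest{
		FileChunkNames: []string{"exampleChunkName"}, // placeholder chunk (file) name
	})
	if err != nil {
		log.Fatal(err)
	}
	// FileChunks holds the raw bytes of the chunks the peer could serve,
	// Failed lists the chunk names it could not find
	log.Printf("downloaded %d chunks, %d failed", len(res.GetFileChunks()), len(res.GetFailed()))
}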
@@ -82,6 +84,7 @@ type P2PCommunicationClient interface { GetCommonMilestoneBlockIDs(ctx context.Context, in *model.GetCommonMilestoneBlockIdsRequest, opts ...grpc.CallOption) (*model.GetCommonMilestoneBlockIdsResponse, error) GetNextBlockIDs(ctx context.Context, in *model.GetNextBlockIdsRequest, opts ...grpc.CallOption) (*model.BlockIdsResponse, error) GetNextBlocks(ctx context.Context, in *model.GetNextBlocksRequest, opts ...grpc.CallOption) (*model.BlocksData, error) + RequestFileDownload(ctx context.Context, in *model.FileDownloadRequest, opts ...grpc.CallOption) (*model.FileDownloadResponse, error) } type p2PCommunicationClient struct { @@ -191,6 +194,15 @@ func (c *p2PCommunicationClient) GetNextBlocks(ctx context.Context, in *model.Ge return out, nil } +func (c *p2PCommunicationClient) RequestFileDownload(ctx context.Context, in *model.FileDownloadRequest, opts ...grpc.CallOption) (*model.FileDownloadResponse, error) { + out := new(model.FileDownloadResponse) + err := c.cc.Invoke(ctx, "/service.P2PCommunication/RequestFileDownload", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // P2PCommunicationServer is the server API for P2PCommunication service. type P2PCommunicationServer interface { GetPeerInfo(context.Context, *model.GetPeerInfoRequest) (*model.Node, error) @@ -204,6 +216,7 @@ type P2PCommunicationServer interface { GetCommonMilestoneBlockIDs(context.Context, *model.GetCommonMilestoneBlockIdsRequest) (*model.GetCommonMilestoneBlockIdsResponse, error) GetNextBlockIDs(context.Context, *model.GetNextBlockIdsRequest) (*model.BlockIdsResponse, error) GetNextBlocks(context.Context, *model.GetNextBlocksRequest) (*model.BlocksData, error) + RequestFileDownload(context.Context, *model.FileDownloadRequest) (*model.FileDownloadResponse, error) } // UnimplementedP2PCommunicationServer can be embedded to have forward compatible implementations. 
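On the serving side, a host could satisfy the extended P2PCommunicationServer interface by embedding the unimplemented stub and overriding only the new method. The sketch below is illustrative only: reading chunks from a local snapshot directory is an assumption about where the chunk files live, not something this diff specifies.

package p2pserver // hypothetical package

import (
	"context"
	"io/ioutil"
	"path/filepath"

	"github.com/zoobc/zoobc-core/common/model"
	"github.com/zoobc/zoobc-core/common/service"
)

type FileDownloadHandler struct {
	// embedding keeps the type forward compatible with future interface methods
	service.UnimplementedP2PCommunicationServer
	SnapshotDir string // assumed location of snapshot chunk files
}

func (h *FileDownloadHandler) RequestFileDownload(
	ctx context.Context,
	req *model.FileDownloadRequest,
) (*model.FileDownloadResponse, error) {
	res := &model.FileDownloadResponse{}
	for _, name := range req.GetFileChunkNames() {
		data, err := ioutil.ReadFile(filepath.Join(h.SnapshotDir, name))
		if err != nil {
			// report missing/unreadable chunks instead of failing the whole request
			res.Failed = append(res.Failed, name)
			continue
		}
		res.FileChunks = append(res.FileChunks, data)
	}
	return res, nil
}

A value of this type would then be passed to service.RegisterP2PCommunicationServer alongside the node's existing handlers.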
@@ -243,6 +256,9 @@ func (*UnimplementedP2PCommunicationServer) GetNextBlockIDs(ctx context.Context, func (*UnimplementedP2PCommunicationServer) GetNextBlocks(ctx context.Context, req *model.GetNextBlocksRequest) (*model.BlocksData, error) { return nil, status.Errorf(codes.Unimplemented, "method GetNextBlocks not implemented") } +func (*UnimplementedP2PCommunicationServer) RequestFileDownload(ctx context.Context, req *model.FileDownloadRequest) (*model.FileDownloadResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RequestFileDownload not implemented") +} func RegisterP2PCommunicationServer(s *grpc.Server, srv P2PCommunicationServer) { s.RegisterService(&_P2PCommunication_serviceDesc, srv) @@ -446,6 +462,24 @@ func _P2PCommunication_GetNextBlocks_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _P2PCommunication_RequestFileDownload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(model.FileDownloadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(P2PCommunicationServer).RequestFileDownload(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/service.P2PCommunication/RequestFileDownload", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(P2PCommunicationServer).RequestFileDownload(ctx, req.(*model.FileDownloadRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _P2PCommunication_serviceDesc = grpc.ServiceDesc{ ServiceName: "service.P2PCommunication", HandlerType: (*P2PCommunicationServer)(nil), @@ -494,6 +528,10 @@ var _P2PCommunication_serviceDesc = grpc.ServiceDesc{ MethodName: "GetNextBlocks", Handler: _P2PCommunication_GetNextBlocks_Handler, }, + { + MethodName: "RequestFileDownload", + Handler: _P2PCommunication_RequestFileDownload_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "service/p2pCommunication.proto", diff --git a/common/util/fastRandom.go b/common/util/fastRandom.go new file mode 100644 index 000000000..c37b91ecd --- /dev/null +++ b/common/util/fastRandom.go @@ -0,0 +1,16 @@ +package util + +import ( + "math/rand" + "time" +) + +// GetFastRandom generates a int64 random number +func GetFastRandom(seed *rand.Rand, max int) int64 { + return int64(seed.Intn(max)) +} + +// GetFastRandomSeed generates a new randome seed, a mnemonic that can be used to derive a private key +func GetFastRandomSeed() *rand.Rand { + return rand.New(rand.NewSource(time.Now().Unix())) +} diff --git a/common/util/fastRandom_test.go b/common/util/fastRandom_test.go new file mode 100644 index 000000000..edf5d8dd3 --- /dev/null +++ b/common/util/fastRandom_test.go @@ -0,0 +1,42 @@ +package util + +import ( + "math/rand" + "testing" +) + +func TestGetFastRandom(t *testing.T) { + type args struct { + seed *rand.Rand + max int + } + tests := []struct { + name string + args args + want int64 + }{ + { + name: "GetFastRandom:firstFixedSeed", + args: args{ + max: 10000, + seed: rand.New(rand.NewSource(1)), + }, + want: 8081, + }, + { + name: "GetFastRandom:secondFixedSeed", + args: args{ + max: 10000, + seed: rand.New(rand.NewSource(10)), + }, + want: 3454, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := GetFastRandom(tt.args.seed, tt.args.max); got != tt.want { + t.Errorf("GetFastRandom() = %v, want %v", got, tt.want) + } + }) + } +} diff --git 
a/core/blockchainsync/blockchainSync.go b/core/blockchainsync/blockchainSync.go index 637bb43a0..e3586370d 100644 --- a/core/blockchainsync/blockchainSync.go +++ b/core/blockchainsync/blockchainSync.go @@ -18,15 +18,15 @@ import ( // TODO: rename into something more specific, such as SyncService type Service struct { // isScanningBlockchain bool - ChainType chaintype.ChainType - PeerServiceClient client.PeerServiceClientInterface - PeerExplorer strategy.PeerExplorerStrategyInterface - BlockService service.BlockServiceInterface - BlockchainDownloader BlockchainDownloadInterface - ForkingProcessor ForkingProcessorInterface - Logger *log.Logger - TransactionUtil transaction.UtilInterface - BlockTypeStatusService service.BlockTypeStatusServiceInterface + ChainType chaintype.ChainType + PeerServiceClient client.PeerServiceClientInterface + PeerExplorer strategy.PeerExplorerStrategyInterface + BlockService service.BlockServiceInterface + BlockchainDownloader BlockchainDownloadInterface + ForkingProcessor ForkingProcessorInterface + Logger *log.Logger + TransactionUtil transaction.UtilInterface + BlockchainStatusService service.BlockchainStatusServiceInterface } func NewBlockchainSyncService( @@ -34,19 +34,19 @@ func NewBlockchainSyncService( peerServiceClient client.PeerServiceClientInterface, peerExplorer strategy.PeerExplorerStrategyInterface, logger *log.Logger, - blockTypeStatusService service.BlockTypeStatusServiceInterface, + blockchainStatusService service.BlockchainStatusServiceInterface, blockchainDownloader BlockchainDownloadInterface, forkingProcessor ForkingProcessorInterface, ) *Service { return &Service{ - ChainType: blockService.GetChainType(), - BlockService: blockService, - PeerServiceClient: peerServiceClient, - PeerExplorer: peerExplorer, - BlockchainDownloader: blockchainDownloader, - ForkingProcessor: forkingProcessor, - Logger: logger, - BlockTypeStatusService: blockTypeStatusService, + ChainType: blockService.GetChainType(), + BlockService: blockService, + PeerServiceClient: peerServiceClient, + PeerExplorer: peerExplorer, + BlockchainDownloader: blockchainDownloader, + ForkingProcessor: forkingProcessor, + Logger: logger, + BlockchainStatusService: blockchainStatusService, } } @@ -91,7 +91,7 @@ func (bss *Service) getMoreBlocks() { bss.Logger.Warn(fmt.Sprintf("failed to start getMoreBlocks go routine: %v", err)) } if lastBlock == nil { - bss.Logger.Warn("There is no genesis block found") + bss.Logger.Fatal("There is no genesis block found") } initialHeight := lastBlock.Height @@ -107,12 +107,10 @@ func (bss *Service) getMoreBlocks() { case blocker.P2PNetworkConnectionErr: // this will allow the node to start smithing if it fails to connect to the p2p network, // eg. he is the first node. if later on he can connect, it will try resolve the fork normally - bss.BlockTypeStatusService.SetIsSmithingLocked(false) + bss.Logger.Info(err) + bss.BlockchainStatusService.SetIsSmithingLocked(false) bss.Logger.Info(errCasted.Message) case blocker.ChainValidationErr: - // this will allow the node to start smithing if it fails to connect to the p2p network, - // eg. he is the first node. 
if later on he can connect, it will try resolve the fork normally - bss.BlockTypeStatusService.SetIsSmithingLocked(false) bss.Logger.Infof("peer %s:%d: %s", peerBlockchainInfo.Peer.GetInfo().Address, peerBlockchainInfo.Peer.GetInfo().Port, @@ -176,8 +174,11 @@ func (bss *Service) getMoreBlocks() { } if bss.BlockchainDownloader.IsDownloadFinish(lastBlock) { - bss.BlockTypeStatusService.SetIsDownloading(bss.ChainType, false) - bss.Logger.Infof("Finished %s blockchain download: %d blocks pulled", bss.ChainType.GetName(), lastBlock.Height-initialHeight) + bss.BlockchainStatusService.SetIsDownloading(bss.ChainType, false) + // only set the first download finished = true once + bss.BlockchainStatusService.SetFirstDownloadFinished(bss.ChainType, true) + bss.Logger.Infof("Finished %s blocks download: %d blocks pulled", bss.ChainType.GetName(), + lastBlock.Height-initialHeight) break } diff --git a/core/blockchainsync/downloadBlockchain.go b/core/blockchainsync/downloadBlockchain.go index c393e82ff..9ed3ee821 100644 --- a/core/blockchainsync/downloadBlockchain.go +++ b/core/blockchainsync/downloadBlockchain.go @@ -26,13 +26,13 @@ type ( ConfirmWithPeer(peerToCheck *model.Peer, commonMilestoneBlockID int64) ([]int64, error) } BlockchainDownloader struct { - PeerHasMore bool - ChainType chaintype.ChainType - BlockService service.BlockServiceInterface - PeerServiceClient client.PeerServiceClientInterface - PeerExplorer strategy.PeerExplorerStrategyInterface - Logger *log.Logger - BlockTypeStatusService service.BlockTypeStatusServiceInterface + PeerHasMore bool + ChainType chaintype.ChainType + BlockService service.BlockServiceInterface + PeerServiceClient client.PeerServiceClientInterface + PeerExplorer strategy.PeerExplorerStrategyInterface + Logger *log.Logger + BlockchainStatusService service.BlockchainStatusServiceInterface } PeerBlockchainInfo struct { @@ -53,15 +53,15 @@ func NewBlockchainDownloader( peerServiceClient client.PeerServiceClientInterface, peerExplorer strategy.PeerExplorerStrategyInterface, logger *log.Logger, - blockTypeStatusService service.BlockTypeStatusServiceInterface, + blockchainStatusService service.BlockchainStatusServiceInterface, ) *BlockchainDownloader { return &BlockchainDownloader{ - ChainType: blockService.GetChainType(), - BlockService: blockService, - PeerServiceClient: peerServiceClient, - PeerExplorer: peerExplorer, - Logger: logger, - BlockTypeStatusService: blockTypeStatusService, + ChainType: blockService.GetChainType(), + BlockService: blockService, + PeerServiceClient: peerServiceClient, + PeerExplorer: peerExplorer, + Logger: logger, + BlockchainStatusService: blockchainStatusService, } } @@ -76,8 +76,6 @@ func (bd *BlockchainDownloader) IsDownloadFinish(currentLastBlock *model.Block) heightAfterDownload := afterDownloadLastBlock.Height cumulativeDifficultyAfterDownload := afterDownloadLastBlock.CumulativeDifficulty if currentHeight > 0 && currentHeight == heightAfterDownload && currentCumulativeDifficulty == cumulativeDifficultyAfterDownload { - // we only initialize this flag (to false) in main, so once is set to true, it will always be true - bd.BlockTypeStatusService.SetFirstDownloadFinished(bd.ChainType, true) return true } return false @@ -156,9 +154,9 @@ func (bd *BlockchainDownloader) GetPeerBlockchainInfo() (*PeerBlockchainInfo, er }, blocker.NewBlocker(blocker.AppErr, "invalid common block") } - if !bd.BlockTypeStatusService.IsDownloading(bd.ChainType) && peerHeight-commonBlock.GetHeight() > 10 { + if 
!bd.BlockchainStatusService.IsDownloading(bd.ChainType) && peerHeight-commonBlock.GetHeight() > 10 { bd.Logger.Info("Blockchain download in progress") - bd.BlockTypeStatusService.SetIsDownloading(bd.ChainType, true) + bd.BlockchainStatusService.SetIsDownloading(bd.ChainType, true) } return &PeerBlockchainInfo{ diff --git a/core/blockchainsync/downloadBlockchain_test.go b/core/blockchainsync/downloadBlockchain_test.go index 1f5394209..5a396c40f 100644 --- a/core/blockchainsync/downloadBlockchain_test.go +++ b/core/blockchainsync/downloadBlockchain_test.go @@ -132,27 +132,27 @@ func (*mockBlockServiceFail) GetLastBlock() (*model.Block, error) { } type ( - mockBlockTypeStatusService struct { - coreService.BlockTypeStatusService + mockBlockchainStatusService struct { + coreService.BlockchainStatusService } ) -func (*mockBlockTypeStatusService) IsFirstDownloadFinished(ct chaintype.ChainType) bool { +func (*mockBlockchainStatusService) IsFirstDownloadFinished(ct chaintype.ChainType) bool { return true } -func (*mockBlockTypeStatusService) IsDownloading(ct chaintype.ChainType) bool { +func (*mockBlockchainStatusService) IsDownloading(ct chaintype.ChainType) bool { return true } func TestGetPeerCommonBlockID(t *testing.T) { type args struct { - PeerServiceClient client.PeerServiceClientInterface - PeerExplorer strategy.PeerExplorerStrategyInterface - blockService coreService.BlockServiceInterface - queryService query.ExecutorInterface - logger *log.Logger - blockTypeStatusService coreService.BlockTypeStatusServiceInterface + PeerServiceClient client.PeerServiceClientInterface + PeerExplorer strategy.PeerExplorerStrategyInterface + blockService coreService.BlockServiceInterface + queryService query.ExecutorInterface + logger *log.Logger + blockchainStatusService coreService.BlockchainStatusServiceInterface } tests := []struct { @@ -164,12 +164,12 @@ func TestGetPeerCommonBlockID(t *testing.T) { { name: "want:getPeerCommonBlockID successfully return common block ID", args: args{ - PeerServiceClient: &mockP2pServiceSuccess{}, - PeerExplorer: &mockPeerExplorer{}, - blockService: &mockBlockServiceSuccess{}, - queryService: &mockQueryServiceSuccess{}, - logger: log.New(), - blockTypeStatusService: &mockBlockTypeStatusService{}, + PeerServiceClient: &mockP2pServiceSuccess{}, + PeerExplorer: &mockPeerExplorer{}, + blockService: &mockBlockServiceSuccess{}, + queryService: &mockQueryServiceSuccess{}, + logger: log.New(), + blockchainStatusService: &mockBlockchainStatusService{}, }, want: int64(1), wantErr: false, @@ -177,12 +177,12 @@ func TestGetPeerCommonBlockID(t *testing.T) { { name: "wantErr:getPeerCommonBlockID get last block failed", args: args{ - PeerServiceClient: &mockP2pServiceSuccess{}, - PeerExplorer: &mockPeerExplorer{}, - blockService: &mockBlockServiceFail{}, - queryService: &mockQueryServiceSuccess{}, - logger: log.New(), - blockTypeStatusService: &mockBlockTypeStatusService{}, + PeerServiceClient: &mockP2pServiceSuccess{}, + PeerExplorer: &mockPeerExplorer{}, + blockService: &mockBlockServiceFail{}, + queryService: &mockQueryServiceSuccess{}, + logger: log.New(), + blockchainStatusService: &mockBlockchainStatusService{}, }, want: int64(0), wantErr: true, @@ -190,12 +190,12 @@ func TestGetPeerCommonBlockID(t *testing.T) { { name: "wantErr:getPeerCommonBlockID grpc error", args: args{ - PeerServiceClient: &mockP2pServiceFail{}, - PeerExplorer: &mockPeerExplorer{}, - blockService: &mockBlockServiceSuccess{}, - queryService: &mockQueryServiceSuccess{}, - logger: log.New(), - 
blockTypeStatusService: &mockBlockTypeStatusService{}, + PeerServiceClient: &mockP2pServiceFail{}, + PeerExplorer: &mockPeerExplorer{}, + blockService: &mockBlockServiceSuccess{}, + queryService: &mockQueryServiceSuccess{}, + logger: log.New(), + blockchainStatusService: &mockBlockchainStatusService{}, }, want: int64(0), wantErr: true, @@ -205,11 +205,11 @@ func TestGetPeerCommonBlockID(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { blockchainDownloader := &BlockchainDownloader{ - BlockService: tt.args.blockService, - PeerServiceClient: tt.args.PeerServiceClient, - PeerExplorer: tt.args.PeerExplorer, - Logger: tt.args.logger, - BlockTypeStatusService: tt.args.blockTypeStatusService, + BlockService: tt.args.blockService, + PeerServiceClient: tt.args.PeerServiceClient, + PeerExplorer: tt.args.PeerExplorer, + Logger: tt.args.logger, + BlockchainStatusService: tt.args.blockchainStatusService, } got, err := blockchainDownloader.getPeerCommonBlockID( &model.Peer{}, @@ -227,11 +227,11 @@ func TestGetPeerCommonBlockID(t *testing.T) { func TestGetBlockIdsAfterCommon(t *testing.T) { type args struct { - PeerServiceClient client.PeerServiceClientInterface - PeerExplorer strategy.PeerExplorerStrategyInterface - blockService coreService.BlockServiceInterface - queryService query.ExecutorInterface - blockTypeStatusService coreService.BlockTypeStatusServiceInterface + PeerServiceClient client.PeerServiceClientInterface + PeerExplorer strategy.PeerExplorerStrategyInterface + blockService coreService.BlockServiceInterface + queryService query.ExecutorInterface + blockchainStatusService coreService.BlockchainStatusServiceInterface } tests := []struct { @@ -242,44 +242,44 @@ func TestGetBlockIdsAfterCommon(t *testing.T) { { name: "want:getBlockIdsAfterCommon (all getBlockIdsAfterCommon new)", args: args{ - PeerServiceClient: &mockP2pServiceSuccessNewResult{}, - PeerExplorer: &mockPeerExplorer{}, - blockService: &mockBlockServiceSuccess{}, - queryService: &mockQueryServiceSuccess{}, - blockTypeStatusService: &mockBlockTypeStatusService{}, + PeerServiceClient: &mockP2pServiceSuccessNewResult{}, + PeerExplorer: &mockPeerExplorer{}, + blockService: &mockBlockServiceSuccess{}, + queryService: &mockQueryServiceSuccess{}, + blockchainStatusService: &mockBlockchainStatusService{}, }, want: []int64{3, 4}, }, { name: "want:getBlockIdsAfterCommon (some getBlockIdsAfterCommon already exists)", args: args{ - PeerServiceClient: &mockP2pServiceSuccess{}, - PeerExplorer: &mockPeerExplorer{}, - blockService: &mockBlockServiceSuccess{}, - queryService: &mockQueryServiceSuccess{}, - blockTypeStatusService: &mockBlockTypeStatusService{}, + PeerServiceClient: &mockP2pServiceSuccess{}, + PeerExplorer: &mockPeerExplorer{}, + blockService: &mockBlockServiceSuccess{}, + queryService: &mockQueryServiceSuccess{}, + blockchainStatusService: &mockBlockchainStatusService{}, }, want: []int64{2, 3, 4}, }, { name: "want:getBlockIdsAfterCommon (all getBlockIdsAfterCommon already exists)", args: args{ - PeerServiceClient: &mockP2pServiceSuccessOneResult{}, - PeerExplorer: &mockPeerExplorer{}, - blockService: &mockBlockServiceSuccess{}, - queryService: &mockQueryServiceSuccess{}, - blockTypeStatusService: &mockBlockTypeStatusService{}, + PeerServiceClient: &mockP2pServiceSuccessOneResult{}, + PeerExplorer: &mockPeerExplorer{}, + blockService: &mockBlockServiceSuccess{}, + queryService: &mockQueryServiceSuccess{}, + blockchainStatusService: &mockBlockchainStatusService{}, }, want: []int64{1}, }, { name: 
"want:getBlockIdsAfterCommon (GetNextBlockIDs produce error)", args: args{ - PeerServiceClient: &mockP2pServiceFail{}, - PeerExplorer: &mockPeerExplorer{}, - blockService: &mockBlockServiceSuccess{}, - queryService: &mockQueryServiceSuccess{}, - blockTypeStatusService: &mockBlockTypeStatusService{}, + PeerServiceClient: &mockP2pServiceFail{}, + PeerExplorer: &mockPeerExplorer{}, + blockService: &mockBlockServiceSuccess{}, + queryService: &mockQueryServiceSuccess{}, + blockchainStatusService: &mockBlockchainStatusService{}, }, want: []int64{}, }, @@ -288,10 +288,10 @@ func TestGetBlockIdsAfterCommon(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { blockchainDownloader := &BlockchainDownloader{ - BlockService: tt.args.blockService, - PeerServiceClient: tt.args.PeerServiceClient, - PeerExplorer: tt.args.PeerExplorer, - BlockTypeStatusService: tt.args.blockTypeStatusService, + BlockService: tt.args.blockService, + PeerServiceClient: tt.args.PeerServiceClient, + PeerExplorer: tt.args.PeerExplorer, + BlockchainStatusService: tt.args.blockchainStatusService, } got := blockchainDownloader.getBlockIdsAfterCommon( &model.Peer{}, @@ -337,10 +337,10 @@ func TestGetNextBlocks(t *testing.T) { nil, ) blockchainDownloader := &BlockchainDownloader{ - BlockService: blockService, - PeerServiceClient: &mockP2pServiceSuccess{}, - PeerExplorer: &mockPeerExplorer{}, - BlockTypeStatusService: &mockBlockTypeStatusService{}, + BlockService: blockService, + PeerServiceClient: &mockP2pServiceSuccess{}, + PeerExplorer: &mockPeerExplorer{}, + BlockchainStatusService: &mockBlockchainStatusService{}, } type args struct { diff --git a/core/service/blockCoreService.go b/core/service/blockCoreService.go index 71401c73a..afeb9bcbe 100644 --- a/core/service/blockCoreService.go +++ b/core/service/blockCoreService.go @@ -25,7 +25,7 @@ type ( PushBlock(previousBlock, block *model.Block, broadcast, persist bool) error GetBlockByID(id int64, withAttachedData bool) (*model.Block, error) GetBlockByHeight(uint32) (*model.Block, error) - GetBlocksFromHeight(uint32, uint32) ([]*model.Block, error) + GetBlocksFromHeight(startHeight, limit uint32, withAttachedData bool) ([]*model.Block, error) GetLastBlock() (*model.Block, error) GetBlockHash(block *model.Block) ([]byte, error) GetBlocks() ([]*model.Block, error) diff --git a/core/service/blockMainService.go b/core/service/blockMainService.go index cf13c059c..3937bff1a 100644 --- a/core/service/blockMainService.go +++ b/core/service/blockMainService.go @@ -755,7 +755,7 @@ func (bs *BlockService) GetBlockByID(id int64, withAttachedData bool) (*model.Bl // GetBlocksFromHeight get all blocks from a given height till last block (or a given limit is reached). // Note: this only returns main block data, it doesn't populate attached data (transactions, receipts) -func (bs *BlockService) GetBlocksFromHeight(startHeight, limit uint32) ([]*model.Block, error) { +func (bs *BlockService) GetBlocksFromHeight(startHeight, limit uint32, withAttachedData bool) ([]*model.Block, error) { var blocks []*model.Block rows, err := bs.QueryExecutor.ExecuteSelect(bs.BlockQuery.GetBlockFromHeight(startHeight, limit), false) if err != nil { @@ -1314,7 +1314,12 @@ func (bs *BlockService) PopOffToBlock(commonBlock *model.Block) ([]*model.Block, if err != nil { return nil, err } - + // + // TODO: here we should also delete all snapshot files relative to the block manifests being rolled back during derived tables + // rollback. 
Something like this: + // - before rolling back derived queries, select all spine block manifest records from commonBlock.Height till last + // - delete all snapshots referenced by them + // if mempoolsBackupBytes.Len() > 0 { kvdbMempoolsBackupKey := commonUtils.GetKvDbMempoolDBKey(bs.GetChainType()) err = bs.KVExecutor.Insert(kvdbMempoolsBackupKey, mempoolsBackupBytes.Bytes(), int(constant.KVDBMempoolsBackupExpiry)) diff --git a/core/service/blockMainService_test.go b/core/service/blockMainService_test.go index 977449cf3..64c8547e3 100644 --- a/core/service/blockMainService_test.go +++ b/core/service/blockMainService_test.go @@ -2484,6 +2484,7 @@ func TestBlockService_GetBlocksFromHeight(t *testing.T) { } type args struct { startHeight, limit uint32 + withAttachedData bool } tests := []struct { name string @@ -2500,8 +2501,9 @@ func TestBlockService_GetBlocksFromHeight(t *testing.T) { BlockQuery: query.NewBlockQuery(&chaintype.MainChain{}), }, args: args{ - startHeight: 0, - limit: 2, + startHeight: 0, + limit: 2, + withAttachedData: false, }, want: []*model.Block{ &mockBlockData, @@ -2534,7 +2536,7 @@ func TestBlockService_GetBlocksFromHeight(t *testing.T) { AccountBalanceQuery: tt.fields.AccountBalanceQuery, Observer: tt.fields.Observer, } - got, err := bs.GetBlocksFromHeight(tt.args.startHeight, tt.args.limit) + got, err := bs.GetBlocksFromHeight(tt.args.startHeight, tt.args.limit, tt.args.withAttachedData) if (err != nil) != tt.wantErr { t.Errorf("BlockService.GetBlocksFromHeight() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/core/service/blockSpineService.go b/core/service/blockSpineService.go index 25dcd859a..c3a57b6b9 100644 --- a/core/service/blockSpineService.go +++ b/core/service/blockSpineService.go @@ -180,8 +180,8 @@ func (bs *BlockSpineService) NewGenesisBlock( // ValidateBlock validate block to be pushed into the blockchain func (bs *BlockSpineService) ValidateBlock(block, previousLastBlock *model.Block, curTime int64) error { - // TODO: should we validate the received spineblcokManifests against the one that have been generated locally? - // what if they have been deleted? + // TODO: validate spine block manifest if part of block data + // - re-calculate block payload hash using data from 'block' func argument and compare it with block.PayloadHash // todo: validate previous time if block.GetTimestamp() > curTime+constant.GenerateBlockTimeoutSec { @@ -306,6 +306,17 @@ func (bs *BlockSpineService) PushBlock(previousBlock, block *model.Block, broadc return err } + // if present, add new spine block manifests into spineBlockManifest table + for _, spineBlockManifest := range block.SpineBlockManifests { + if err := bs.SpineBlockManifestService.InsertSpineBlockManifest(spineBlockManifest); err != nil { + bs.Logger.Error(err.Error()) + if rollbackErr := bs.QueryExecutor.RollbackTx(); rollbackErr != nil { + bs.Logger.Error(rollbackErr.Error()) + } + return err + } + } + err = bs.QueryExecutor.CommitTx() if err != nil { // commit automatically unlock executor and close tx return err @@ -353,7 +364,7 @@ func (bs *BlockSpineService) GetBlockByID(id int64, withAttachedData bool) (*mod // GetBlocksFromHeight get all blocks from a given height till last block (or a given limit is reached). 
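Note (illustrative, not part of this patch): one possible shape for the PopOffToBlock TODO above about removing snapshot files that belong to rolled-back spine block manifests. It assumes the main block service had access to a spine block manifest service and to the FileService introduced in this changeset; GetManifestsFromSpineBlockHeight is a hypothetical helper, while ParseFileChunkHashes, DeleteFilesByHash and GetDownloadPath are the methods added elsewhere in this diff.

func (bs *BlockService) removeSnapshotsForRolledBackManifests(fromHeight uint32) error {
	// hypothetical helper: fetch every spine block manifest recorded at or above fromHeight
	manifests, err := bs.SpineBlockManifestService.GetManifestsFromSpineBlockHeight(fromHeight)
	if err != nil {
		return err
	}
	for _, manifest := range manifests {
		// split the manifest's concatenated chunk hashes into single 32-byte (SHA3-256) hashes
		chunkHashes, err := bs.FileService.ParseFileChunkHashes(manifest.GetFileChunkHashes(), sha3.New256().Size())
		if err != nil {
			return err
		}
		// remove every chunk file referenced by this manifest from the snapshot directory
		if err := bs.FileService.DeleteFilesByHash(bs.FileService.GetDownloadPath(), chunkHashes); err != nil {
			return err
		}
	}
	return nil
}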
// Note: this only returns main block data, it doesn't populate attached data (spinePublicKeys) -func (bs *BlockSpineService) GetBlocksFromHeight(startHeight, limit uint32) ([]*model.Block, error) { +func (bs *BlockSpineService) GetBlocksFromHeight(startHeight, limit uint32, withAttachedData bool) ([]*model.Block, error) { var blocks []*model.Block rows, err := bs.QueryExecutor.ExecuteSelect(bs.BlockQuery.GetBlockFromHeight(startHeight, limit), false) if err != nil { @@ -364,6 +375,14 @@ func (bs *BlockSpineService) GetBlocksFromHeight(startHeight, limit uint32) ([]* if err != nil { return nil, blocker.NewBlocker(blocker.DBErr, "failed to build model") } + if withAttachedData { + for _, block := range blocks { + err := bs.PopulateBlockData(block) + if err != nil { + return nil, blocker.NewBlocker(blocker.DBErr, err.Error()) + } + } + } return blocks, nil } diff --git a/core/service/blockSpineService_test.go b/core/service/blockSpineService_test.go index 4578c0324..30e1a5bea 100644 --- a/core/service/blockSpineService_test.go +++ b/core/service/blockSpineService_test.go @@ -2144,6 +2144,7 @@ func TestBlockSpineService_GetBlocksFromHeight(t *testing.T) { } type args struct { startHeight, limit uint32 + withAttachedData bool } tests := []struct { name string @@ -2160,8 +2161,9 @@ func TestBlockSpineService_GetBlocksFromHeight(t *testing.T) { BlockQuery: query.NewBlockQuery(&chaintype.SpineChain{}), }, args: args{ - startHeight: 0, - limit: 2, + startHeight: 0, + limit: 2, + withAttachedData: false, }, want: []*model.Block{ &mockSpineBlockData, @@ -2189,7 +2191,7 @@ func TestBlockSpineService_GetBlocksFromHeight(t *testing.T) { Signature: tt.fields.Signature, Observer: tt.fields.Observer, } - got, err := bs.GetBlocksFromHeight(tt.args.startHeight, tt.args.limit) + got, err := bs.GetBlocksFromHeight(tt.args.startHeight, tt.args.limit, tt.args.withAttachedData) if (err != nil) != tt.wantErr { t.Errorf("BlockSpineService.GetBlocksFromHeight() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/core/service/blockTypeStatusService.go b/core/service/blockTypeStatusService.go deleted file mode 100644 index 94b3e08b3..000000000 --- a/core/service/blockTypeStatusService.go +++ /dev/null @@ -1,74 +0,0 @@ -package service - -import "github.com/zoobc/zoobc-core/common/chaintype" - -type ( - BlockTypeStatusServiceInterface interface { - SetFirstDownloadFinished(ct chaintype.ChainType, isSpineBlocksDownloadFinished bool) - IsFirstDownloadFinished(ct chaintype.ChainType) bool - SetIsDownloading(ct chaintype.ChainType, newValue bool) - IsDownloading(ct chaintype.ChainType) bool - SetIsSmithingLocked(isSmithingLocked bool) - IsSmithingLocked() bool - SetIsSmithing(ct chaintype.ChainType, smithing bool) - IsSmithing(ct chaintype.ChainType) bool - } -) - -type ( - BlockTypeStatusService struct { - isFirstDownloadFinished map[int32]bool - isDownloading map[int32]bool - isSmithing map[int32]bool - isSmithingLocked bool - } -) - -func NewBlockTypeStatusService( - lockSmithing bool, -) *BlockTypeStatusService { - // init variables for all block types - var btss = &BlockTypeStatusService{ - isDownloading: make(map[int32]bool), - isFirstDownloadFinished: make(map[int32]bool), - isSmithing: make(map[int32]bool), - } - for _, ct := range chaintype.GetChainTypes() { - btss.isDownloading[ct.GetTypeInt()] = false - btss.isFirstDownloadFinished[ct.GetTypeInt()] = false - } - btss.isSmithingLocked = lockSmithing - return btss -} - -func (btss *BlockTypeStatusService) SetFirstDownloadFinished(ct chaintype.ChainType, 
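A hedged usage sketch for the widened GetBlocksFromHeight signature above: the third argument now lets callers decide whether PopulateBlockData is run on each returned block. The helper name and its arguments are illustrative only; coreService.BlockServiceInterface is the interface changed in this patch.

// logSpineBlocks is an illustrative helper: it asks for up to `limit` blocks
// starting at `fromHeight`, with attached data populated, and logs their heights.
func logSpineBlocks(spineBlockService coreService.BlockServiceInterface, fromHeight, limit uint32) error {
	blocks, err := spineBlockService.GetBlocksFromHeight(fromHeight, limit, true)
	if err != nil {
		return err
	}
	for _, b := range blocks {
		fmt.Printf("got spine block at height %d\n", b.GetHeight())
	}
	return nil
}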
finished bool) { - btss.isFirstDownloadFinished[ct.GetTypeInt()] = finished -} - -func (btss *BlockTypeStatusService) IsFirstDownloadFinished(ct chaintype.ChainType) bool { - return btss.isFirstDownloadFinished[ct.GetTypeInt()] -} - -func (btss *BlockTypeStatusService) SetIsDownloading(ct chaintype.ChainType, newValue bool) { - btss.isDownloading[ct.GetTypeInt()] = newValue -} - -func (btss *BlockTypeStatusService) IsDownloading(ct chaintype.ChainType) bool { - return btss.isDownloading[ct.GetTypeInt()] -} - -func (btss *BlockTypeStatusService) SetIsSmithingLocked(isSmithingLocked bool) { - btss.isSmithingLocked = isSmithingLocked -} - -func (btss *BlockTypeStatusService) IsSmithingLocked() bool { - return btss.isSmithingLocked -} - -func (btss *BlockTypeStatusService) SetIsSmithing(ct chaintype.ChainType, isSmithing bool) { - btss.isSmithing[ct.GetTypeInt()] = isSmithing -} - -func (btss *BlockTypeStatusService) IsSmithing(ct chaintype.ChainType) bool { - return btss.isSmithing[ct.GetTypeInt()] -} diff --git a/core/service/blockchainStatusService.go b/core/service/blockchainStatusService.go new file mode 100644 index 000000000..b800b90d0 --- /dev/null +++ b/core/service/blockchainStatusService.go @@ -0,0 +1,125 @@ +package service + +import ( + log "github.com/sirupsen/logrus" + "github.com/zoobc/zoobc-core/common/chaintype" + "github.com/zoobc/zoobc-core/common/model" +) + +type ( + BlockchainStatusServiceInterface interface { + SetFirstDownloadFinished(ct chaintype.ChainType, isSpineBlocksDownloadFinished bool) + IsFirstDownloadFinished(ct chaintype.ChainType) bool + SetIsDownloading(ct chaintype.ChainType, newValue bool) + IsDownloading(ct chaintype.ChainType) bool + SetIsSmithingLocked(isSmithingLocked bool) + IsSmithingLocked() bool + SetIsSmithing(ct chaintype.ChainType, smithing bool) + IsSmithing(ct chaintype.ChainType) bool + SetIsDownloadingSnapshot(ct chaintype.ChainType, isDownloadingSnapshot bool) + IsDownloadingSnapshot(ct chaintype.ChainType) bool + } +) + +type ( + BlockchainStatusService struct { + Logger *log.Logger + } +) + +var ( + isFirstDownloadFinished = model.NewMapIntBool() + isDownloading = model.NewMapIntBool() + isDownloadingSnapshot = model.NewMapIntBool() + isSmithing = model.NewMapIntBool() + isSmithingLocked bool +) + +func NewBlockchainStatusService( + lockSmithing bool, + logger *log.Logger, +) *BlockchainStatusService { + // init variables for all block types + var btss = &BlockchainStatusService{ + Logger: logger, + } + btss.SetIsSmithingLocked(lockSmithing) + return btss +} + +func (btss *BlockchainStatusService) SetFirstDownloadFinished(ct chaintype.ChainType, finished bool) { + // set it only once, when the node starts + if res, ok := isFirstDownloadFinished.Load(ct.GetTypeInt()); ok && res { + return + } + isFirstDownloadFinished.Store(ct.GetTypeInt(), finished) + if finished { + btss.Logger.Infof("%s first download finished", ct.GetName()) + } +} + +func (btss *BlockchainStatusService) IsFirstDownloadFinished(ct chaintype.ChainType) bool { + if res, ok := isFirstDownloadFinished.Load(ct.GetTypeInt()); ok { + return res + } + return false +} + +func (btss *BlockchainStatusService) SetIsDownloading(ct chaintype.ChainType, downloading bool) { + isDownloading.Store(ct.GetTypeInt(), downloading) +} + +func (btss *BlockchainStatusService) IsDownloading(ct chaintype.ChainType) bool { + if res, ok := isDownloading.Load(ct.GetTypeInt()); ok { + return res + } + return false +} + +func (btss *BlockchainStatusService) SetIsSmithingLocked(smithingLocked bool) 
{ + var ( + lockedStr string + ) + isSmithingLocked = smithingLocked + if isSmithingLocked { + lockedStr = "locked" + } else { + lockedStr = "unlocked" + } + btss.Logger.Infof("smithing process %s...", lockedStr) +} + +func (btss *BlockchainStatusService) IsSmithingLocked() bool { + return isSmithingLocked +} + +func (btss *BlockchainStatusService) SetIsSmithing(ct chaintype.ChainType, smithing bool) { + isSmithing.Store(ct.GetTypeInt(), smithing) +} + +func (btss *BlockchainStatusService) IsSmithing(ct chaintype.ChainType) bool { + if res, ok := isSmithing.Load(ct.GetTypeInt()); ok { + return res + } + return false +} + +func (btss *BlockchainStatusService) SetIsDownloadingSnapshot(ct chaintype.ChainType, downloadingSnapshot bool) { + isDownloadingSnapshot.Store(ct.GetTypeInt(), downloadingSnapshot) + if downloadingSnapshot { + btss.Logger.Infof("Downloading snapshot for %s...", ct.GetName()) + } else { + btss.Logger.Infof("Finished Downloading snapshot for %s...", ct.GetName()) + } + +} + +func (btss *BlockchainStatusService) IsDownloadingSnapshot(ct chaintype.ChainType) bool { + if !ct.HasSnapshots() { + return false + } + if res, ok := isDownloadingSnapshot.Load(ct.GetTypeInt()); ok { + return res + } + return false +} diff --git a/core/service/blockchainStatusService_test.go b/core/service/blockchainStatusService_test.go new file mode 100644 index 000000000..890e265bd --- /dev/null +++ b/core/service/blockchainStatusService_test.go @@ -0,0 +1,422 @@ +package service + +import ( + "reflect" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/zoobc/zoobc-core/common/chaintype" +) + +func TestNewBlockchainStatusService(t *testing.T) { + type args struct { + lockSmithing bool + logger *log.Logger + } + logTest := log.New() + tests := []struct { + name string + args args + want *BlockchainStatusService + }{ + { + name: "NewBlockchainStatusService", + args: args{ + lockSmithing: true, + logger: logTest, + }, + want: &BlockchainStatusService{ + Logger: logTest, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := NewBlockchainStatusService(tt.args.lockSmithing, tt.args.logger) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewBlockchainStatusService() = %v, want %v", got, tt.want) + } + if got.IsSmithingLocked() != tt.args.lockSmithing { + t.Errorf("NewBlockchainStatusService() lockSmithing = %v, want %v", got.IsSmithingLocked(), + tt.args.lockSmithing) + } + }) + } +} + +func TestBlockchainStatusService_SetFirstDownloadFinished(t *testing.T) { + type fields struct { + Logger *log.Logger + } + type args struct { + ct chaintype.ChainType + finished bool + } + tests := []struct { + name string + fields fields + args args + }{ + { + name: "SetFirstDownloadFinished", + args: args{ + ct: &chaintype.MainChain{}, + finished: true, + }, + fields: fields{ + Logger: log.New(), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + btss := &BlockchainStatusService{ + Logger: tt.fields.Logger, + } + // test map concurrency r/w + for i := 0; i < 10; i++ { + go btss.SetFirstDownloadFinished(tt.args.ct, tt.args.finished) + go btss.IsFirstDownloadFinished(tt.args.ct) + } + }) + } +} + +func TestBlockchainStatusService_IsFirstDownloadFinished(t *testing.T) { + type fields struct { + Logger *log.Logger + } + type args struct { + ct chaintype.ChainType + setVal bool + } + tests := []struct { + name string + fields fields + args args + want bool + }{ + { + name: "IsFirstDownloadFinished", + fields: fields{ + Logger: log.New(), + }, 
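A short usage sketch (import paths assumed from the rest of the changeset): because the new BlockchainStatusService stores its flags in package-level concurrency-safe maps (model.NewMapIntBool) rather than per-instance fields, every instance observes the same state, which is what the goroutine-based read/write tests in this file exercise.

package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
	"github.com/zoobc/zoobc-core/common/chaintype"
	"github.com/zoobc/zoobc-core/core/service"
)

func main() {
	bss := service.NewBlockchainStatusService(true, log.New())
	mainChain := &chaintype.MainChain{}
	bss.SetIsDownloading(mainChain, true)
	// a second instance reads the same package-level flag
	other := &service.BlockchainStatusService{Logger: log.New()}
	fmt.Println(other.IsDownloading(mainChain)) // prints: true
}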
+ args: args{ + ct: &chaintype.MainChain{}, + setVal: true, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + btss := &BlockchainStatusService{ + Logger: tt.fields.Logger, + } + btss.SetFirstDownloadFinished(tt.args.ct, tt.args.setVal) + if got := btss.IsFirstDownloadFinished(tt.args.ct); got != tt.want { + t.Errorf("BlockchainStatusService.IsFirstDownloadFinished() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestBlockchainStatusService_SetIsDownloading(t *testing.T) { + type fields struct { + Logger *log.Logger + } + type args struct { + ct chaintype.ChainType + downloading bool + } + tests := []struct { + name string + fields fields + args args + }{ + { + name: "SetIsDownloading", + args: args{ + ct: &chaintype.MainChain{}, + downloading: true, + }, + fields: fields{ + Logger: log.New(), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + btss := &BlockchainStatusService{ + Logger: tt.fields.Logger, + } + // test map concurrency r/w + for i := 0; i < 10; i++ { + go btss.SetIsDownloading(tt.args.ct, tt.args.downloading) + go btss.IsDownloading(tt.args.ct) + } + }) + } +} + +func TestBlockchainStatusService_IsDownloading(t *testing.T) { + type fields struct { + Logger *log.Logger + } + type args struct { + ct chaintype.ChainType + setVal bool + } + tests := []struct { + name string + fields fields + args args + want bool + }{ + { + name: "IsDownloading", + fields: fields{ + Logger: log.New(), + }, + args: args{ + ct: &chaintype.MainChain{}, + setVal: true, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + btss := &BlockchainStatusService{ + Logger: tt.fields.Logger, + } + btss.SetIsDownloading(tt.args.ct, tt.args.setVal) + if got := btss.IsDownloading(tt.args.ct); got != tt.want { + t.Errorf("BlockchainStatusService.IsDownloading() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestBlockchainStatusService_SetIsSmithingLocked(t *testing.T) { + type fields struct { + Logger *log.Logger + } + type args struct { + smithingLocked bool + } + tests := []struct { + name string + fields fields + args args + }{ + { + name: "SetIsDownloading", + args: args{ + smithingLocked: true, + }, + fields: fields{ + Logger: log.New(), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + btss := &BlockchainStatusService{ + Logger: tt.fields.Logger, + } + btss.SetIsSmithingLocked(tt.args.smithingLocked) + }) + } +} + +func TestBlockchainStatusService_IsSmithingLocked(t *testing.T) { + type fields struct { + Logger *log.Logger + } + type args struct { + setVal bool + } + tests := []struct { + name string + args args + fields fields + want bool + }{ + { + name: "IsDownloading", + fields: fields{ + Logger: log.New(), + }, + args: args{ + setVal: true, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + btss := &BlockchainStatusService{ + Logger: tt.fields.Logger, + } + btss.SetIsSmithingLocked(tt.args.setVal) + if got := btss.IsSmithingLocked(); got != tt.want { + t.Errorf("BlockchainStatusService.IsSmithingLocked() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestBlockchainStatusService_SetIsSmithing(t *testing.T) { + type fields struct { + Logger *log.Logger + } + type args struct { + ct chaintype.ChainType + smithing bool + } + tests := []struct { + name string + fields fields + args args + }{ + { + name: "SetIsDownloading", + args: args{ + ct: &chaintype.MainChain{}, + smithing: true, + }, 
+ fields: fields{ + Logger: log.New(), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + btss := &BlockchainStatusService{ + Logger: tt.fields.Logger, + } + // test map concurrency r/w + for i := 0; i < 10; i++ { + go btss.SetIsSmithing(tt.args.ct, tt.args.smithing) + go btss.IsSmithing(tt.args.ct) + } + }) + } +} + +func TestBlockchainStatusService_IsSmithing(t *testing.T) { + type fields struct { + Logger *log.Logger + } + type args struct { + ct chaintype.ChainType + setVal bool + } + tests := []struct { + name string + fields fields + args args + want bool + }{ + { + name: "IsSmithing", + fields: fields{ + Logger: log.New(), + }, + args: args{ + ct: &chaintype.MainChain{}, + setVal: true, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + btss := &BlockchainStatusService{ + Logger: tt.fields.Logger, + } + btss.SetIsSmithing(tt.args.ct, tt.args.setVal) + if got := btss.IsSmithing(tt.args.ct); got != tt.want { + t.Errorf("BlockchainStatusService.IsSmithing() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestBlockchainStatusService_SetIsDownloadingSnapshot(t *testing.T) { + type fields struct { + Logger *log.Logger + } + type args struct { + ct chaintype.ChainType + downloadingSnapshot bool + } + tests := []struct { + name string + fields fields + args args + }{ + { + name: "SetIsDownloading", + args: args{ + ct: &chaintype.MainChain{}, + downloadingSnapshot: true, + }, + fields: fields{ + Logger: log.New(), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + btss := &BlockchainStatusService{ + Logger: tt.fields.Logger, + } + // test map concurrency r/w + for i := 0; i < 10; i++ { + go btss.SetIsDownloadingSnapshot(tt.args.ct, tt.args.downloadingSnapshot) + go btss.IsDownloadingSnapshot(tt.args.ct) + } + }) + } +} + +func TestBlockchainStatusService_IsDownloadingSnapshot(t *testing.T) { + type fields struct { + Logger *log.Logger + } + type args struct { + ct chaintype.ChainType + setVal bool + } + tests := []struct { + name string + fields fields + args args + want bool + }{ + { + name: "IsSmithing", + fields: fields{ + Logger: log.New(), + }, + args: args{ + ct: &chaintype.MainChain{}, + setVal: true, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + btss := &BlockchainStatusService{ + Logger: tt.fields.Logger, + } + btss.SetIsDownloadingSnapshot(tt.args.ct, tt.args.setVal) + if got := btss.IsDownloadingSnapshot(tt.args.ct); got != tt.want { + t.Errorf("BlockchainStatusService.IsDownloadingSnapshot() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/core/service/fileDownloaderService.go b/core/service/fileDownloaderService.go deleted file mode 100644 index bf4dcf55e..000000000 --- a/core/service/fileDownloaderService.go +++ /dev/null @@ -1,48 +0,0 @@ -package service - -import ( - log "github.com/sirupsen/logrus" -) - -type ( - // FileDownloaderServiceInterface snapshot logic shared across block types - FileDownloaderServiceInterface interface { - DownloadFileByName(fileName string, fileHash []byte) error - } - - FileDownloaderService struct { - DownloadPath string - FileService FileServiceInterface - Logger *log.Logger - } -) - -func NewFileDownloaderService( - downloadPath string, - fileService FileServiceInterface, - logger *log.Logger, -) *FileDownloaderService { - return &FileDownloaderService{ - DownloadPath: downloadPath, - FileService: fileService, - Logger: logger, - } -} - -// DownloadSnapshotChunk TODO: 
implement logic to download a file from a random peer -func (fds *FileDownloaderService) DownloadFileByName(fileName string, fileHash []byte) error { - // TODO: download file from a peer - // FIXME uncomment once file download has been fully implemented - // filePath := filepath.Join(fds.DownloadPath, fileName) - // ok, err := util.VerifyFileHash(filePath, fileHash, sha3.New256()) - // if err != nil { - // return err - // } - // if !ok { - // return blocker.NewBlocker( - // blocker.AppErr, - // "CorruptedFile", - // ) - // } - return nil -} diff --git a/core/service/fileService.go b/core/service/fileService.go index 033d2adbf..15d64f613 100644 --- a/core/service/fileService.go +++ b/core/service/fileService.go @@ -1,25 +1,32 @@ package service import ( + "bytes" + "encoding/base64" "fmt" + "io/ioutil" + "math" + "os" + "path/filepath" + log "github.com/sirupsen/logrus" "github.com/ugorji/go/codec" "github.com/zoobc/zoobc-core/common/blocker" - "github.com/zoobc/zoobc-core/common/util" "golang.org/x/crypto/sha3" - "io/ioutil" - "os" - "path/filepath" ) type ( FileServiceInterface interface { + GetDownloadPath() string + ParseFileChunkHashes(fileHashes []byte, hashLength int) (fileHashesAry [][]byte, err error) ReadFileByHash(filePath string, fileHash []byte) ([]byte, error) + ReadFileByName(filePath, fileName string) ([]byte, error) DeleteFilesByHash(filePath string, fileHashes [][]byte) error SaveBytesToFile(fileBasePath, filename string, b []byte) error - GetFileNameFromHash(fileHash []byte) (string, error) + GetFileNameFromHash(fileHash []byte) string + GetFileNameFromBytes(fileBytes []byte) string GetHashFromFileName(fileName string) ([]byte, error) - VerifyFileHash(filePath string, hash []byte) (bool, error) + VerifyFileChecksum(fileBytes, hash []byte) bool HashPayload(b []byte) ([]byte, error) EncodePayload(v interface{}) (b []byte, err error) DecodePayload(b []byte, v interface{}) error @@ -27,30 +34,50 @@ type ( } FileService struct { - Logger *log.Logger - h codec.Handle + Logger *log.Logger + h codec.Handle + snapshotPath string } ) func NewFileService( logger *log.Logger, encoderHandler codec.Handle, + snapshotPath string, ) FileServiceInterface { return &FileService{ - Logger: logger, - h: encoderHandler, // this variable is only set when constructing the service and never mutated + Logger: logger, + h: encoderHandler, // this variable is only set when constructing the service and never mutated + snapshotPath: snapshotPath, } } -func (fs *FileService) VerifyFileHash(filePath string, hash []byte) (bool, error) { - return util.VerifyFileHash(filePath, hash, sha3.New256()) +func (fs *FileService) GetDownloadPath() string { + return fs.snapshotPath } -func (fs *FileService) ReadFileByHash(filePath string, fileHash []byte) ([]byte, error) { - fileName, err := fs.GetFileNameFromHash(fileHash) - if err != nil { - return nil, err +func (fs *FileService) ParseFileChunkHashes(fileHashes []byte, hashLength int) (fileHashesAry [][]byte, err error) { + // math.Mod returns the reminder of len(fileHashes)/hashLength + // we use it to check if the length of fileHashes is a multiple of the single hash's length (32 bytes for sha256) + if len(fileHashes) < hashLength || math.Mod(float64(len(fileHashes)), float64(hashLength)) > 0 { + return nil, blocker.NewBlocker(blocker.ValidationErr, "invalid file chunks hashes length") } + for i := 0; i < len(fileHashes); i += hashLength { + fileHashesAry = append(fileHashesAry, fileHashes[i:i+hashLength]) + } + return fileHashesAry, nil +} + +func (fs 
*FileService) VerifyFileChecksum(fileBytes, hash []byte) bool { + computed := sha3.Sum256(fileBytes) + return bytes.Equal(computed[:], hash) +} + +func (fs *FileService) ReadFileByHash(filePath string, fileHash []byte) ([]byte, error) { + return fs.ReadFileByName(filePath, fs.GetFileNameFromHash(fileHash)) +} + +func (fs *FileService) ReadFileByName(filePath, fileName string) ([]byte, error) { filePathName := filepath.Join(filePath, fileName) chunkBytes, err := ioutil.ReadFile(filePathName) if err != nil { @@ -105,40 +132,26 @@ func (fs *FileService) HashPayload(b []byte) ([]byte, error) { return hasher.Sum([]byte{}), nil } -// GetHashFromFileName file name to hash conversion -// TODO: refactor GetPublicKeyFromAddress name as it can be applied to other use cases, such as this one +// GetHashFromFileName file hash to hash-name conversion: base64 urlencoded func (*FileService) GetHashFromFileName(fileName string) ([]byte, error) { - hash, err := util.GetPublicKeyFromAddress(fileName) - if err != nil { - return nil, blocker.NewBlocker( - blocker.AppErr, - "invalid file name", - ) - } - return hash, nil + return base64.URLEncoding.DecodeString(fileName) } -// GetFileNameFromHash file hash to fileName conversion -// TODO: refactor GetAddressFromPublicKey name as it can be applied to other use cases, such as this one -func (*FileService) GetFileNameFromHash(fileHash []byte) (string, error) { - fileName, err := util.GetAddressFromPublicKey(fileHash) - if err != nil { - return "", blocker.NewBlocker( - blocker.ServerError, - "invalid file hash length", - ) - } - return fileName, nil +// GetFileNameFromHash file hash to fileName conversion: base64 urlencoded +func (*FileService) GetFileNameFromHash(fileHash []byte) string { + return base64.URLEncoding.EncodeToString(fileHash) +} + +// GetFileNameFromBytes helper method to get a hash-name from file raw bytes +func (fs *FileService) GetFileNameFromBytes(fileBytes []byte) string { + fileHash := sha3.Sum256(fileBytes) + return fs.GetFileNameFromHash(fileHash[:]) } // DeleteFilesByHash remove a list of files by their hash/names func (fs *FileService) DeleteFilesByHash(filePath string, fileHashes [][]byte) error { for _, fileChunkHash := range fileHashes { - fileName, err := fs.GetFileNameFromHash(fileChunkHash) - if err != nil { - return err - } - filePathName := filepath.Join(filePath, fileName) + filePathName := filepath.Join(filePath, fs.GetFileNameFromHash(fileChunkHash)) if err := os.Remove(filePathName); err != nil { return err } diff --git a/core/service/fileService_test.go b/core/service/fileService_test.go new file mode 100644 index 000000000..0298739bd --- /dev/null +++ b/core/service/fileService_test.go @@ -0,0 +1,65 @@ +package service + +import ( + "reflect" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/ugorji/go/codec" +) + +func TestFileService_ParseFileChunkHashes(t *testing.T) { + type fields struct { + Logger *log.Logger + h codec.Handle + snapshotPath string + } + type args struct { + fileHashes []byte + hashLength int + } + tests := []struct { + name string + fields fields + args args + wantFileHashesAry [][]byte + wantErr bool + }{ + { + name: "ParseFileChunkHashes:success", + args: args{ + hashLength: 32, + fileHashes: make([]byte, 64), + }, + wantFileHashesAry: [][]byte{ + make([]byte, 32), + make([]byte, 32), + }, + }, + { + name: "ParseFileChunkHashes:fail-{InvalidHashesLength}", + args: args{ + hashLength: 32, + fileHashes: make([]byte, 65), + }, + wantErr: true, + }, + } + for _, tt := range tests { + 
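A small illustration (not part of the patch) of the new file-naming scheme above: chunk files are now named with the base64 URL-safe encoding of their SHA3-256 hash, so hash-to-name and name-to-hash become a plain encoding round trip with no dependency on the address/public-key conversion helpers.

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	hash := sha3.Sum256([]byte("snapshot chunk payload")) // illustrative payload
	name := base64.URLEncoding.EncodeToString(hash[:])    // what GetFileNameFromHash returns
	back, err := base64.URLEncoding.DecodeString(name)    // what GetHashFromFileName returns
	fmt.Println(err == nil && bytes.Equal(back, hash[:])) // prints: true
	fmt.Println(len(name))                                // 44: padded URL-safe name, matching the renamed testdata files
}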
t.Run(tt.name, func(t *testing.T) { + fs := &FileService{ + Logger: tt.fields.Logger, + h: tt.fields.h, + snapshotPath: tt.fields.snapshotPath, + } + gotFileHashesAry, err := fs.ParseFileChunkHashes(tt.args.fileHashes, tt.args.hashLength) + if (err != nil) != tt.wantErr { + t.Errorf("FileService.ParseFileChunkHashes() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotFileHashesAry, tt.wantFileHashesAry) { + t.Errorf("FileService.ParseFileChunkHashes() = %v, want %v", gotFileHashesAry, tt.wantFileHashesAry) + } + }) + } +} diff --git a/core/service/snapshotBasicChunkStrategy.go b/core/service/snapshotBasicChunkStrategy.go index 6a1831c38..7bd129fd8 100644 --- a/core/service/snapshotBasicChunkStrategy.go +++ b/core/service/snapshotBasicChunkStrategy.go @@ -6,6 +6,7 @@ import ( "github.com/zoobc/zoobc-core/common/model" "github.com/zoobc/zoobc-core/common/util" "golang.org/x/crypto/sha3" + "io/ioutil" "path/filepath" ) @@ -27,6 +28,8 @@ func NewSnapshotBasicChunkStrategy( } } +// GenerateSnapshotChunks generates a spliced (multiple file chunks of the same size) snapshot from a SnapshotPayload struct and returns +// encoded snapshot payload's hash and the file chunks' hashes (to be included in a spine block manifest) func (ss *SnapshotBasicChunkStrategy) GenerateSnapshotChunks(snapshotPayload *model.SnapshotPayload, filePath string) (fullHash []byte, fileChunkHashes [][]byte, err error) { // encode the snapshot payload @@ -50,10 +53,7 @@ func (ss *SnapshotBasicChunkStrategy) GenerateSnapshotChunks(snapshotPayload *mo fileChunkHashes = append(fileChunkHashes, fileChunkHash) - fileName, err := ss.FileService.GetFileNameFromHash(fileChunkHash) - if err != nil { - return nil, nil, err - } + fileName := ss.FileService.GetFileNameFromHash(fileChunkHash) err = ss.FileService.SaveBytesToFile(filePath, fileName, fileChunk) if err != nil { // try remove saved files if saving a chunk file fails @@ -65,18 +65,23 @@ func (ss *SnapshotBasicChunkStrategy) GenerateSnapshotChunks(snapshotPayload *mo // make extra sure that the file created is not corrupted filePathName := filepath.Join(filePath, fileName) - match, err := ss.FileService.VerifyFileHash(filePathName, fileChunkHash) - if err != nil || !match { + fileBytes, err := ioutil.ReadFile(filePathName) + if err != nil { + return nil, nil, err + } + if !ss.FileService.VerifyFileChecksum(fileBytes, fileChunkHash) { // try remove saved files if file chunk validation fails - if err1 := ss.FileService.DeleteFilesByHash(filePath, fileChunkHashes); err1 != nil { - return nil, nil, err1 + err = ss.FileService.DeleteFilesByHash(filePath, fileChunkHashes) + if err != nil { + return nil, nil, err } - return nil, nil, err + return nil, nil, blocker.NewBlocker(blocker.ValidationErr, "InvalidFileHash") } } return fullHash, fileChunkHashes, nil } +// BuildSnapshotFromChunks rebuilds a whole snapshot file from its file chunks and parses the encoded file into a SnapshotPayload struct func (ss *SnapshotBasicChunkStrategy) BuildSnapshotFromChunks(fullHash []byte, fileChunkHashes [][]byte, filePath string) (*model.SnapshotPayload, error) { var ( diff --git a/core/service/snapshotBasicChunkStrategy_test.go b/core/service/snapshotBasicChunkStrategy_test.go index 453de3946..3434ceb11 100644 --- a/core/service/snapshotBasicChunkStrategy_test.go +++ b/core/service/snapshotBasicChunkStrategy_test.go @@ -59,11 +59,10 @@ var ( type ( bcsMockFileService struct { FileService - successEncode bool - successGetFileNameFromHash bool - successSaveBytesToFile 
bool - successVerifyFileHash bool - integrationTest bool + successEncode bool + successSaveBytesToFile bool + successVerifyFileChecksum bool + integrationTest bool } ) @@ -79,18 +78,8 @@ func (mfs *bcsMockFileService) EncodePayload(v interface{}) (b []byte, err error return nil, errors.New("EncodedPayloadFail") } -func (mfs *bcsMockFileService) GetFileNameFromHash(fileHash []byte) (string, error) { - if mfs.successGetFileNameFromHash { - return "vXu9Q01j1OWLRoqmIHW-KpyJBticdBS207Lg3OscPgyO", nil - } - return "", errors.New("GetFileNameFromHashFail") -} - -func (mfs *bcsMockFileService) VerifyFileHash(filePath string, hash []byte) (bool, error) { - if mfs.successVerifyFileHash { - return true, nil - } - return false, errors.New("VerifyFileHashFail") +func (mfs *bcsMockFileService) GetFileNameFromHash(fileHash []byte) string { + return "vXu9Q01j1OWLRoqmIHW-KpyJBticdBS207Lg3OscPgyO" } func (mfs *bcsMockFileService) SaveBytesToFile(fileBasePath, fileName string, b []byte) error { @@ -119,12 +108,17 @@ func (mfs *bcsMockFileService) DecodePayload(b []byte, v interface{}) error { realFs := NewFileService( log.New(), new(codec.CborHandle), + "testdata/snapshots", ) return realFs.DecodePayload(b, new(interface{})) } return nil } +func (mfs *bcsMockFileService) VerifyFileChecksum(fileBytes, hash []byte) bool { + return mfs.successVerifyFileChecksum +} + func TestSnapshotBasicChunkStrategy_GenerateSnapshotChunks(t *testing.T) { type fields struct { ChunkSize int @@ -151,10 +145,9 @@ func TestSnapshotBasicChunkStrategy_GenerateSnapshotChunks(t *testing.T) { Logger: log.New(), h: new(codec.CborHandle), }, - successEncode: true, - successGetFileNameFromHash: true, - successSaveBytesToFile: true, - successVerifyFileHash: true, + successEncode: true, + successSaveBytesToFile: true, + successVerifyFileChecksum: true, }, }, args: args{ @@ -175,10 +168,9 @@ func TestSnapshotBasicChunkStrategy_GenerateSnapshotChunks(t *testing.T) { Logger: log.New(), h: new(codec.CborHandle), }, - successEncode: true, - successGetFileNameFromHash: true, - successSaveBytesToFile: false, - successVerifyFileHash: true, + successEncode: true, + successSaveBytesToFile: false, + successVerifyFileChecksum: true, }, }, args: args{ @@ -196,10 +188,9 @@ func TestSnapshotBasicChunkStrategy_GenerateSnapshotChunks(t *testing.T) { Logger: log.New(), h: new(codec.CborHandle), }, - successEncode: true, - successGetFileNameFromHash: true, - successSaveBytesToFile: true, - successVerifyFileHash: false, + successEncode: true, + successSaveBytesToFile: true, + successVerifyFileChecksum: false, }, }, args: args{ diff --git a/core/service/snapshotMainBlockService.go b/core/service/snapshotMainBlockService.go index 75174123b..1a9116714 100644 --- a/core/service/snapshotMainBlockService.go +++ b/core/service/snapshotMainBlockService.go @@ -67,6 +67,7 @@ func (ss *SnapshotMainBlockService) NewSnapshotFile(block *model.Block) (snapsho snapshotExpirationTimestamp = block.Timestamp + int64(ss.chainType.GetSnapshotGenerationTimeout().Seconds()) ) + // @iltoga comment out for testing snapshots locally if block.Height <= constant.MinRollbackBlocks { return nil, blocker.NewBlocker(blocker.ValidationErr, fmt.Sprintf("invalid snapshot height: %d", block.Height)) @@ -125,7 +126,7 @@ func (ss *SnapshotMainBlockService) NewSnapshotFile(block *model.Block) (snapsho SnapshotFileHash: snapshotFileHash, FileChunksHashes: fileChunkHashes, ChainType: ss.chainType.GetTypeInt(), - Height: block.Height, + Height: snapshotPayloadHeight, ProcessExpirationTimestamp: 
snapshotExpirationTimestamp, SpineBlockManifestType: model.SpineBlockManifestType_Snapshot, }, nil @@ -149,6 +150,7 @@ func (ss *SnapshotMainBlockService) ImportSnapshotFile(snapshotFileInfo *model.S // IsSnapshotHeight returns true if chain height passed is a snapshot height func (ss *SnapshotMainBlockService) IsSnapshotHeight(height uint32) bool { snapshotInterval := ss.chainType.GetSnapshotInterval() + // @iltoga comment out for testing snapshots locally if snapshotInterval < constant.MinRollbackBlocks { if height < constant.MinRollbackBlocks { return false diff --git a/core/service/snapshotMainBlockService_test.go b/core/service/snapshotMainBlockService_test.go index 4ae30aecc..8a374be30 100644 --- a/core/service/snapshotMainBlockService_test.go +++ b/core/service/snapshotMainBlockService_test.go @@ -169,10 +169,6 @@ type ( SnapshotBasicChunkStrategy success bool } - mockFileService struct { - FileService - successGetFileNameFromHash bool - } mockSnapshotQueryExecutor struct { query.Executor success bool @@ -460,33 +456,33 @@ func TestSnapshotMainBlockService_NewSnapshotFile(t *testing.T) { snapshotChunk2Hash, }, ChainType: 0, - Height: blockForSnapshot1.Height, + Height: blockForSnapshot1.Height - constant.MinRollbackBlocks, ProcessExpirationTimestamp: blockForSnapshot1.Timestamp + 1, SpineBlockManifestType: model.SpineBlockManifestType_Snapshot, }, }, - { - name: "NewSnapshotFile:fail-{GetAccountBalances}", - fields: fields{ - chainType: &mockChainType{ - SnapshotGenerationTimeout: 1, - }, - QueryExecutor: &mockSnapshotQueryExecutor{success: true}, - AccountBalanceQuery: &mockSnapshotAccountBalanceQuery{success: false}, - NodeRegistrationQuery: &mockSnapshotNodeRegistrationQuery{success: true}, - ParticipationScoreQuery: &mockSnapshotParticipationScoreQuery{success: true}, - AccountDatasetQuery: &mockSnapshotAccountDatasetQuery{success: true}, - EscrowTransactionQuery: &mockSnapshotEscrowTransactionQuery{success: true}, - PublishedReceiptQuery: &mockSnapshotPublishedReceiptQuery{success: true}, - SnapshotQueries: query.GetSnapshotQuery(chaintype.GetChainType(0)), - }, - args: args{ - block: blockForSnapshot1, - }, - want: nil, - wantErr: true, - errMsg: "AccountBalanceQueryFailed", - }, + // { + // name: "NewSnapshotFile:fail-{GetAccountBalances}", + // fields: fields{ + // chainType: &mockChainType{ + // SnapshotGenerationTimeout: 1, + // }, + // QueryExecutor: &mockSnapshotQueryExecutor{success: true}, + // AccountBalanceQuery: &mockSnapshotAccountBalanceQuery{success: false}, + // NodeRegistrationQuery: &mockSnapshotNodeRegistrationQuery{success: true}, + // ParticipationScoreQuery: &mockSnapshotParticipationScoreQuery{success: true}, + // AccountDatasetQuery: &mockSnapshotAccountDatasetQuery{success: true}, + // EscrowTransactionQuery: &mockSnapshotEscrowTransactionQuery{success: true}, + // PublishedReceiptQuery: &mockSnapshotPublishedReceiptQuery{success: true}, + // SnapshotQueries: query.GetSnapshotQuery(chaintype.GetChainType(0)), + // }, + // args: args{ + // block: blockForSnapshot1, + // }, + // want: nil, + // wantErr: true, + // errMsg: "AccountBalanceQueryFailed", + // }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -558,6 +554,7 @@ func TestSnapshotMainBlockService_Integration_NewSnapshotFile(t *testing.T) { NewFileService( log.New(), new(codec.CborHandle), + "testdata/snapshots", ), ), Logger: log.New(), @@ -587,6 +584,7 @@ func TestSnapshotMainBlockService_Integration_NewSnapshotFile(t *testing.T) { NewFileService( log.New(), 
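A hedged note on the manifest Height change above: snapshotPayloadHeight is defined outside this hunk, but the updated test expectation (blockForSnapshot1.Height - constant.MinRollbackBlocks) suggests the manifest now records the height actually covered by the snapshot payload, i.e. the triggering height minus the minimum rollback window, rather than the triggering height itself. The numbers below are purely illustrative.

package main

import "fmt"

func main() {
	// illustrative values only: MinRollbackBlocks is whatever common/constant defines
	const minRollbackBlocks uint32 = 720
	triggerHeight := uint32(10000) // height at which the snapshot listener fires
	snapshotPayloadHeight := triggerHeight - minRollbackBlocks
	fmt.Println(snapshotPayloadHeight) // 9280: what the manifest's Height would record
}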
new(codec.CborHandle), + "testdata/snapshots", ), ), Logger: log.New(), @@ -635,11 +633,11 @@ func TestSnapshotMainBlockService_Integration_NewSnapshotFile(t *testing.T) { t.Errorf("SnapshotMainBlockService.NewSnapshotFile() = %v, want %v", got, tt.want) } // remove generated files - s1 := "ciR_Dhn7tqSXs7QWXZlkxOEZBPDFsgMOPDve4DikIq0Z" + s1 := "ciR_Dhn7tqSXs7QWXZlkxOEZBPDFsgMOPDve4DikIq0=" _ = os.Remove(filepath.Join(tt.fields.SnapshotPath, s1)) - s2 := "I_fH-6-yQ33oWGDyAd0ncuwYAUoI2dCmJJFGKGQoYaRg" + s2 := "I_fH-6-yQ33oWGDyAd0ncuwYAUoI2dCmJJFGKGQoYaQ=" _ = os.Remove(filepath.Join(tt.fields.SnapshotPath, s2)) - s3 := "pMIJEXZLvM4DvzP8dDM2sBRMbD5wW_XUA6DU9ueI-T_7" + s3 := "pMIJEXZLvM4DvzP8dDM2sBRMbD5wW_XUA6DU9ueI-T8=" _ = os.Remove(filepath.Join(tt.fields.SnapshotPath, s3)) }) } diff --git a/core/service/snapshotService.go b/core/service/snapshotService.go index 406feecc2..f81437c4d 100644 --- a/core/service/snapshotService.go +++ b/core/service/snapshotService.go @@ -8,8 +8,6 @@ import ( "github.com/zoobc/zoobc-core/common/constant" "github.com/zoobc/zoobc-core/common/model" "github.com/zoobc/zoobc-core/observer" - "golang.org/x/crypto/sha3" - "math" "time" ) @@ -19,16 +17,13 @@ type ( GenerateSnapshot(block *model.Block, ct chaintype.ChainType, chunkSizeBytes int) (*model.SnapshotFileInfo, error) IsSnapshotProcessing(ct chaintype.ChainType) bool StopSnapshotGeneration(ct chaintype.ChainType) error - DownloadSnapshot(ct chaintype.ChainType, spineBlockManifest *model.SpineBlockManifest) error StartSnapshotListener() observer.Listener } SnapshotService struct { SpineBlockManifestService SpineBlockManifestServiceInterface - BlockTypeStatusService BlockTypeStatusServiceInterface + BlockchainStatusService BlockchainStatusServiceInterface SnapshotBlockServices map[int32]SnapshotBlockServiceInterface // map key = chaintype number (eg. 
mainchain = 0) - FileDownloaderService FileDownloaderServiceInterface - FileService FileServiceInterface Logger *log.Logger } ) @@ -37,23 +32,19 @@ var ( // this map holds boolean channels to all block types that support snapshots stopSnapshotGeneration = make(map[int32]chan bool) // this map holds boolean values to all block types that support snapshots - generatingSnapshot = make(map[int32]bool) + generatingSnapshot = model.NewMapIntBool() ) func NewSnapshotService( spineBlockManifestService SpineBlockManifestServiceInterface, - blockTypeStatusService BlockTypeStatusServiceInterface, + blockchainStatusService BlockchainStatusServiceInterface, snapshotBlockServices map[int32]SnapshotBlockServiceInterface, - fileDownloaderService FileDownloaderServiceInterface, - fileService FileServiceInterface, logger *log.Logger, ) *SnapshotService { return &SnapshotService{ SpineBlockManifestService: spineBlockManifestService, - BlockTypeStatusService: blockTypeStatusService, + BlockchainStatusService: blockchainStatusService, SnapshotBlockServices: snapshotBlockServices, - FileDownloaderService: fileDownloaderService, - FileService: fileService, Logger: logger, } } @@ -73,9 +64,9 @@ func (ss *SnapshotService) GenerateSnapshot(block *model.Block, ct chaintype.Cha if !ok { return nil, fmt.Errorf("snapshots for chaintype %s not implemented", ct.GetName()) } - generatingSnapshot[ct.GetTypeInt()] = true + generatingSnapshot.Store(ct.GetTypeInt(), true) snapshotInfo, err := snapshotBlockService.NewSnapshotFile(block) - generatingSnapshot[ct.GetTypeInt()] = false + generatingSnapshot.Store(ct.GetTypeInt(), false) return snapshotInfo, err } } @@ -93,10 +84,15 @@ func (ss *SnapshotService) StopSnapshotGeneration(ct chaintype.ChainType) error } func (*SnapshotService) IsSnapshotProcessing(ct chaintype.ChainType) bool { - return generatingSnapshot[ct.GetTypeInt()] + if res, ok := generatingSnapshot.Load(ct.GetTypeInt()); ok { + return res + } + return false } // StartSnapshotListener setup listener for snapshots generation +// TODO: allow only active blocksmiths (registered nodes at this block height) to generate snapshots +// one way to do this is to inject the actual node public key and noderegistration service into this service func (ss *SnapshotService) StartSnapshotListener() observer.Listener { return observer.Listener{ OnNotify: func(blockI interface{}, args ...interface{}) { @@ -108,16 +104,16 @@ func (ss *SnapshotService) StartSnapshotListener() observer.Listener { if ct.HasSnapshots() { snapshotBlockService, ok := ss.SnapshotBlockServices[ct.GetTypeInt()] if !ok { - ss.Logger.Fatalf("snapshots for chaintype %s not implemented", ct.GetName()) + ss.Logger.Errorf("snapshots for chaintype %s not implemented", ct.GetName()) + return } if snapshotBlockService.IsSnapshotHeight(block.Height) { go func() { // if spine and main blocks are still downloading, after the node has started, // do not generate (or download from other peers) snapshots - if !ss.BlockTypeStatusService.IsFirstDownloadFinished(&chaintype.MainChain{}) && !ss. 
- BlockTypeStatusService.IsFirstDownloadFinished(&chaintype.SpineChain{}) { + if !ss.BlockchainStatusService.IsFirstDownloadFinished((&chaintype.MainChain{})) { ss.Logger.Infof("Snapshot at block "+ - "height %d not generated because spine blocks are still downloading", + "height %d not generated because blockchain is still downloading", block.Height) return } @@ -131,6 +127,7 @@ func (ss *SnapshotService) StartSnapshotListener() observer.Listener { if err != nil { ss.Logger.Errorf("Snapshot at block "+ "height %d terminated with errors %s", block.Height, err) + return } _, err = ss.SpineBlockManifestService.CreateSpineBlockManifest( snapshotInfo.SnapshotFileHash, @@ -144,50 +141,11 @@ func (ss *SnapshotService) StartSnapshotListener() observer.Listener { ss.Logger.Errorf("Cannot create spineBlockManifest at block "+ "height %d. Error %s", block.Height, err) } - ss.Logger.Infof("Snapshot at main block "+ - "height %d terminated successfully", block.Height) + ss.Logger.Infof("Generated Snapshot at main block "+ + "height %d", block.Height) }() } } }, } } - -func (ss *SnapshotService) DownloadSnapshot(ct chaintype.ChainType, spineBlockManifest *model.SpineBlockManifest) error { - var ( - failedDownloadChunkNames = make([]string, 0) - hashSize = sha3.New256().Size() - ) - fileChunkHashes, err := ss.parseFileChunkHashes(spineBlockManifest.GetFileChunkHashes(), hashSize) - if err != nil { - return err - } - for _, fileChunkHash := range fileChunkHashes { - fileName, err := ss.FileService.GetFileNameFromHash(fileChunkHash) - if err != nil { - return err - } - if err := ss.FileDownloaderService.DownloadFileByName(fileName, fileChunkHash); err != nil { - ss.Logger.Infof("Error Downloading snapshot file chunk. name: %s hash: %v", fileName, fileChunkHash) - failedDownloadChunkNames = append(failedDownloadChunkNames, fileName) - } - } - // TODO: implement retry on failed snapshot chunks (from a different peer) - if len(failedDownloadChunkNames) > 0 { - return blocker.NewBlocker(blocker.AppErr, fmt.Sprintf("One or more snapshot chunks failed to download %v", - failedDownloadChunkNames)) - } - return nil -} - -func (ss *SnapshotService) parseFileChunkHashes(fileHashes []byte, hashLength int) (fileHashesAry [][]byte, err error) { - // math.Mod returns the reminder of len(fileHashes)/hashLength - // we use it to check if the length of fileHashes is a multiple of the single hash's length (32 bytes for sha256) - if len(fileHashes) < hashLength || math.Mod(float64(len(fileHashes)), float64(hashLength)) > 0 { - return nil, blocker.NewBlocker(blocker.ValidationErr, "invalid file chunks hashes length") - } - for i := 0; i < len(fileHashes); i += hashLength { - fileHashesAry = append(fileHashesAry, fileHashes[i:i+hashLength]) - } - return fileHashesAry, nil -} diff --git a/core/service/snapshotService_test.go b/core/service/snapshotService_test.go index 3fe62383c..1c3c98a1c 100644 --- a/core/service/snapshotService_test.go +++ b/core/service/snapshotService_test.go @@ -2,8 +2,8 @@ package service import ( "database/sql" - "errors" "fmt" + "reflect" "regexp" "testing" @@ -26,8 +26,16 @@ type ( mockMainchain struct { chaintype.SpineChain } + + mockSnapshotMainBlockService struct { + SnapshotMainBlockService + } ) +func (*mockSnapshotMainBlockService) NewSnapshotFile(block *model.Block) (*model.SnapshotFileInfo, error) { + return new(model.SnapshotFileInfo), nil +} + var ( ssSpinechain = &chaintype.SpineChain{} ssMainchain = &chaintype.MainChain{} @@ -129,108 +137,55 @@ func (*mockMainchain) GetSmithingPeriod() 
int64 { return 15 } -type ( - mockFileDownloaderService struct { - FileDownloaderService - success bool - } -) - -func (mfdf *mockFileDownloaderService) DownloadFileByName(fileName string, fileHash []byte) error { - if mfdf.success { - return nil - } - return errors.New("DownloadFileByNameFail") -} - -func TestSnapshotService_DownloadSnapshot(t *testing.T) { +func TestSnapshotService_GenerateSnapshot(t *testing.T) { type fields struct { SpineBlockManifestService SpineBlockManifestServiceInterface - BlockTypeStatusService BlockTypeStatusServiceInterface + BlockchainStatusService BlockchainStatusServiceInterface SnapshotBlockServices map[int32]SnapshotBlockServiceInterface - FileDownloaderService FileDownloaderServiceInterface - FileService FileServiceInterface Logger *log.Logger } type args struct { - spineBlockManifest *model.SpineBlockManifest - ct chaintype.ChainType + block *model.Block + ct chaintype.ChainType + snapshotChunkBytesLength int } tests := []struct { name string fields fields args args + want *model.SnapshotFileInfo wantErr bool - errMsg string }{ { - name: "DownloadSnapshot:fail-{zerolength}", + name: "GenerateSnapshot", args: args{ - spineBlockManifest: &model.SpineBlockManifest{ - FileChunkHashes: make([]byte, 0), - }, - ct: &chaintype.MainChain{}, + ct: ssMainchain, + block: ssMockMainBlock, }, - wantErr: true, - errMsg: "ValidationErr: invalid file chunks hashes length", - }, - { - name: "DownloadSnapshot:fail-{DownloadFailed}", fields: fields{ - FileDownloaderService: &mockFileDownloaderService{ - success: false, - }, - FileService: &mockFileService{ - successGetFileNameFromHash: true, + SnapshotBlockServices: map[int32]SnapshotBlockServiceInterface{ + 0: &mockSnapshotMainBlockService{}, }, Logger: log.New(), }, - args: args{ - spineBlockManifest: &model.SpineBlockManifest{ - FileChunkHashes: make([]byte, 64), - }, - ct: &chaintype.MainChain{}, - }, - wantErr: true, - errMsg: "AppErr: One or more snapshot chunks failed to download [AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + - " AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA]", - }, - { - name: "DownloadSnapshot:success", - fields: fields{ - FileDownloaderService: &mockFileDownloaderService{ - success: true, - }, - FileService: &mockFileService{ - successGetFileNameFromHash: true, - }, - Logger: log.New(), - }, - args: args{ - spineBlockManifest: &model.SpineBlockManifest{ - FileChunkHashes: make([]byte, 64), - }, - ct: &chaintype.MainChain{}, - }, + want: new(model.SnapshotFileInfo), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ss := &SnapshotService{ SpineBlockManifestService: tt.fields.SpineBlockManifestService, - BlockTypeStatusService: tt.fields.BlockTypeStatusService, + BlockchainStatusService: tt.fields.BlockchainStatusService, SnapshotBlockServices: tt.fields.SnapshotBlockServices, - FileDownloaderService: tt.fields.FileDownloaderService, - FileService: tt.fields.FileService, Logger: tt.fields.Logger, } - if err := ss.DownloadSnapshot(tt.args.ct, tt.args.spineBlockManifest); err != nil { - if !tt.wantErr { - t.Errorf("SnapshotService.DownloadSnapshot() error = %v, wantErr %v", err, tt.wantErr) - } - if tt.errMsg != err.Error() { - t.Errorf("SnapshotService.DownloadSnapshot() error wrong test exit point: %v", err) - } + got, err := ss.GenerateSnapshot(tt.args.block, tt.args.ct, tt.args.snapshotChunkBytesLength) + if (err != nil) != tt.wantErr { + t.Errorf("SnapshotService.GenerateSnapshot() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + 
t.Errorf("SnapshotService.GenerateSnapshot() = %v, want %v", got, tt.want) } }) } diff --git a/core/service/spineBlockManifestService.go b/core/service/spineBlockManifestService.go index ffba336db..8c10a8617 100644 --- a/core/service/spineBlockManifestService.go +++ b/core/service/spineBlockManifestService.go @@ -70,7 +70,7 @@ func (ss *SpineBlockManifestService) GetSpineBlockManifestsForSpineBlock(spineHe return nil, err } - qry = ss.SpineBlockManifestQuery.GetSpineBlockManifestsInTimeInterval(prevSpineBlock.Timestamp, spineTimestamp) + qry = ss.SpineBlockManifestQuery.GetSpineBlockManifestTimeInterval(prevSpineBlock.Timestamp, spineTimestamp) rows, err := ss.QueryExecutor.ExecuteSelect(qry, false) if err != nil { return nil, err @@ -116,7 +116,7 @@ func (ss *SpineBlockManifestService) GetLastSpineBlockManifest(ct chaintype.Chai // ct the spineBlockManifest's chain type (eg. mainchain) // ct the spineBlockManifest's type (eg. snapshot) func (ss *SpineBlockManifestService) CreateSpineBlockManifest(fullFileHash []byte, megablockHeight uint32, - megablockTimestamp int64, sortedFileChunksHashes [][]byte, ct chaintype.ChainType, + expirationTimestamp int64, sortedFileChunksHashes [][]byte, ct chaintype.ChainType, mbType model.SpineBlockManifestType) (*model.SpineBlockManifest, error) { var ( @@ -137,7 +137,7 @@ func (ss *SpineBlockManifestService) CreateSpineBlockManifest(fullFileHash []byt SpineBlockManifestHeight: megablockHeight, ChainType: ct.GetTypeInt(), SpineBlockManifestType: mbType, - ExpirationTimestamp: megablockTimestamp, + ExpirationTimestamp: expirationTimestamp, } megablockID, err := ss.GetSpineBlockManifestID(spineBlockManifest) if err != nil { diff --git a/core/service/testdata/snapshots/vXu9Q01j1OWLRoqmIHW-KpyJBticdBS207Lg3OscPgyO b/core/service/testdata/snapshots/vXu9Q01j1OWLRoqmIHW-KpyJBticdBS207Lg3OscPgyO new file mode 100644 index 000000000..e69de29bb diff --git a/core/smith/blockchainProcessor.go b/core/smith/blockchainProcessor.go index c7897bec8..5e03a30dd 100644 --- a/core/smith/blockchainProcessor.go +++ b/core/smith/blockchainProcessor.go @@ -23,13 +23,13 @@ type ( // BlockchainProcessor handle smithing process, can be switch to process different chain by supplying different chain type BlockchainProcessor struct { - ChainType chaintype.ChainType - Generator *model.Blocksmith - BlockService service.BlockServiceInterface - LastBlockID int64 - Logger *log.Logger - smithError error - BlockTypeStatusService service.BlockTypeStatusServiceInterface + ChainType chaintype.ChainType + Generator *model.Blocksmith + BlockService service.BlockServiceInterface + LastBlockID int64 + Logger *log.Logger + smithError error + BlockchainStatusService service.BlockchainStatusServiceInterface } ) @@ -43,14 +43,14 @@ func NewBlockchainProcessor( blocksmith *model.Blocksmith, blockService service.BlockServiceInterface, logger *log.Logger, - blockTypeStatusService service.BlockTypeStatusServiceInterface, + blockchainStatusService service.BlockchainStatusServiceInterface, ) *BlockchainProcessor { return &BlockchainProcessor{ - ChainType: ct, - Generator: blocksmith, - BlockService: blockService, - Logger: logger, - BlockTypeStatusService: blockTypeStatusService, + ChainType: ct, + Generator: blocksmith, + BlockService: blockService, + Logger: logger, + BlockchainStatusService: blockchainStatusService, } } @@ -169,30 +169,29 @@ func (bp *BlockchainProcessor) StartSmithing() error { // Start starts the blockchainProcessor func (bp *BlockchainProcessor) Start(sleepPeriod time.Duration) { ticker 
:= time.NewTicker(sleepPeriod) - stopSmith = make(chan bool) go func() { for { select { case <-stopSmith: - ticker.Stop() bp.Logger.Infof("Stopped smithing %s", bp.BlockService.GetChainType().GetName()) - bp.BlockTypeStatusService.SetIsSmithing(bp.ChainType, false) + bp.BlockchainStatusService.SetIsSmithing(bp.ChainType, false) bp.smithError = nil + ticker.Stop() return case <-ticker.C: // when starting a node, do not start smithing until the main blocks have been fully downloaded - if !bp.BlockTypeStatusService.IsSmithingLocked() { + if !bp.BlockchainStatusService.IsSmithingLocked() { err := bp.StartSmithing() if err != nil { bp.Logger.Debugf("Smith Error for %s. %s", bp.BlockService.GetChainType().GetName(), err.Error()) - bp.BlockTypeStatusService.SetIsSmithing(bp.ChainType, false) + bp.BlockchainStatusService.SetIsSmithing(bp.ChainType, false) bp.smithError = err + } else { + bp.BlockchainStatusService.SetIsSmithing(bp.ChainType, true) + bp.smithError = nil } - bp.BlockTypeStatusService.SetIsSmithing(bp.ChainType, true) - bp.smithError = nil } else { - bp.BlockTypeStatusService.SetIsSmithing(bp.ChainType, true) - bp.Logger.Debug("Smithing process is locked...") + bp.BlockchainStatusService.SetIsSmithing(bp.ChainType, false) } } } @@ -206,5 +205,5 @@ func (*BlockchainProcessor) Stop() { // GetBlockChainprocessorStatus return the smithing status for this blockchain processor func (bp *BlockchainProcessor) GetBlockChainprocessorStatus() (isSmithing bool, err error) { - return bp.BlockTypeStatusService.IsSmithing(bp.ChainType), bp.smithError + return bp.BlockchainStatusService.IsSmithing(bp.ChainType), bp.smithError } diff --git a/core/smith/blockchainProcessor_test.go b/core/smith/blockchainProcessor_test.go index 5655dba94..7627b986c 100644 --- a/core/smith/blockchainProcessor_test.go +++ b/core/smith/blockchainProcessor_test.go @@ -14,11 +14,11 @@ import ( func TestNewBlockchainProcessor(t *testing.T) { type args struct { - ct chaintype.ChainType - blocksmith *model.Blocksmith - blockService service.BlockServiceInterface - logger *log.Logger - blockTypeStatusService service.BlockTypeStatusServiceInterface + ct chaintype.ChainType + blocksmith *model.Blocksmith + blockService service.BlockServiceInterface + logger *log.Logger + blockchainStatusService service.BlockchainStatusServiceInterface } tests := []struct { name string @@ -28,16 +28,16 @@ func TestNewBlockchainProcessor(t *testing.T) { { name: "wantSuccess", args: args{ - ct: &chaintype.MainChain{}, - blocksmith: &model.Blocksmith{}, - blockService: &service.BlockService{}, - blockTypeStatusService: service.NewBlockTypeStatusService(false), + ct: &chaintype.MainChain{}, + blocksmith: &model.Blocksmith{}, + blockService: &service.BlockService{}, + blockchainStatusService: &service.BlockchainStatusService{}, }, want: &BlockchainProcessor{ - ChainType: &chaintype.MainChain{}, - BlockService: &service.BlockService{}, - Generator: &model.Blocksmith{}, - BlockTypeStatusService: service.NewBlockTypeStatusService(false), + ChainType: &chaintype.MainChain{}, + Generator: &model.Blocksmith{}, + BlockService: &service.BlockService{}, + BlockchainStatusService: &service.BlockchainStatusService{}, }, }, } @@ -48,7 +48,7 @@ func TestNewBlockchainProcessor(t *testing.T) { tt.args.blocksmith, tt.args.blockService, tt.args.logger, - tt.args.blockTypeStatusService, + tt.args.blockchainStatusService, ); !reflect.DeepEqual(got, tt.want) { t.Errorf("NewBlockchainProcessor() = %v, want %v", got, tt.want) } diff --git 
a/core/smith/strategy/blocksmithStrategySpine_test.go b/core/smith/strategy/blocksmithStrategySpine_test.go index cd032e913..e23235d37 100644 --- a/core/smith/strategy/blocksmithStrategySpine_test.go +++ b/core/smith/strategy/blocksmithStrategySpine_test.go @@ -237,7 +237,7 @@ func TestBlocksmithStrategySpine_GetSmithTime(t *testing.T) { Timestamp: 0, }, }, - want: 300, + want: constant.SpineChainSmithingPeriod, }, { name: "GetSmithTime:1", @@ -253,7 +253,7 @@ func TestBlocksmithStrategySpine_GetSmithTime(t *testing.T) { Timestamp: 120120, }, }, - want: 120000 + 430, + want: 120120 + constant.SmithingBlocksmithTimeGap + constant.SpineChainSmithingPeriod, }, } for _, tt := range tests { @@ -599,7 +599,7 @@ func TestBlocksmithStrategySpine_CalculateSmith(t *testing.T) { NodePublicKey: bssNodePubKey1, NodeID: 1, Score: big.NewInt(1000000000), - SmithTime: 300, + SmithTime: constant.SpineChainSmithingPeriod, }, }, } diff --git a/main.go b/main.go index f9a94c9e9..13026d551 100644 --- a/main.go +++ b/main.go @@ -63,7 +63,7 @@ var ( mainchainBlockService *service.BlockService mainBlockSnapshotChunkStrategy service.SnapshotChunkStrategyInterface spinechainBlockService *service.BlockSpineService - fileDownloadService service.FileDownloaderServiceInterface + fileDownloader p2p.FileDownloaderInterface mempoolServices = make(map[int32]service.MempoolServiceInterface) blockIncompleteQueueService service.BlockIncompleteQueueServiceInterface receiptService service.ReceiptServiceInterface @@ -88,9 +88,10 @@ var ( chainTypes = chaintype.GetChainTypes() mainchain = &chaintype.MainChain{} spinechain = &chaintype.SpineChain{} - blockTypeStatusService service.BlockTypeStatusServiceInterface + blockchainStatusService service.BlockchainStatusServiceInterface mainchainDownloader, spinechainDownloader blockchainsync.BlockchainDownloadInterface mainchainForkProcessor, spinechainForkProcessor blockchainsync.ForkingProcessorInterface + nodeKey *model.NodeKey ) func init() { @@ -158,7 +159,7 @@ func init() { query.NewPublishedReceiptQuery(), receiptUtil, ) - blockTypeStatusService = service.NewBlockTypeStatusService(true) + blockchainStatusService = service.NewBlockchainStatusService(true, loggerCoreService) spineBlockManifestService = service.NewSpineBlockManifestService( queryExecutor, query.NewSpineBlockManifestQuery(), @@ -168,6 +169,7 @@ func init() { fileService = service.NewFileService( loggerCoreService, new(codec.CborHandle), + snapshotPath, ) mainBlockSnapshotChunkStrategy = service.NewSnapshotBasicChunkStrategy( constant.SnapshotChunkSize, @@ -187,17 +189,10 @@ func init() { query.GetSnapshotQuery(mainchain), ) - fileDownloadService = service.NewFileDownloaderService( - snapshotPath, - fileService, - loggerP2PService, - ) snapshotService = service.NewSnapshotService( spineBlockManifestService, - blockTypeStatusService, + blockchainStatusService, snapshotBlockServices, - fileDownloadService, - fileService, loggerCoreService, ) @@ -215,8 +210,7 @@ func init() { func loadNodeConfig(configPath, configFileName, configExtension string) { var ( - seed string - nodeKey *model.NodeKey + seed string ) if err := util.LoadConfig(configPath, configFileName, configExtension); err != nil { @@ -344,6 +338,13 @@ func initP2pInstance() { peerExplorer, loggerP2PService, transactionUtil, + fileService, + ) + fileDownloader = p2p.NewFileDownloader( + p2pServiceInstance, + fileService, + loggerP2PService, + blockchainStatusService, ) } @@ -352,7 +353,10 @@ func initObserverListeners() { // broadcast block will be different than 
other listener implementation, since there are few exception condition observerInstance.AddListener(observer.BroadcastBlock, p2pServiceInstance.SendBlockListener()) observerInstance.AddListener(observer.TransactionAdded, p2pServiceInstance.SendTransactionListener()) - observerInstance.AddListener(observer.BlockPushed, snapshotService.StartSnapshotListener()) + // only smithing nodes generate snapshots + if smithing { + observerInstance.AddListener(observer.BlockPushed, snapshotService.StartSnapshotListener()) + } observerInstance.AddListener(observer.BlockRequestTransactions, p2pServiceInstance.RequestBlockTransactionsListener()) observerInstance.AddListener(observer.ReceivedBlockTransactionsValidated, blockServices[0].ReceivedValidatedBlockTransactionsListener()) observerInstance.AddListener(observer.BlockTransactionsRequested, blockServices[0].BlockTransactionsRequestedListener()) @@ -368,6 +372,7 @@ func startServices() { queryExecutor, blockServices, mempoolServices, + fileService, observerInstance, ) api.Start( @@ -548,7 +553,7 @@ func startMainchain() { model.NewBlocksmith(nodeSecretPhrase, nodePublicKey, node.NodeID), mainchainBlockService, loggerCoreService, - blockTypeStatusService, + blockchainStatusService, ) mainchainProcessor.Start(sleepPeriod) } @@ -558,7 +563,7 @@ func startMainchain() { peerServiceClient, peerExplorer, loggerCoreService, - blockTypeStatusService, + blockchainStatusService, ) mainchainForkProcessor = &blockchainsync.ForkingProcessor{ ChainType: mainchainBlockService.GetChainType(), @@ -580,7 +585,7 @@ func startMainchain() { mainchainBlockService, peerServiceClient, peerExplorer, loggerCoreService, - blockTypeStatusService, + blockchainStatusService, mainchainDownloader, mainchainForkProcessor, ) @@ -629,7 +634,7 @@ func startSpinechain() { model.NewBlocksmith(nodeSecretPhrase, nodePublicKey, nodeID), spinechainBlockService, loggerCoreService, - blockTypeStatusService, + blockchainStatusService, ) spinechainProcessor.Start(sleepPeriod) } @@ -638,7 +643,7 @@ func startSpinechain() { peerServiceClient, peerExplorer, loggerCoreService, - blockTypeStatusService, + blockchainStatusService, ) spinechainForkProcessor = &blockchainsync.ForkingProcessor{ ChainType: spinechainBlockService.GetChainType(), @@ -661,7 +666,7 @@ func startSpinechain() { peerServiceClient, peerExplorer, loggerCoreService, - blockTypeStatusService, + blockchainStatusService, spinechainDownloader, spinechainForkProcessor, ) @@ -717,75 +722,114 @@ func startScheduler() { } func startBlockchainSyncronizers() { + var ( + spineBlocksDownloadFinished = make(chan bool, 1) + mainBlocksDownloadFinished = make(chan bool, 1) + ) go spinechainSynchronizer.Start() - ticker := time.NewTicker(constant.BlockchainsyncCheckInterval) - timeout := time.After(constant.BlockchainsyncSpineTimeout) -syncronizersLoop: - for { - select { - case <-ticker.C: - lastSpineBlock, err := spinechainSynchronizer.BlockService.GetLastBlock() + go func() { + ticker := time.NewTicker(constant.BlockchainsyncCheckInterval) + tickerLog := time.NewTicker(2 * time.Second) + timeout := time.After(constant.BlockchainsyncSpineTimeout) + for { + select { + case <-tickerLog.C: + loggerCoreService.Infof("downloading spine blocks...") + case <-ticker.C: + if blockchainStatusService.IsFirstDownloadFinished(spinechain) { + spineBlocksDownloadFinished <- true + ticker.Stop() + tickerLog.Stop() + return + } + // spine blocks shouldn't take that long to be downloaded. 
shutdown the node + // TODO: add push notification to node owner that the node has shutdown because of network issues + case <-timeout: + loggerCoreService.Fatal("spine blocks sync timed out...") + } + } + }() + + // wait downloading snapshot and main blocks until node has finished downloading spine blocks + <-spineBlocksDownloadFinished + lastSpineBlock, err := spinechainSynchronizer.BlockService.GetLastBlock() + if err != nil { + loggerCoreService.Errorf("cannot get last spine block") + os.Exit(1) + } + loggerCoreService.Infof("finished downloading spine blocks. last height is %d", lastSpineBlock.Height) + go func() { + ticker := time.NewTicker(constant.BlockchainsyncCheckInterval) + tickerLog := time.NewTicker(2 * time.Second) + for { + select { + case <-tickerLog.C: + loggerCoreService.Infof("downloading main blocks...") + case <-ticker.C: + if blockchainStatusService.IsFirstDownloadFinished(mainchain) { + mainBlocksDownloadFinished <- true + ticker.Stop() + tickerLog.Stop() + return + } + } + } + }() + loggerCoreService.Info("done downloading spine blocks. searching for snapshots...") + // loop through all chain types that support snapshots and download them if we find relative + // spineBlockManifest + lastMainBlock, err := mainchainSynchronizer.BlockService.GetLastBlock() + if err != nil { + loggerCoreService.Fatal("cannot get last main block") + } + for i := 0; i < len(chainTypes); i++ { + ct := chaintype.GetChainType(int32(i)) + // exclude spinechain + if i == int(spinechain.GetTypeInt()) { + continue + } + + // only download/apply snapshots first time a node joins the network (for now) + if lastMainBlock.Height == 0 && ct.HasSnapshots() { + // snapshot download + lastSpineBlockManifest, err := spineBlockManifestService.GetLastSpineBlockManifest(ct, + model.SpineBlockManifestType_Snapshot) if err != nil { - loggerCoreService.Errorf("cannot get last spine block") - os.Exit(1) + loggerCoreService.Errorf("db error: cannot get last spineBlockManifest for chaintype %s", + ct.GetName()) + break } - if blockTypeStatusService.IsFirstDownloadFinished(spinechain) { - // unlock smithing process after main blocks have finished downloading - blockTypeStatusService.SetIsSmithingLocked(false) - ticker.Stop() - // loop through all chain types that support snapshots and download them if we find relative - // spineBlockManifest - for i := 0; i < len(chainTypes); i++ { - ct := chaintype.GetChainType(int32(i)) - // exclude spinechain - if i == int(spinechain.GetTypeInt()) { - continue - } - - lastMainBlock, err := mainchainSynchronizer.BlockService.GetLastBlock() - if err != nil { - loggerCoreService.Errorf("cannot get last main block") - os.Exit(1) - } - // only download/apply snapshots first time a node joins the network (for now) - if lastMainBlock.Height == 0 { - // snapshot download - lastSpineBlockManifest, err := spineBlockManifestService.GetLastSpineBlockManifest(ct, - model.SpineBlockManifestType_Snapshot) - if err != nil { - loggerCoreService.Errorf("db error: cannot get last spineBlockManifest for chaintype %s", - ct.GetName()) - break - } - if lastSpineBlockManifest != nil { - loggerCoreService.Infof("found spineBlockManifest for chaintype %s at spine height %d. 
"+ - "snapshot taken at block height %d", ct.GetName(), lastSpineBlock.Height, - lastSpineBlockManifest.SpineBlockManifestHeight) - if err := snapshotService.DownloadSnapshot(ct, lastSpineBlockManifest); err != nil { - loggerCoreService.Info(err) - } - } - } - // download remaining main blocks and start the mainchain synchronizer - // TODO: generalise this so that we can just inject the chaintype and will start the correct - // syncronizer - switch ct.(type) { - case *chaintype.MainChain: - go mainchainSynchronizer.Start() - default: - loggerCoreService.Debug("invalid chaintype for snapshot") - } + if lastSpineBlockManifest != nil { + loggerCoreService.Infof("found a Snapshot Spine Block Manifest for chaintype %s, "+ + "at height is %d. Start downloading...", ct.GetName(), + lastSpineBlockManifest.SpineBlockManifestHeight) + if err := fileDownloader.DownloadSnapshot(ct, lastSpineBlockManifest); err != nil { + loggerCoreService.Info(err) } - break syncronizersLoop } - loggerCoreService.Infof("downloading spine blocks. last height is %d", lastSpineBlock.Height) - // spine blocks shouldn't take that long to be downloaded. shutdown the node - // TODO: add push notification to node owner that the node has shutdown because of network issues - case <-timeout: - loggerCoreService.Info("spine blocks sync timed out...") - os.Exit(1) + } + // download remaining main blocks and start the mainchain synchronizer + // TODO: generalise this so that we can just inject the chaintype and will start the correct + // syncronizer + switch ct.(type) { + case *chaintype.MainChain: + loggerCoreService.Info("start downloading main blocks...") + go mainchainSynchronizer.Start() + + default: + loggerCoreService.Debug("invalid chaintype for snapshot") } } + // unlock smithing process after main blocks have finished downloading + <-mainBlocksDownloadFinished + lastMainBlock, err = mainchainSynchronizer.BlockService.GetLastBlock() + if err != nil { + loggerCoreService.Fatal("cannot get last main block") + } + loggerCoreService.Infof("finished downloading main blocks. last height is %d", + lastMainBlock.Height) + loggerCoreService.Info("blockchain sync completed. 
unlocking smithing process...") + blockchainStatusService.SetIsSmithingLocked(false) } func main() { @@ -832,15 +876,15 @@ func main() { scSmithing, _ = spinechainProcessor.GetBlockChainprocessorStatus() } if !mcSmithing && !scSmithing { - ticker.Stop() - shutdownCompleted <- true loggerCoreService.Info("All smith processors have stopped") + shutdownCompleted <- true } case <-timeout: loggerCoreService.Info("ZOOBC Shutdown timedout...") os.Exit(1) case <-shutdownCompleted: loggerCoreService.Info("ZOOBC Shutdown complete") + ticker.Stop() os.Exit(0) } } diff --git a/p2p/client/peerServiceClient.go b/p2p/client/peerServiceClient.go index 8c5e428c1..01f7953a8 100644 --- a/p2p/client/peerServiceClient.go +++ b/p2p/client/peerServiceClient.go @@ -55,6 +55,10 @@ type ( // connection managements DeleteConnection(destPeer *model.Peer) error GetConnection(destPeer *model.Peer) (*grpc.ClientConn, error) + RequestDownloadFile( + destPeer *model.Peer, + fileChunkNames []string, + ) (*model.FileDownloadResponse, error) } // PeerServiceClient represent peer service PeerServiceClient struct { @@ -416,6 +420,33 @@ func (psc *PeerServiceClient) RequestBlockTransactions( return nil } +func (psc *PeerServiceClient) RequestDownloadFile( + destPeer *model.Peer, + fileChunkNames []string, +) (*model.FileDownloadResponse, error) { + monitoring.IncrementGoRoutineActivity(monitoring.P2pRequestFileDownloadClient) + defer monitoring.DecrementGoRoutineActivity(monitoring.P2pRequestFileDownloadClient) + + connection, err := psc.GetConnection(destPeer) + if err != nil { + return nil, err + } + var ( + p2pClient = service.NewP2PCommunicationClient(connection) + ctx, cancelReq = psc.getDefaultContext(20 * time.Second) + ) + defer func() { + cancelReq() + }() + res, err := p2pClient.RequestFileDownload(ctx, &model.FileDownloadRequest{ + FileChunkNames: fileChunkNames, + }) + if err != nil { + return nil, err + } + return res, nil +} + // GetCumulativeDifficulty request the cumulative difficulty status of a node func (psc *PeerServiceClient) GetCumulativeDifficulty( destPeer *model.Peer, diff --git a/p2p/fileDownloader.go b/p2p/fileDownloader.go new file mode 100644 index 000000000..69d074a56 --- /dev/null +++ b/p2p/fileDownloader.go @@ -0,0 +1,90 @@ +package p2p + +import ( + "fmt" + log "github.com/sirupsen/logrus" + "github.com/zoobc/zoobc-core/common/blocker" + "github.com/zoobc/zoobc-core/common/chaintype" + "github.com/zoobc/zoobc-core/common/constant" + "github.com/zoobc/zoobc-core/common/model" + "github.com/zoobc/zoobc-core/core/service" + "golang.org/x/crypto/sha3" + "sync" +) + +type ( + // FileDownloaderInterface snapshot logic shared across block types + FileDownloaderInterface interface { + DownloadSnapshot(ct chaintype.ChainType, spineBlockManifest *model.SpineBlockManifest) error + } + + FileDownloader struct { + FileService service.FileServiceInterface + P2pService Peer2PeerServiceInterface + BlockchainStatusService service.BlockchainStatusServiceInterface + Logger *log.Logger + } +) + +func NewFileDownloader( + p2pService Peer2PeerServiceInterface, + fileService service.FileServiceInterface, + logger *log.Logger, + blockchainStatusService service.BlockchainStatusServiceInterface, +) *FileDownloader { + return &FileDownloader{ + P2pService: p2pService, + FileService: fileService, + Logger: logger, + BlockchainStatusService: blockchainStatusService, + } +} + +// DownloadSnapshot downloads a snapshot from the p2p network +func (ss *FileDownloader) DownloadSnapshot(ct chaintype.ChainType, spineBlockManifest 
*model.SpineBlockManifest) error { + var ( + failedDownloadChunkNames = model.NewMapStringInt() // map instead of array to avoid duplicates + hashSize = sha3.New256().Size() + wg sync.WaitGroup + ) + fileChunkHashes, err := ss.FileService.ParseFileChunkHashes(spineBlockManifest.GetFileChunkHashes(), hashSize) + if err != nil { + return err + } + if len(fileChunkHashes) == 0 { + return blocker.NewBlocker(blocker.ValidationErr, "Failed parsing File Chunk Hashes from Spine Block Manifest") + } + + ss.BlockchainStatusService.SetIsDownloadingSnapshot(ct, true) + // TODO: implement some sort of rate limiting for number of concurrent downloads (eg. by segmenting the WaitGroup) + wg.Add(len(fileChunkHashes)) + for _, fileChunkHash := range fileChunkHashes { + go func(fileChunkHash []byte) { + defer wg.Done() + // TODO: for now download just one chunk per peer, + // but in future we could download multiple chunks at once from one peer + fileName := ss.FileService.GetFileNameFromHash(fileChunkHash) + failed, err := ss.P2pService.DownloadFilesFromPeer([]string{fileName}, constant.DownloadSnapshotNumberOfRetries) + if err != nil { + ss.Logger.Error(err) + } + if len(failed) > 0 { + var nInt int64 = 0 + n, ok := failedDownloadChunkNames.Load(fileName) + if ok { + nInt = n + 1 + } + failedDownloadChunkNames.Store(fileName, nInt) + return + } + }(fileChunkHash) + } + wg.Wait() + ss.BlockchainStatusService.SetIsDownloadingSnapshot(ct, false) + + if failedDownloadChunkNames.Count() > 0 { + return blocker.NewBlocker(blocker.AppErr, fmt.Sprintf("One or more snapshot chunks failed to download (name/failed times) %v", + failedDownloadChunkNames.GetMap())) + } + return nil +} diff --git a/p2p/fileDownloader_test.go b/p2p/fileDownloader_test.go new file mode 100644 index 000000000..76e5685d0 --- /dev/null +++ b/p2p/fileDownloader_test.go @@ -0,0 +1,200 @@ +package p2p + +import ( + "github.com/pkg/errors" + "reflect" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/zoobc/zoobc-core/common/chaintype" + "github.com/zoobc/zoobc-core/common/model" + "github.com/zoobc/zoobc-core/core/service" +) + +func TestNewFileDownloader(t *testing.T) { + type args struct { + p2pService Peer2PeerServiceInterface + fileService service.FileServiceInterface + logger *log.Logger + blockchainStatusService service.BlockchainStatusServiceInterface + } + tests := []struct { + name string + args args + want *FileDownloader + }{ + { + name: "NewFileDownloader:success", + args: args{ + p2pService: &Peer2PeerService{}, + blockchainStatusService: &service.BlockchainStatusService{}, + logger: &log.Logger{}, + fileService: &service.FileService{}, + }, + want: &FileDownloader{ + FileService: &service.FileService{}, + Logger: &log.Logger{}, + BlockchainStatusService: &service.BlockchainStatusService{}, + P2pService: &Peer2PeerService{}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := NewFileDownloader(tt.args.p2pService, tt.args.fileService, tt.args.logger, + tt.args.blockchainStatusService); !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewFileDownloader() = %v, want %v", got, tt.want) + } + }) + } +} + +type ( + mockFileService struct { + service.FileService + successParseFileChunkHashes bool + emptyRes bool + } + mockP2pService struct { + Peer2PeerService + success bool + } +) + +var ( + fdChunk1Hash = []byte{ + 1, 1, 1, 249, 145, 71, 241, 88, 208, 4, 80, 132, 88, 43, 189, 93, 19, 104, 255, 61, 177, 177, 223, + 188, 144, 9, 73, 75, 6, 1, 1, 1, + } + fdChunk2Hash = []byte{ + 2, 2, 2, 
249, 145, 71, 241, 88, 208, 4, 80, 132, 88, 43, 189, 93, 19, 104, 255, 61, 177, 177, 223, + 188, 144, 9, 73, 75, 6, 2, 2, 2, + } +) + +func (mfs *mockFileService) ParseFileChunkHashes(fileHashes []byte, hashLength int) (fileHashesAry [][]byte, err error) { + if mfs.emptyRes { + return nil, nil + } + if mfs.successParseFileChunkHashes { + return [][]byte{ + fdChunk1Hash, + fdChunk2Hash, + }, nil + } + return nil, errors.New("ParseFileChunkHashesFailed") +} + +func (mfs *mockFileService) GetFileNameFromHash(fileHash []byte) string { + return "testFileName" +} + +func (mp2p *mockP2pService) DownloadFilesFromPeer(fileChunksNames []string, retryCount uint32) (failed []string, err error) { + failed = make([]string, 0) + if mp2p.success { + return + } + return []string{"testFailedFile1"}, errors.New("DownloadFilesFromPeerFailed") +} + +func TestFileDownloader_DownloadSnapshot(t *testing.T) { + type fields struct { + FileService service.FileServiceInterface + P2pService Peer2PeerServiceInterface + BlockchainStatusService service.BlockchainStatusServiceInterface + Logger *log.Logger + } + type args struct { + ct chaintype.ChainType + spineBlockManifest *model.SpineBlockManifest + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "DownloadSnapshot:success", + args: args{ + ct: &chaintype.MainChain{}, + spineBlockManifest: &model.SpineBlockManifest{}, + }, + fields: fields{ + FileService: &mockFileService{ + successParseFileChunkHashes: true, + }, + P2pService: &mockP2pService{ + success: true, + }, + BlockchainStatusService: service.NewBlockchainStatusService(false, log.New()), + }, + }, + { + name: "DownloadSnapshot:fail-{ParseFileChunkHashesErr}", + args: args{ + ct: &chaintype.MainChain{}, + spineBlockManifest: &model.SpineBlockManifest{}, + }, + fields: fields{ + FileService: &mockFileService{ + successParseFileChunkHashes: false, + }, + P2pService: &mockP2pService{ + success: true, + }, + BlockchainStatusService: service.NewBlockchainStatusService(false, log.New()), + }, + wantErr: true, + }, + { + name: "DownloadSnapshot:fail-{ParseFileChunkHashesEmptyResult}", + args: args{ + ct: &chaintype.MainChain{}, + spineBlockManifest: &model.SpineBlockManifest{}, + }, + fields: fields{ + FileService: &mockFileService{ + successParseFileChunkHashes: true, + emptyRes: true, + }, + P2pService: &mockP2pService{ + success: true, + }, + BlockchainStatusService: service.NewBlockchainStatusService(false, log.New()), + }, + wantErr: true, + }, + { + name: "DownloadSnapshot:fail-{DownloadFilesFromPeer}", + args: args{ + ct: &chaintype.MainChain{}, + spineBlockManifest: &model.SpineBlockManifest{}, + }, + fields: fields{ + FileService: &mockFileService{ + successParseFileChunkHashes: true, + }, + P2pService: &mockP2pService{ + success: false, + }, + Logger: log.New(), + BlockchainStatusService: service.NewBlockchainStatusService(false, log.New()), + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ss := &FileDownloader{ + FileService: tt.fields.FileService, + P2pService: tt.fields.P2pService, + BlockchainStatusService: tt.fields.BlockchainStatusService, + Logger: tt.fields.Logger, + } + if err := ss.DownloadSnapshot(tt.args.ct, tt.args.spineBlockManifest); (err != nil) != tt.wantErr { + t.Errorf("FileDownloader.DownloadSnapshot() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/p2p/handler/p2pServerHandler.go b/p2p/handler/p2pServerHandler.go index 22b8ef327..e64ae00e9 100644 --- 
a/p2p/handler/p2pServerHandler.go +++ b/p2p/handler/p2pServerHandler.go @@ -173,3 +173,23 @@ func (ss *P2PServerHandler) RequestBlockTransactions( req.GetTransactionIDs(), ) } + +// RequestDownloadFile receives an array of file names and return corresponding files. +func (ss *P2PServerHandler) RequestFileDownload( + ctx context.Context, + req *model.FileDownloadRequest, +) (*model.FileDownloadResponse, error) { + monitoring.IncrementGoRoutineActivity(monitoring.P2pRequestFileDownloadServer) + defer monitoring.DecrementGoRoutineActivity(monitoring.P2pRequestFileDownloadServer) + if len(req.FileChunkNames) == 0 { + return nil, blocker.NewBlocker( + blocker.RequestParameterErr, + "request does not contain any file name", + ) + } + res, err := ss.Service.RequestDownloadFile(ctx, req.GetFileChunkNames()) + if res != nil { + monitoring.IncrementSnapshotDownloadCounter(int32(len(res.FileChunks)), int32(len(res.Failed))) + } + return res, err +} diff --git a/p2p/p2p.go b/p2p/p2p.go index e039044da..0bf2f06e2 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -2,6 +2,7 @@ package p2p import ( log "github.com/sirupsen/logrus" + "github.com/zoobc/zoobc-core/common/blocker" "github.com/zoobc/zoobc-core/common/chaintype" "github.com/zoobc/zoobc-core/common/interceptor" "github.com/zoobc/zoobc-core/common/model" @@ -29,6 +30,7 @@ type ( queryExecutor query.ExecutorInterface, blockServices map[int32]coreService.BlockServiceInterface, mempoolServices map[int32]coreService.MempoolServiceInterface, + fileService coreService.FileServiceInterface, observer *observer.Observer, ) // exposed api list @@ -42,6 +44,7 @@ type ( SendTransactionListener() observer.Listener RequestBlockTransactionsListener() observer.Listener SendBlockTransactionsListener() observer.Listener + DownloadFilesFromPeer(fileChunksNames []string, retryCount uint32) (failed []string, err error) } Peer2PeerService struct { Host *model.Host @@ -49,6 +52,7 @@ type ( PeerServiceClient client.PeerServiceClientInterface Logger *log.Logger TransactionUtil transaction.UtilInterface + FileService coreService.FileServiceInterface } ) @@ -59,12 +63,15 @@ func NewP2PService( peerExplorer strategy.PeerExplorerStrategyInterface, logger *log.Logger, transactionUtil transaction.UtilInterface, + fileService coreService.FileServiceInterface, ) (Peer2PeerServiceInterface, error) { return &Peer2PeerService{ Host: host, PeerServiceClient: peerServiceClient, + Logger: logger, PeerExplorer: peerExplorer, TransactionUtil: transactionUtil, + FileService: fileService, }, nil } @@ -76,10 +83,12 @@ func (s *Peer2PeerService) StartP2P( queryExecutor query.ExecutorInterface, blockServices map[int32]coreService.BlockServiceInterface, mempoolServices map[int32]coreService.MempoolServiceInterface, + fileService coreService.FileServiceInterface, observer *observer.Observer, ) { // peer to peer service layer | under p2p handler p2pServerService := p2pService.NewP2PServerService( + fileService, s.PeerExplorer, blockServices, mempoolServices, @@ -266,3 +275,86 @@ func (s *Peer2PeerService) SendBlockTransactionsListener() observer.Listener { }, } } + +// DownloadFilesFromPeer download a file from a random peer +func (s *Peer2PeerService) DownloadFilesFromPeer(fileChunksNames []string, maxRetryCount uint32) ([]string, error) { + var ( + peer *model.Peer + resolvedPeers = s.PeerExplorer.GetResolvedPeers() + peerKey string + retryCount uint32 + ) + // Retry downloading from different peers until all chunks are downloaded or retry limit is reached + if len(resolvedPeers) < 1 { + return 
nil, blocker.NewBlocker(blocker.P2PNetworkConnectionErr, "no resolved peer can be found") + } + // convert the slice to a map to make it easier to find elements in it + fileChunkNamesMap := make(map[string]string) + for _, s := range fileChunksNames { + fileChunkNamesMap[s] = s + } + fileChunksToDownload := fileChunksNames + r := util.GetFastRandomSeed() + for retryCount < maxRetryCount+1 { + retryCount++ + + // randomly select one of the resolved peers to download files from + // (no need for secure random here. we just want to get a quick pseudo random index) + randomIdx := int(util.GetFastRandom(r, len(resolvedPeers))) + if randomIdx != 0 { + randomIdx %= len(resolvedPeers) + } + idx := 0 + for peerKey, peer = range resolvedPeers { + if idx == randomIdx { + // remove selected peer from map to avoid selecting it again + delete(resolvedPeers, peerKey) + break + } + idx++ + } + + // download the files + fileDownloadResponse, err := s.PeerServiceClient.RequestDownloadFile(peer, fileChunksToDownload) + if err != nil { + return nil, err + } + + // check first that all chunks returned are valid + skipFilesFromPeer := false + for _, fileChunk := range fileDownloadResponse.GetFileChunks() { + fileChunkComputedName := s.FileService.GetFileNameFromBytes(fileChunk) + if _, ok := fileChunkNamesMap[fileChunkComputedName]; !ok { + s.Logger.Errorf("peer returned an invalid file chunk: %s", fileChunkComputedName) + skipFilesFromPeer = true + break + } + } + // never trust a peer that returns wrong data, just skip all files downloaded from it + if skipFilesFromPeer { + continue + } + + // save downloaded chunks to storage as soon as possible to avoid keeping in memory large arrays + for _, fileChunk := range fileDownloadResponse.GetFileChunks() { + fileChunkComputedName := s.FileService.GetFileNameFromBytes(fileChunk) + err = s.FileService.SaveBytesToFile(s.FileService.GetDownloadPath(), fileChunkComputedName, fileChunk) + if err != nil { + s.Logger.Errorf("failed saving file to storage: %s", err) + return nil, err + } + } + + // set next files to download = previous files that failed to download + fileChunksToDownload = fileDownloadResponse.GetFailed() + // break download loop either if all files have been successfully downloaded or there are no more peers to connect to + if len(fileChunksToDownload) == 0 || len(resolvedPeers) == 0 { + if len(fileChunksToDownload) > 0 && len(resolvedPeers) == 0 { + s.Logger.Debug("no more resolved peers to download files from. 
Already tried them all!") + } + break + } + } + + return fileChunksToDownload, nil +} diff --git a/p2p/p2p_test.go b/p2p/p2p_test.go new file mode 100644 index 000000000..d6807787a --- /dev/null +++ b/p2p/p2p_test.go @@ -0,0 +1,288 @@ +package p2p + +import ( + "bytes" + "github.com/pkg/errors" + "reflect" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/zoobc/zoobc-core/common/model" + "github.com/zoobc/zoobc-core/common/transaction" + coreService "github.com/zoobc/zoobc-core/core/service" + "github.com/zoobc/zoobc-core/p2p/client" + "github.com/zoobc/zoobc-core/p2p/strategy" +) + +type ( + p2pMockPeerExplorer struct { + strategy.PeerExplorerStrategyInterface + noResolvedPeers bool + } + p2pMockPeerServiceClient struct { + client.PeerServiceClient + noFailedDownloads bool + downloadErr bool + returnInvalidData bool + } + p2pMockFileService struct { + coreService.FileService + saveFileFailed bool + retFileName string + } +) + +var ( + p2pP1 = &model.Peer{ + Info: &model.Node{ + ID: 1111, + Port: 8080, + Address: "127.0.0.1", + }, + } + p2pP2 = &model.Peer{ + Info: &model.Node{ + ID: 2222, + Port: 9090, + Address: "127.0.0.2", + }, + } + p2pChunk1Bytes = []byte{ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + } + p2pChunk2Bytes = []byte{ + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + } + p2pChunk2InvalidBytes = []byte{ + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, + } +) + +func (p2pMpe *p2pMockPeerExplorer) GetResolvedPeers() map[string]*model.Peer { + if p2pMpe.noResolvedPeers { + return nil + } + peers := make(map[string]*model.Peer) + peers[p2pP1.Info.Address] = p2pP1 + peers[p2pP2.Info.Address] = p2pP2 + return peers +} + +func (p2pMpsc *p2pMockPeerServiceClient) RequestDownloadFile( + destPeer *model.Peer, + fileChunkNames []string, +) (*model.FileDownloadResponse, error) { + var ( + failed []string + downloadedChunks [][]byte + ) + if p2pMpsc.downloadErr { + return nil, errors.New("RequestDownloadFileFailed") + } + if p2pMpsc.returnInvalidData { + downloadedChunks = [][]byte{ + p2pChunk1Bytes, + p2pChunk2InvalidBytes, + } + } else { + downloadedChunks = [][]byte{ + p2pChunk1Bytes, + p2pChunk2Bytes, + } + } + if !p2pMpsc.noFailedDownloads { + failed = []string{ + "testChunkFailed1", + } + } + return &model.FileDownloadResponse{ + FileChunks: downloadedChunks, + Failed: failed, + }, nil +} + +func (p2pMfs *p2pMockFileService) GetFileNameFromBytes(fileBytes []byte) string { + if bytes.Equal(fileBytes, p2pChunk1Bytes) { + return "testChunk1" + } + if bytes.Equal(fileBytes, p2pChunk2Bytes) { + return "testChunk2" + } + if bytes.Equal(fileBytes, p2pChunk2InvalidBytes) { + return "testChunk2Invalid" + } + return p2pMfs.retFileName +} + +func (p2pMfs *p2pMockFileService) SaveBytesToFile(fileBasePath, filename string, b []byte) error { + if p2pMfs.saveFileFailed { + return errors.New("SaveBytesToFileFailed") + } + return nil +} + +func TestPeer2PeerService_DownloadFilesFromPeer(t *testing.T) { + type fields struct { + Host *model.Host + PeerExplorer strategy.PeerExplorerStrategyInterface + PeerServiceClient client.PeerServiceClientInterface + Logger *log.Logger + TransactionUtil transaction.UtilInterface + FileService coreService.FileServiceInterface + } + type args struct { + fileChunksNames []string + maxRetryCount uint32 + } + tests := []struct { + name string + fields fields + args args + wantFailed []string + wantErr bool + }{ + { + 
name: "DownloadFilesFromPeer:success-{noRetry}", + args: args{ + fileChunksNames: []string{ + "testChunk1", + "testChunk2", + "testChunk3", + }, + maxRetryCount: 0, + }, + fields: fields{ + Logger: log.New(), + PeerExplorer: &p2pMockPeerExplorer{}, + FileService: &p2pMockFileService{}, + PeerServiceClient: &p2pMockPeerServiceClient{}, + }, + wantFailed: []string{ + "testChunkFailed1", + }, + }, + { + name: "DownloadFilesFromPeer:success-{WithRetry}", + args: args{ + fileChunksNames: []string{ + "testChunk1", + "testChunk2", + "testChunk3", + }, + maxRetryCount: 1, + }, + fields: fields{ + Logger: log.New(), + PeerExplorer: &p2pMockPeerExplorer{}, + FileService: &p2pMockFileService{}, + PeerServiceClient: &p2pMockPeerServiceClient{}, + }, + wantFailed: []string{ + "testChunkFailed1", + }, + }, + { + name: "DownloadFilesFromPeer:success-{WithRetryNoFailedDownloads}", + args: args{ + fileChunksNames: []string{ + "testChunk1", + "testChunk2", + "testChunk3", + }, + maxRetryCount: 1, + }, + fields: fields{ + Logger: log.New(), + PeerExplorer: &p2pMockPeerExplorer{}, + FileService: &p2pMockFileService{}, + PeerServiceClient: &p2pMockPeerServiceClient{ + noFailedDownloads: true, + }, + }, + }, + { + name: "DownloadFilesFromPeer:fail-{DownloadFailed}", + args: args{ + fileChunksNames: []string{ + "testChunk1", + "testChunk2", + "testChunk3", + }, + maxRetryCount: 0, + }, + fields: fields{ + Logger: log.New(), + PeerExplorer: &p2pMockPeerExplorer{}, + FileService: &p2pMockFileService{}, + PeerServiceClient: &p2pMockPeerServiceClient{ + downloadErr: true, + }, + }, + wantErr: true, + }, + { + name: "DownloadFilesFromPeer:success-{DownloadedInvalidFileChunk}", + args: args{ + fileChunksNames: []string{ + "testChunk1", + "testChunk2", + "testChunk3", + }, + maxRetryCount: 0, + }, + fields: fields{ + Logger: log.New(), + PeerExplorer: &p2pMockPeerExplorer{}, + FileService: &p2pMockFileService{}, + PeerServiceClient: &p2pMockPeerServiceClient{ + returnInvalidData: true, + }, + }, + wantFailed: []string{ + "testChunk1", + "testChunk2", + "testChunk3", + }, + }, + { + name: "DownloadFilesFromPeer:fail-{SaveFileFailed}", + args: args{ + fileChunksNames: []string{ + "testChunk1", + "testChunk2", + "testChunk3", + }, + maxRetryCount: 0, + }, + fields: fields{ + Logger: log.New(), + PeerExplorer: &p2pMockPeerExplorer{}, + FileService: &p2pMockFileService{ + saveFileFailed: true, + }, + PeerServiceClient: &p2pMockPeerServiceClient{}, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &Peer2PeerService{ + Host: tt.fields.Host, + PeerExplorer: tt.fields.PeerExplorer, + PeerServiceClient: tt.fields.PeerServiceClient, + Logger: tt.fields.Logger, + TransactionUtil: tt.fields.TransactionUtil, + FileService: tt.fields.FileService, + } + gotFailed, err := s.DownloadFilesFromPeer(tt.args.fileChunksNames, tt.args.maxRetryCount) + if (err != nil) != tt.wantErr { + t.Errorf("Peer2PeerService.DownloadFilesFromPeer() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotFailed, tt.wantFailed) { + t.Errorf("Peer2PeerService.DownloadFilesFromPeer() = %v, want %v", gotFailed, tt.wantFailed) + } + }) + } +} diff --git a/p2p/service/p2pServerService.go b/p2p/service/p2pServerService.go index b59bd9eec..60dc00849 100644 --- a/p2p/service/p2pServerService.go +++ b/p2p/service/p2pServerService.go @@ -69,9 +69,14 @@ type ( blockID int64, transactionsIDs []int64, ) (*model.Empty, error) + RequestDownloadFile( + ctx context.Context, + fileChunkNames 
[]string, + ) (*model.FileDownloadResponse, error) } P2PServerService struct { + FileService coreService.FileServiceInterface PeerExplorer strategy.PeerExplorerStrategyInterface BlockServices map[int32]coreService.BlockServiceInterface MempoolServices map[int32]coreService.MempoolServiceInterface @@ -81,6 +86,7 @@ type ( ) func NewP2PServerService( + fileService coreService.FileServiceInterface, peerExplorer strategy.PeerExplorerStrategyInterface, blockServices map[int32]coreService.BlockServiceInterface, mempoolServices map[int32]coreService.MempoolServiceInterface, @@ -89,6 +95,7 @@ func NewP2PServerService( ) *P2PServerService { return &P2PServerService{ + FileService: fileService, PeerExplorer: peerExplorer, BlockServices: blockServices, MempoolServices: mempoolServices, @@ -256,7 +263,7 @@ func (ps *P2PServerService) GetNextBlockIDs( if err != nil { return nil, blocker.NewBlocker(blocker.BlockNotFoundErr, err.Error()) } - blocks, err := blockService.GetBlocksFromHeight(foundBlock.Height, limit) + blocks, err := blockService.GetBlocksFromHeight(foundBlock.Height, limit, false) if err != nil { return nil, blocker.NewBlocker( blocker.BlockErr, @@ -293,7 +300,7 @@ func (ps *P2PServerService) GetNextBlocks( if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } - blocks, err := blockService.GetBlocksFromHeight(block.Height, uint32(len(blockIDList))) + blocks, err := blockService.GetBlocksFromHeight(block.Height, uint32(len(blockIDList)), true) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } @@ -431,3 +438,29 @@ func (ps *P2PServerService) RequestBlockTransactions( } return nil, status.Error(codes.Unauthenticated, "Rejected request") } + +func (ps *P2PServerService) RequestDownloadFile( + ctx context.Context, + fileChunkNames []string, +) (*model.FileDownloadResponse, error) { + var ( + fileChunks = make([][]byte, 0) + failed []string + ) + if ps.PeerExplorer.ValidateRequest(ctx) { + for _, fileName := range fileChunkNames { + chunkBytes, err := ps.FileService.ReadFileByName(ps.FileService.GetDownloadPath(), fileName) + if err != nil { + failed = append(failed, fileName) + } else { + fileChunks = append(fileChunks, chunkBytes) + } + } + res := &model.FileDownloadResponse{ + FileChunks: fileChunks, + Failed: failed, + } + return res, nil + } + return nil, status.Error(codes.Unauthenticated, "Rejected request") +} diff --git a/resource/config2.toml b/resource/config2.toml index 8624ba053..3d5f841bd 100644 --- a/resource/config2.toml +++ b/resource/config2.toml @@ -4,7 +4,7 @@ badgerDbPath="./resource" badgerDbName="zoobc_2_kv/" configPath = "./resource/" nodeKeyFile = "node_keys_2.json" -snapshotPath = "./resource/snapshots" +snapshotPath = "./resource/snapshots_2" # Peer peerPort=8002 @@ -24,5 +24,5 @@ proofOfOwnershipReqTimeoutSec = 1 # Available log level: info, warn, error, fatal, panic logLevels=["fatal", "error", "panic"] -apiapiCertFile="./resource/network.crt" +apiCertFile="./resource/network.crt" apiKeyFile="./resource/network.key" \ No newline at end of file
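
Note (illustrative, not part of the patch): the new FileDownloader.DownloadSnapshot introduced above fans out one goroutine per snapshot chunk, joins them with a sync.WaitGroup, and collects the names of chunks that failed to download in a concurrency-safe map before returning an error. The standalone Go sketch below mirrors that pattern under simplified assumptions; downloadChunk and the chunk names are hypothetical stand-ins for the zoobc P2pService/FileService calls, not the actual API.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// downloadChunk is a hypothetical stand-in for the peer-to-peer download call
// (Peer2PeerService.DownloadFilesFromPeer in the patch): it "downloads" a
// chunk by name and fails for one specific name so the failure path is exercised.
func downloadChunk(name string) error {
	if name == "chunk-2" {
		return errors.New("peer returned no data")
	}
	return nil
}

func main() {
	chunkNames := []string{"chunk-0", "chunk-1", "chunk-2", "chunk-3"}

	var (
		wg     sync.WaitGroup
		mu     sync.Mutex
		failed = make(map[string]int64) // chunk name -> failure count
	)

	// Fan out: one goroutine per chunk, joined with a WaitGroup,
	// mirroring the pattern used by FileDownloader.DownloadSnapshot.
	wg.Add(len(chunkNames))
	for _, name := range chunkNames {
		go func(name string) {
			defer wg.Done()
			if err := downloadChunk(name); err != nil {
				// Record the failure under a mutex; the real code uses a
				// concurrency-safe map type for the same purpose.
				mu.Lock()
				failed[name]++
				mu.Unlock()
			}
		}(name)
	}
	wg.Wait()

	if len(failed) > 0 {
		fmt.Printf("one or more snapshot chunks failed to download: %v\n", failed)
		return
	}
	fmt.Println("all snapshot chunks downloaded")
}
```

As the TODO left in the patch points out, a production version would likely also bound the number of concurrent downloads (for example with a buffered-channel semaphore) instead of starting one goroutine per chunk unconditionally.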