diff --git a/src/Makefile.am b/src/Makefile.am index afeef92359..a62ccee43b 100755 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -57,11 +57,14 @@ endif .PHONY: FORCE # gridcoin core # GRIDCOIN_CORE_H = \ - addrman.h \ + addrdb.h \ + addrman.h \ alert.h \ allocators.h \ + attributes.h \ appcache.h \ backup.h \ + banman.h \ base58.h \ beacon.h \ bignum.h \ @@ -76,6 +79,7 @@ GRIDCOIN_CORE_H = \ contract/contract.h \ crypter.h \ db.h \ + fs.h \ fwd.h \ global_objects.hpp \ global_objects_noui.hpp \ @@ -100,9 +104,11 @@ GRIDCOIN_CORE_H = \ pbkdf2.h \ prevector.h \ protocol.h \ + reverselock.h \ rpcclient.h \ rpcprotocol.h \ rpcserver.h \ + scheduler.h \ scraper_net.h \ scraper/fwd.h \ scraper/http.h \ @@ -124,11 +130,13 @@ GRIDCOIN_CORE_H = \ walletdb.h \ wallet.h -GRIDCOIN_CORE_CPP = addrman.cpp \ +GRIDCOIN_CORE_CPP = addrdb.cpp \ + addrman.cpp \ alert.cpp \ allocators.cpp \ appcache.cpp \ backup.cpp \ + banman.cpp \ beacon.cpp \ block.cpp \ boinc.cpp \ @@ -137,6 +145,7 @@ GRIDCOIN_CORE_CPP = addrman.cpp \ contract/contract.cpp \ crypter.cpp \ db.cpp \ + fs.cpp \ gridcoin.cpp \ init.cpp \ kernel.cpp \ @@ -172,6 +181,7 @@ GRIDCOIN_CORE_CPP = addrman.cpp \ scraper/scraper.cpp \ script.cpp \ scrypt.cpp \ + scheduler.cpp \ sync.cpp \ tally.cpp \ txdb-leveldb.cpp \ diff --git a/src/addrdb.cpp b/src/addrdb.cpp new file mode 100644 index 0000000000..20d5a5fa39 --- /dev/null +++ b/src/addrdb.cpp @@ -0,0 +1,149 @@ +// Copyright (c) 2009-2010 Satoshi Nakamoto +// Copyright (c) 2009-2018 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include + +#include +// #include +// #include +#include +// #include +// #include +#include +// #include +// #include "main.h" +#include "util.h" +#include "net.h" +#include "fs.h" + +namespace { + +template +bool SerializeDB(Stream& stream, const Data& data) +{ + // Write and commit header, data + try { + CHashWriter hasher(SER_DISK, CLIENT_VERSION); + stream << FLATDATA(pchMessageStart) << data; + hasher << FLATDATA(pchMessageStart) << data; + stream << hasher.GetHash(); + } catch (const std::exception& e) { + return error("%s: Serialize or I/O error - %s", __func__, e.what()); + } + + return true; +} + +template +bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data& data) +{ + // Generate random temporary filename + unsigned short randv = 0; + RAND_bytes((unsigned char*)&randv, sizeof(randv)); + std::string tmpfn = strprintf("%s.%04x", prefix, randv); + + // open temp output file, and associate with CAutoFile + fs::path pathTmp = GetDataDir() / tmpfn; + FILE *file = fsbridge::fopen(pathTmp, "wb"); + CAutoFile fileout(file, SER_DISK, CLIENT_VERSION); + if (fileout.IsNull()) + return error("%s: Failed to open file %s", __func__, pathTmp.string()); + + // Serialize + if (!SerializeDB(fileout, data)) return false; + if (!FileCommit(fileout.Get())) + return error("%s: Failed to flush file %s", __func__, pathTmp.string()); + fileout.fclose(); + + // replace existing file, if any, with new file + if (!RenameOver(pathTmp, path)) + return error("%s: Rename-into-place failed", __func__); + + return true; +} + +template +bool DeserializeDB(Stream& stream, Data& data, bool fCheckSum = true) +{ + try { + CHashVerifier verifier(&stream); + // de-serialize file header (network specific magic number) and .. + unsigned char pchMsgTmp[4]; + verifier >> pchMsgTmp; + // ... 
verify the network matches ours + if (memcmp(pchMsgTmp, pchMessageStart, sizeof(pchMsgTmp))) + return error("%s: Invalid network magic number", __func__); + + // de-serialize data + verifier >> data; + + // verify checksum + if (fCheckSum) { + uint256 hashTmp; + stream >> hashTmp; + if (hashTmp != verifier.GetHash()) { + return error("%s: Checksum mismatch, data corrupted", __func__); + } + } + } + catch (const std::exception& e) { + return error("%s: Deserialize or I/O error - %s", __func__, e.what()); + } + + return true; +} + +template +bool DeserializeFileDB(const fs::path& path, Data& data) +{ + // open input file, and associate with CAutoFile + FILE *file = fsbridge::fopen(path, "rb"); + CAutoFile filein(file, SER_DISK, CLIENT_VERSION); + if (filein.IsNull()) + return error("%s: Failed to open file %s", __func__, path.string()); + + return DeserializeDB(filein, data); +} + +} + +CBanDB::CBanDB(fs::path ban_list_path) : m_ban_list_path(std::move(ban_list_path)) +{ +} + +bool CBanDB::Write(const banmap_t& banSet) +{ + return SerializeFileDB("banlist", m_ban_list_path, banSet); +} + +bool CBanDB::Read(banmap_t& banSet) +{ + return DeserializeFileDB(m_ban_list_path, banSet); +} + +CAddrDB::CAddrDB() +{ + pathAddr = GetDataDir() / "peers.dat"; +} + +bool CAddrDB::Write(const CAddrMan& addr) +{ + return SerializeFileDB("peers", pathAddr, addr); +} + +bool CAddrDB::Read(CAddrMan& addr) +{ + return DeserializeFileDB(pathAddr, addr); +} + +bool CAddrDB::Read(CAddrMan& addr, CDataStream& ssPeers) +{ + bool ret = DeserializeDB(ssPeers, addr, false); + if (!ret) { + // Ensure addrman is left in a clean state + addr.Clear(); + } + return ret; +} diff --git a/src/addrdb.h b/src/addrdb.h new file mode 100644 index 0000000000..bd2f6d16cc --- /dev/null +++ b/src/addrdb.h @@ -0,0 +1,108 @@ +// Copyright (c) 2009-2010 Satoshi Nakamoto +// Copyright (c) 2009-2018 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or 
http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_ADDRDB_H +#define BITCOIN_ADDRDB_H + +#include +#include + +#include +#include + +class CSubNet; +class CAddrMan; +class CDataStream; + +typedef enum BanReason +{ + BanReasonUnknown = 0, + BanReasonNodeMisbehaving = 1, + BanReasonManuallyAdded = 2 +} BanReason; + +class CBanEntry +{ +public: + static const int CURRENT_VERSION=1; + int nVersion; + int64_t nCreateTime; + int64_t nBanUntil; + uint8_t banReason; + + CBanEntry() + { + SetNull(); + } + + explicit CBanEntry(int64_t nCreateTimeIn) + { + SetNull(); + nCreateTime = nCreateTimeIn; + } + + explicit CBanEntry(int64_t n_create_time_in, BanReason ban_reason_in) : CBanEntry(n_create_time_in) + { + banReason = ban_reason_in; + } + + ADD_SERIALIZE_METHODS; + + template + inline void SerializationOp(Stream& s, Operation ser_action) { + READWRITEVARIADIC(this->nVersion); + READWRITEVARIADIC(nCreateTime); + READWRITEVARIADIC(nBanUntil); + READWRITEVARIADIC(banReason); + } + + void SetNull() + { + nVersion = CBanEntry::CURRENT_VERSION; + nCreateTime = 0; + nBanUntil = 0; + banReason = BanReasonUnknown; + } + + std::string banReasonToString() const + { + switch (banReason) { + case BanReasonNodeMisbehaving: + return "node misbehaving"; + case BanReasonManuallyAdded: + return "manually added"; + default: + return "unknown"; + } + } +}; + +typedef std::map banmap_t; + +/** Access to the (IP) address database (peers.dat) */ +class CAddrDB +{ +private: + fs::path pathAddr; +public: + CAddrDB(); + bool Write(const CAddrMan& addr); + bool Read(CAddrMan& addr); + + static bool Read(CAddrMan& addr, CDataStream& ssPeers); +}; + +/** Access to the banlist database (banlist.dat) */ +class CBanDB +{ +private: + const fs::path m_ban_list_path; +public: + explicit CBanDB(fs::path ban_list_path); + bool Write(const banmap_t& banSet); + bool Read(banmap_t& banSet); +}; + +#endif // BITCOIN_ADDRDB_H diff --git a/src/addrman.h b/src/addrman.h index 
ca1b3a850e..9ccdbe9fc4 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -500,6 +500,36 @@ class CAddrMan Check(); } } + + void Clear() + { + LOCK(cs); + std::vector().swap(vRandom); + RAND_bytes(&nKey[0], 32); + vvTried = std::vector>(ADDRMAN_TRIED_BUCKET_COUNT, std::vector(0)); + vvNew = std::vector>(ADDRMAN_NEW_BUCKET_COUNT, std::set()); + // Will need for Bitcoin rebase + // nKey = insecure_rand.rand256(); + //for (size_t bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; bucket++) { + // for (size_t entry = 0; entry < ADDRMAN_BUCKET_SIZE; entry++) { + // vvNew[bucket][entry] = -1; + // } + //} + //for (size_t bucket = 0; bucket < ADDRMAN_TRIED_BUCKET_COUNT; bucket++) { + // for (size_t entry = 0; entry < ADDRMAN_BUCKET_SIZE; entry++) { + // vvTried[bucket][entry] = -1; + // } + //} + + nIdCount = 0; + nTried = 0; + nNew = 0; + // Will need for Bitcoin rebase + // nLastGood = 1; //Initially at 1 so that "never" is strictly worse. + mapInfo.clear(); + mapAddr.clear(); + } + }; #endif diff --git a/src/attributes.h b/src/attributes.h new file mode 100644 index 0000000000..45099bd8b8 --- /dev/null +++ b/src/attributes.h @@ -0,0 +1,22 @@ +// Copyright (c) 2009-2010 Satoshi Nakamoto +// Copyright (c) 2009-2018 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#ifndef BITCOIN_ATTRIBUTES_H +#define BITCOIN_ATTRIBUTES_H + +#if defined(__has_cpp_attribute) +# if __has_cpp_attribute(nodiscard) +# define NODISCARD [[nodiscard]] +# endif +#endif +#ifndef NODISCARD +# if defined(_MSC_VER) && _MSC_VER >= 1700 +# define NODISCARD _Check_return_ +# else +# define NODISCARD __attribute__((warn_unused_result)) +# endif +#endif + +#endif // BITCOIN_ATTRIBUTES_H diff --git a/src/banman.cpp b/src/banman.cpp new file mode 100644 index 0000000000..8a16e8b102 --- /dev/null +++ b/src/banman.cpp @@ -0,0 +1,219 @@ +// Copyright (c) 2009-2010 Satoshi Nakamoto +// Copyright (c) 2009-2017 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include "banman.h" + +#include "netbase.h" +#include "ui_interface.h" +#include "util.h" + + +BanMan::BanMan(fs::path ban_file, CClientUIInterface* client_interface, int64_t default_ban_time) + : m_client_interface(client_interface), m_ban_db(std::move(ban_file)), m_default_ban_time(default_ban_time) +{ + if (m_client_interface) m_client_interface->InitMessage(_("Loading banlist...")); + + int64_t n_start = GetTimeMillis(); + m_is_dirty = false; + banmap_t banmap; + if (m_ban_db.Read(banmap)) { + SetBanned(banmap); // thread save setter + SetBannedSetDirty(false); // no need to write down, just read data + SweepBanned(); // sweep out unused entries + + LogPrint("network", "Loaded %d banned node ips/subnets from banlist.dat %dms\n", + banmap.size(), GetTimeMillis() - n_start); + } else { + LogPrintf("Invalid or missing banlist.dat; recreating\n"); + SetBannedSetDirty(true); // force write + DumpBanlist(); + } +} + +BanMan::~BanMan() +{ + DumpBanlist(); +} + +void BanMan::DumpBanlist() +{ + SweepBanned(); // clean unused entries (if bantime has expired) + + if (!BannedSetIsDirty()) return; + + int64_t n_start = GetTimeMillis(); + + banmap_t banmap; + GetBanned(banmap); + if 
(m_ban_db.Write(banmap)) { + SetBannedSetDirty(false); + } + + LogPrint("network", "Flushed %d banned node ips/subnets to banlist.dat %dms\n", + banmap.size(), GetTimeMillis() - n_start); +} + +void BanMan::ClearBanned() +{ + { + LOCK(m_cs_banned); + m_banned.clear(); + m_is_dirty = true; + } + DumpBanlist(); //store banlist to disk + if (m_client_interface) m_client_interface->BannedListChanged(); +} + +int BanMan::IsBannedLevel(CNetAddr net_addr) +{ + // Returns the most severe level of banning that applies to this address. + // 0 - Not banned + // 1 - Automatic misbehavior ban + // 2 - Any other ban + int level = 0; + auto current_time = GetTime(); + LOCK(m_cs_banned); + for (const auto& it : m_banned) { + CSubNet sub_net = it.first; + CBanEntry ban_entry = it.second; + + if (current_time < ban_entry.nBanUntil && sub_net.Match(net_addr)) { + if (ban_entry.banReason != BanReasonNodeMisbehaving) return 2; + level = 1; + } + } + return level; +} + +bool BanMan::IsBanned(CNetAddr net_addr) +{ + auto current_time = GetTime(); + LOCK(m_cs_banned); + for (const auto& it : m_banned) { + CSubNet sub_net = it.first; + CBanEntry ban_entry = it.second; + + if (current_time < ban_entry.nBanUntil && sub_net.Match(net_addr)) { + return true; + } + } + return false; +} + +bool BanMan::IsBanned(CSubNet sub_net) +{ + auto current_time = GetTime(); + LOCK(m_cs_banned); + banmap_t::iterator i = m_banned.find(sub_net); + if (i != m_banned.end()) { + CBanEntry ban_entry = (*i).second; + if (current_time < ban_entry.nBanUntil) { + return true; + } + } + return false; +} + +void BanMan::Ban(const CNetAddr& net_addr, const BanReason& ban_reason, int64_t ban_time_offset, bool since_unix_epoch) +{ + CSubNet sub_net(net_addr); + Ban(sub_net, ban_reason, ban_time_offset, since_unix_epoch); +} + +void BanMan::Ban(const CSubNet& sub_net, const BanReason& ban_reason, int64_t ban_time_offset, bool since_unix_epoch) +{ + CBanEntry ban_entry(GetTime(), ban_reason); + + int64_t 
normalized_ban_time_offset = ban_time_offset; + bool normalized_since_unix_epoch = since_unix_epoch; + if (ban_time_offset <= 0) { + normalized_ban_time_offset = m_default_ban_time; + normalized_since_unix_epoch = false; + } + ban_entry.nBanUntil = (normalized_since_unix_epoch ? 0 : GetTime()) + normalized_ban_time_offset; + + { + LOCK(m_cs_banned); + if (m_banned[sub_net].nBanUntil < ban_entry.nBanUntil) { + m_banned[sub_net] = ban_entry; + m_is_dirty = true; + } else + return; + } + if (m_client_interface) m_client_interface->BannedListChanged(); + + //store banlist to disk immediately if user requested ban + if (ban_reason == BanReasonManuallyAdded) DumpBanlist(); +} + +bool BanMan::Unban(const CNetAddr& net_addr) +{ + CSubNet sub_net(net_addr); + return Unban(sub_net); +} + +bool BanMan::Unban(const CSubNet& sub_net) +{ + { + LOCK(m_cs_banned); + if (m_banned.erase(sub_net) == 0) return false; + m_is_dirty = true; + } + if (m_client_interface) m_client_interface->BannedListChanged(); + DumpBanlist(); //store banlist to disk immediately + return true; +} + +void BanMan::GetBanned(banmap_t& banmap) +{ + LOCK(m_cs_banned); + // Sweep the banlist so expired bans are not returned + SweepBanned(); + banmap = m_banned; //create a thread safe copy +} + +void BanMan::SetBanned(const banmap_t& banmap) +{ + LOCK(m_cs_banned); + m_banned = banmap; + m_is_dirty = true; +} + +void BanMan::SweepBanned() +{ + int64_t now = GetTime(); + bool notify_ui = false; + { + LOCK(m_cs_banned); + banmap_t::iterator it = m_banned.begin(); + while (it != m_banned.end()) { + CSubNet sub_net = (*it).first; + CBanEntry ban_entry = (*it).second; + if (now > ban_entry.nBanUntil) { + m_banned.erase(it++); + m_is_dirty = true; + notify_ui = true; + LogPrint("network", "%s: Removed banned node ip/subnet from banlist.dat: %s\n", __func__, sub_net.ToString()); + } else + ++it; + } + } + // update UI + if (notify_ui && m_client_interface) { + m_client_interface->BannedListChanged(); + } +} + +bool 
BanMan::BannedSetIsDirty() +{ + LOCK(m_cs_banned); + return m_is_dirty; +} + +void BanMan::SetBannedSetDirty(bool dirty) +{ + LOCK(m_cs_banned); //reuse m_banned lock for the m_is_dirty flag + m_is_dirty = dirty; +} diff --git a/src/banman.h b/src/banman.h new file mode 100644 index 0000000000..a1a00309dd --- /dev/null +++ b/src/banman.h @@ -0,0 +1,70 @@ +// Copyright (c) 2009-2010 Satoshi Nakamoto +// Copyright (c) 2009-2017 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. +#ifndef BITCOIN_BANMAN_H +#define BITCOIN_BANMAN_H + +#include +#include + +#include +#include +#include + +// NOTE: When adjusting this, update rpcnet:setban's help ("24h") +static constexpr unsigned int DEFAULT_MISBEHAVING_BANTIME = 60 * 60 * 24; // Default 24-hour ban + +class CClientUIInterface; +class CNetAddr; +class CSubNet; + +// Denial-of-service detection/prevention +// The idea is to detect peers that are behaving +// badly and disconnect/ban them, but do it in a +// one-coding-mistake-won't-shatter-the-entire-network +// way. +// IMPORTANT: There should be nothing I can give a +// node that it will forward on that will make that +// node's peers drop it. If there is, an attacker +// can isolate a node and/or try to split the network. +// Dropping a node for sending stuff that is invalid +// now but might be valid in a later version is also +// dangerous, because it can cause a network split +// between nodes running old code and nodes running +// new code. 
+ +class BanMan +{ +public: + ~BanMan(); + BanMan(fs::path ban_file, CClientUIInterface* client_interface, int64_t default_ban_time); + void Ban(const CNetAddr& net_addr, const BanReason& ban_reason, int64_t ban_time_offset = 0, bool since_unix_epoch = false); + void Ban(const CSubNet& sub_net, const BanReason& ban_reason, int64_t ban_time_offset = 0, bool since_unix_epoch = false); + void ClearBanned(); + int IsBannedLevel(CNetAddr net_addr); + bool IsBanned(CNetAddr net_addr); + bool IsBanned(CSubNet sub_net); + bool Unban(const CNetAddr& net_addr); + bool Unban(const CSubNet& sub_net); + void GetBanned(banmap_t& banmap); + void DumpBanlist(); + +private: + void SetBanned(const banmap_t& banmap); + bool BannedSetIsDirty(); + //!set the "dirty" flag for the banlist + void SetBannedSetDirty(bool dirty = true); + //!clean unused entries (if bantime has expired) + void SweepBanned(); + + CCriticalSection m_cs_banned; + banmap_t m_banned GUARDED_BY(m_cs_banned); + bool m_is_dirty GUARDED_BY(m_cs_banned); + CClientUIInterface* m_client_interface = nullptr; + CBanDB m_ban_db; + const int64_t m_default_ban_time; +}; + +extern std::unique_ptr g_banman; +#endif diff --git a/src/db.cpp b/src/db.cpp index babeb51639..d44a80cb4c 100644 --- a/src/db.cpp +++ b/src/db.cpp @@ -491,7 +491,7 @@ void CDBEnv::Flush(bool fShutdown) // CAddrDB // - +/* CAddrDB::CAddrDB() { pathAddr = GetDataDir() / "peers.dat"; @@ -588,4 +588,4 @@ bool CAddrDB::Read(CAddrMan& addr) return true; } - +*/ diff --git a/src/db.h b/src/db.h index c33d4e1b83..c7eff08816 100644 --- a/src/db.h +++ b/src/db.h @@ -308,6 +308,7 @@ class CDB /** Access to the (IP) address database (peers.dat) */ +/* class CAddrDB { private: @@ -317,5 +318,6 @@ class CAddrDB bool Write(const CAddrMan& addr); bool Read(CAddrMan& addr); }; +*/ #endif // BITCOIN_DB_H diff --git a/src/fs.cpp b/src/fs.cpp new file mode 100644 index 0000000000..7b422b8d70 --- /dev/null +++ b/src/fs.cpp @@ -0,0 +1,223 @@ +#include + +#ifndef WIN32 
+#include +#else +#ifndef NOMINMAX +#define NOMINMAX +#endif +#include +#include +#endif + +namespace fsbridge { + +FILE *fopen(const fs::path& p, const char *mode) +{ +#ifndef WIN32 + return ::fopen(p.string().c_str(), mode); +#else + std::wstring_convert,wchar_t> utf8_cvt; + return ::_wfopen(p.wstring().c_str(), utf8_cvt.from_bytes(mode).c_str()); +#endif +} + +#ifndef WIN32 + +static std::string GetErrorReason() { + return std::strerror(errno); +} + +FileLock::FileLock(const fs::path& file) +{ + fd = open(file.string().c_str(), O_RDWR); + if (fd == -1) { + reason = GetErrorReason(); + } +} + +FileLock::~FileLock() +{ + if (fd != -1) { + close(fd); + } +} + +bool FileLock::TryLock() +{ + if (fd == -1) { + return false; + } + struct flock lock; + lock.l_type = F_WRLCK; + lock.l_whence = SEEK_SET; + lock.l_start = 0; + lock.l_len = 0; + if (fcntl(fd, F_SETLK, &lock) == -1) { + reason = GetErrorReason(); + return false; + } + return true; +} +#else + +static std::string GetErrorReason() { + wchar_t* err; + FormatMessageW(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + nullptr, GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), reinterpret_cast(&err), 0, nullptr); + std::wstring err_str(err); + LocalFree(err); + return std::wstring_convert>().to_bytes(err_str); +} + +FileLock::FileLock(const fs::path& file) +{ + hFile = CreateFileW(file.wstring().c_str(), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr); + if (hFile == INVALID_HANDLE_VALUE) { + reason = GetErrorReason(); + } +} + +FileLock::~FileLock() +{ + if (hFile != INVALID_HANDLE_VALUE) { + CloseHandle(hFile); + } +} + +bool FileLock::TryLock() +{ + if (hFile == INVALID_HANDLE_VALUE) { + return false; + } + _OVERLAPPED overlapped = {0}; + if (!LockFileEx(hFile, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, std::numeric_limits::max(), 
std::numeric_limits::max(), &overlapped)) { + reason = GetErrorReason(); + return false; + } + return true; +} +#endif + +std::string get_filesystem_error_message(const fs::filesystem_error& e) +{ +#ifndef WIN32 + return e.what(); +#else + // Convert from Multi Byte to utf-16 + std::string mb_string(e.what()); + int size = MultiByteToWideChar(CP_ACP, 0, mb_string.c_str(), mb_string.size(), nullptr, 0); + + std::wstring utf16_string(size, L'\0'); + MultiByteToWideChar(CP_ACP, 0, mb_string.c_str(), mb_string.size(), &*utf16_string.begin(), size); + // Convert from utf-16 to utf-8 + return std::wstring_convert, wchar_t>().to_bytes(utf16_string); +#endif +} + +#ifdef WIN32 +#ifdef __GLIBCXX__ + +// reference: https://github.com/gcc-mirror/gcc/blob/gcc-7_3_0-release/libstdc%2B%2B-v3/include/std/fstream#L270 + +static std::string openmodeToStr(std::ios_base::openmode mode) +{ + switch (mode & ~std::ios_base::ate) { + case std::ios_base::out: + case std::ios_base::out | std::ios_base::trunc: + return "w"; + case std::ios_base::out | std::ios_base::app: + case std::ios_base::app: + return "a"; + case std::ios_base::in: + return "r"; + case std::ios_base::in | std::ios_base::out: + return "r+"; + case std::ios_base::in | std::ios_base::out | std::ios_base::trunc: + return "w+"; + case std::ios_base::in | std::ios_base::out | std::ios_base::app: + case std::ios_base::in | std::ios_base::app: + return "a+"; + case std::ios_base::out | std::ios_base::binary: + case std::ios_base::out | std::ios_base::trunc | std::ios_base::binary: + return "wb"; + case std::ios_base::out | std::ios_base::app | std::ios_base::binary: + case std::ios_base::app | std::ios_base::binary: + return "ab"; + case std::ios_base::in | std::ios_base::binary: + return "rb"; + case std::ios_base::in | std::ios_base::out | std::ios_base::binary: + return "r+b"; + case std::ios_base::in | std::ios_base::out | std::ios_base::trunc | std::ios_base::binary: + return "w+b"; + case std::ios_base::in | 
std::ios_base::out | std::ios_base::app | std::ios_base::binary: + case std::ios_base::in | std::ios_base::app | std::ios_base::binary: + return "a+b"; + default: + return std::string(); + } +} + +void ifstream::open(const fs::path& p, std::ios_base::openmode mode) +{ + close(); + mode |= std::ios_base::in; + m_file = fsbridge::fopen(p, openmodeToStr(mode).c_str()); + if (m_file == nullptr) { + return; + } + m_filebuf = __gnu_cxx::stdio_filebuf(m_file, mode); + rdbuf(&m_filebuf); + if (mode & std::ios_base::ate) { + seekg(0, std::ios_base::end); + } +} + +void ifstream::close() +{ + if (m_file != nullptr) { + m_filebuf.close(); + fclose(m_file); + } + m_file = nullptr; +} + +void ofstream::open(const fs::path& p, std::ios_base::openmode mode) +{ + close(); + mode |= std::ios_base::out; + m_file = fsbridge::fopen(p, openmodeToStr(mode).c_str()); + if (m_file == nullptr) { + return; + } + m_filebuf = __gnu_cxx::stdio_filebuf(m_file, mode); + rdbuf(&m_filebuf); + if (mode & std::ios_base::ate) { + seekp(0, std::ios_base::end); + } +} + +void ofstream::close() +{ + if (m_file != nullptr) { + m_filebuf.close(); + fclose(m_file); + } + m_file = nullptr; +} +#else // __GLIBCXX__ + +static_assert(sizeof(*fs::path().BOOST_FILESYSTEM_C_STR) == sizeof(wchar_t), + "Warning: This build is using boost::filesystem ofstream and ifstream " + "implementations which will fail to open paths containing multibyte " + "characters. 
You should delete this static_assert to ignore this warning, " + "or switch to a different C++ standard library like the Microsoft C++ " + "Standard Library (where boost uses non-standard extensions to construct " + "stream objects with wide filenames), or the GNU libstdc++ library (where " + "a more complicated workaround has been implemented above)."); + +#endif // __GLIBCXX__ +#endif // WIN32 + +} // fsbridge diff --git a/src/fs.h b/src/fs.h new file mode 100644 index 0000000000..c713297d6e --- /dev/null +++ b/src/fs.h @@ -0,0 +1,96 @@ +// Copyright (c) 2017-2018 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_FS_H +#define BITCOIN_FS_H + +#include +#include +#if defined WIN32 && defined __GLIBCXX__ +#include +#endif + +#define BOOST_FILESYSTEM_NO_DEPRECATED +#include +#include + +/** Filesystem operations and types */ +namespace fs = boost::filesystem; + +/** Bridge operations to C stdio */ +namespace fsbridge { + FILE *fopen(const fs::path& p, const char *mode); + + class FileLock + { + public: + FileLock() = delete; + FileLock(const FileLock&) = delete; + FileLock(FileLock&&) = delete; + explicit FileLock(const fs::path& file); + ~FileLock(); + bool TryLock(); + std::string GetReason() { return reason; } + + private: + std::string reason; +#ifndef WIN32 + int fd = -1; +#else + void* hFile = (void*)-1; // INVALID_HANDLE_VALUE +#endif + }; + + std::string get_filesystem_error_message(const fs::filesystem_error& e); + + // GNU libstdc++ specific workaround for opening UTF-8 paths on Windows. + // + // On Windows, it is only possible to reliably access multibyte file paths through + // `wchar_t` APIs, not `char` APIs. 
But because the C++ standard doesn't + // require ifstream/ofstream `wchar_t` constructors, and the GNU library doesn't + // provide them (in contrast to the Microsoft C++ library, see + // https://stackoverflow.com/questions/821873/how-to-open-an-stdfstream-ofstream-or-ifstream-with-a-unicode-filename/822032#822032), + // Boost is forced to fall back to `char` constructors which may not work properly. + // + // Work around this issue by creating stream objects with `_wfopen` in + // combination with `__gnu_cxx::stdio_filebuf`. This workaround can be removed + // with an upgrade to C++17, where streams can be constructed directly from + // `std::filesystem::path` objects. + +#if defined WIN32 && defined __GLIBCXX__ + class ifstream : public std::istream + { + public: + ifstream() = default; + explicit ifstream(const fs::path& p, std::ios_base::openmode mode = std::ios_base::in) { open(p, mode); } + ~ifstream() { close(); } + void open(const fs::path& p, std::ios_base::openmode mode = std::ios_base::in); + bool is_open() { return m_filebuf.is_open(); } + void close(); + + private: + __gnu_cxx::stdio_filebuf m_filebuf; + FILE* m_file = nullptr; + }; + class ofstream : public std::ostream + { + public: + ofstream() = default; + explicit ofstream(const fs::path& p, std::ios_base::openmode mode = std::ios_base::out) { open(p, mode); } + ~ofstream() { close(); } + void open(const fs::path& p, std::ios_base::openmode mode = std::ios_base::out); + bool is_open() { return m_filebuf.is_open(); } + void close(); + + private: + __gnu_cxx::stdio_filebuf m_filebuf; + FILE* m_file = nullptr; + }; +#else // !(WIN32 && __GLIBCXX__) + typedef fs::ifstream ifstream; + typedef fs::ofstream ofstream; +#endif // WIN32 && __GLIBCXX__ +}; + +#endif // BITCOIN_FS_H diff --git a/src/hash.h b/src/hash.h index a11a853156..ca317c5c69 100644 --- a/src/hash.h +++ b/src/hash.h @@ -197,7 +197,7 @@ class CHashVerifier : public CHashWriter CHashVerifier& operator>>(T&& obj) { // Unserialize from 
this stream - ::Unserialize(*this, obj); + ::Unserialize(*this, obj, nType, nVersion); return (*this); } }; diff --git a/src/init.cpp b/src/init.cpp index 3ca7ef61f4..087be59ced 100755 --- a/src/init.cpp +++ b/src/init.cpp @@ -8,11 +8,13 @@ #include "net.h" #include "txdb.h" #include "walletdb.h" +#include "banman.h" #include "rpcserver.h" #include "init.h" #include "ui_interface.h" #include "tally.h" #include "beacon.h" +#include "scheduler.h" #include "neuralnet/neuralnet.h" #include "neuralnet/researcher.h" @@ -25,10 +27,13 @@ #include // for to_lower() #include // for startswith() and endswith() + #include "global_objects_noui.hpp" bool LoadAdminMessages(bool bFullTableScan,std::string& out_errors); -extern boost::thread_group threadGroup; + +static boost::thread_group threadGroup; +static CScheduler scheduler; void TallyResearchAverages(CBlockIndex* index); extern void ThreadAppInit2(void* parg); @@ -54,6 +59,12 @@ extern bool fExplorer; extern bool fUseFastIndex; extern boost::filesystem::path pathScraper; +// Dump addresses to banlist.dat every 15 minutes (900s) +static constexpr int DUMP_BANS_INTERVAL = 60 * 15; + +std::unique_ptr g_banman; + + ////////////////////////////////////////////////////////////////////////////// // // Shutdown @@ -100,6 +111,11 @@ void Shutdown(void* parg) LogPrintf("gridcoinresearch exiting..."); fShutdown = true; + + // clean up the threads running serviceQueue: + threadGroup.interrupt_all(); + threadGroup.join_all(); + bitdb.Flush(false); StopNode(); bitdb.Flush(true); @@ -592,6 +608,15 @@ bool AppInit2(ThreadHandlerPtr threads) int64_t nStart; + + // Start the lightweight task scheduler thread + CScheduler::Function serviceLoop = std::bind(&CScheduler::serviceQueue, &scheduler); + threadGroup.create_thread(std::bind(&TraceThread, "scheduler", serviceLoop)); + + // TODO: Do we need this? It would require porting the Bitcoin signal handler. 
+ // GetMainSignals().RegisterBackgroundSignalScheduler(scheduler); + + // ********************************************************* Step 5: verify database integrity uiInterface.InitMessage(_("Verifying database integrity...")); @@ -910,6 +935,11 @@ bool AppInit2(ThreadHandlerPtr threads) // ********************************************************* Step 10: load peers + // Ban manager instance should not already be instantiated + assert(!g_banman); + // Create ban manager instance. + g_banman = MakeUnique(GetDataDir() / "banlist.dat", &uiInterface, GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME)); + uiInterface.InitMessage(_("Loading addresses...")); if (fDebug10) LogPrintf("Loading addresses..."); nStart = GetTimeMillis(); @@ -984,5 +1014,10 @@ bool AppInit2(ThreadHandlerPtr threads) int64_t nBalanceInQuestion; pwalletMain->FixSpentCoins(nMismatchSpent, nBalanceInQuestion); + scheduler.scheduleEvery([]{ + g_banman->DumpBanlist(); + }, DUMP_BANS_INTERVAL * 1000); + + return true; } diff --git a/src/main.cpp b/src/main.cpp index 981063afae..25de8e7882 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -1388,12 +1388,6 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CTransaction &tx, bool* pfMissingInput // This is done last to help prevent CPU exhaustion denial-of-service attacks. 
if (!tx.ConnectInputs(txdb, mapInputs, mapUnused, CDiskTxPos(1,1,1), pindexBest, false, false)) { - // If this happens repeatedly, purge peers - if (TimerMain("AcceptToMemoryPool", 20)) - { - LogPrint("mempool", "AcceptToMemoryPool::CleaningInboundConnections"); - CleanInboundConnections(true); - } if (fDebug || true) { return error("AcceptToMemoryPool : Unable to Connect Inputs %s", hash.ToString().c_str()); @@ -2328,10 +2322,6 @@ bool CTransaction::ConnectInputs(CTxDB& txdb, MapPrevTx inputs, mapClearBanned(); if (!pNode->fClient && !pNode->fOneShot && (pNode->nStartingHeight > (nBestHeight - 144)) && (pNode->nVersion < NOBLKS_VERSION_START || pNode->nVersion >= NOBLKS_VERSION_END) ) { if (hashStart==uint256(0)) @@ -4173,7 +4162,6 @@ void CleanInboundConnections(bool bClearAll) LOCK(cs_vNodes); for(CNode* pNode : vNodes) { - pNode->ClearBanned(); if (pNode->nStartingHeight < (nBestHeight-1000) || bClearAll) { pNode->fDisconnect=true; @@ -6388,6 +6376,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, } else if (strCommand == "part") { + LOCK(CSplitBlob::cs_mapParts); + CSplitBlob::RecvPart(pfrom,vRecv); } diff --git a/src/main.h b/src/main.h index fff2b6e51a..2e9f82b81f 100644 --- a/src/main.h +++ b/src/main.h @@ -110,6 +110,12 @@ inline bool IsV10Enabled(int nHeight) : nHeight >= 1420000; } +inline bool IsV11Enabled(int nHeight) +{ + // Returns false before planned intro of bv11. + return false; +} + inline int GetSuperblockAgeSpacing(int nHeight) { return (fTestNet ? 86400 : (nHeight > 364500) ? 
86400 : 43200); diff --git a/src/net.cpp b/src/net.cpp index 680b505fe9..5adb3aca5d 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -8,9 +8,9 @@ #endif #include "db.h" +#include "banman.h" #include "net.h" #include "init.h" -#include "addrman.h" #include "ui_interface.h" #include "util.h" @@ -662,6 +662,34 @@ void CNode::CloseSocketDisconnect() } +bool CNode::DisconnectNode(const std::string& strNode) +{ + LOCK(cs_vNodes); + if (CNode* pnode = FindNode(strNode)) { + pnode->fDisconnect = true; + return true; + } + return false; +} + +bool CNode::DisconnectNode(const CSubNet& subnet) +{ + bool disconnected = false; + LOCK(cs_vNodes); + for (CNode* pnode : vNodes) { + if (subnet.Match(pnode->addr)) { + pnode->fDisconnect = true; + disconnected = true; + } + } + return disconnected; +} + +bool CNode::DisconnectNode(const CNetAddr& addr) +{ + return CNode::DisconnectNode(CSubNet(addr)); +} + void CNode::PushVersion() { int64_t nTime = GetAdjustedTime(); @@ -685,6 +713,8 @@ void CNode::PushVersion() } +// Superceded by banman. 
+/* std::map CNode::setBanned; CCriticalSection CNode::cs_setBanned; @@ -708,6 +738,7 @@ bool CNode::IsBanned(CNetAddr ip) } return fResult; } +*/ bool CNode::Misbehaving(int howmuch) { @@ -720,17 +751,13 @@ bool CNode::Misbehaving(int howmuch) nMisbehavior += howmuch; if (nMisbehavior >= GetArg("-banscore", 100)) { - int64_t banTime = GetAdjustedTime()+GetArg("-bantime", 60*60*24); // Default 24-hour ban - if (fDebug10) LogPrintf("Misbehaving: %s (%d -> %d) DISCONNECTING", addr.ToString(), nMisbehavior-howmuch, nMisbehavior); - { - LOCK(cs_setBanned); - if (setBanned[addr] < banTime) - setBanned[addr] = banTime; - } + if (fDebug) LogPrintf("Misbehaving: %s (%d -> %d) DISCONNECTING", addr.ToString(), nMisbehavior-howmuch, nMisbehavior); + + g_banman->Ban(addr, BanReasonNodeMisbehaving, DEFAULT_MISBEHAVING_BANTIME); CloseSocketDisconnect(); return true; } else - if (fDebug10) LogPrintf("Misbehaving: %s (%d -> %d)", addr.ToString(), nMisbehavior-howmuch, nMisbehavior); + if (fDebug) LogPrintf("Misbehaving: %s (%d -> %d)", addr.ToString(), nMisbehavior-howmuch, nMisbehavior); return false; } @@ -1103,7 +1130,7 @@ void ThreadSocketHandler2(void* parg) LogPrintf("Surpassed max inbound connections maxconnections:%" PRId64 " minus max_outbound:%i", GetArg("-maxconnections",250), MAX_OUTBOUND_CONNECTIONS); closesocket(hSocket); } - else if (CNode::IsBanned(addr)) + else if (g_banman->IsBanned(addr)) { if (fDebug10) LogPrintf("connection from %s dropped (banned)", addr.ToString()); closesocket(hSocket); @@ -1899,7 +1926,7 @@ bool OpenNetworkConnection(const CAddress& addrConnect, CSemaphoreGrant *grantOu return false; if (!strDest) if (IsLocal(addrConnect) || - FindNode((CNetAddr)addrConnect) || CNode::IsBanned(addrConnect) || + FindNode((CNetAddr)addrConnect) || g_banman->IsBanned(addrConnect) || FindNode(addrConnect.ToStringIPPort().c_str())) return false; if (strDest && FindNode(strDest)) diff --git a/src/net.h b/src/net.h index 10fe44e25f..16ae94aab7 100644 --- 
a/src/net.h +++ b/src/net.h @@ -594,6 +594,10 @@ class CNode void CancelSubscribe(unsigned int nChannel); void CloseSocketDisconnect(); + static bool DisconnectNode(const std::string& strNode); + static bool DisconnectNode(const CSubNet& subnet); + static bool DisconnectNode(const CNetAddr& addr); + // Denial-of-service detection/prevention // The idea is to detect peers that are behaving // badly and disconnect/ban them, but do it in a @@ -608,8 +612,8 @@ class CNode // dangerous, because it can cause a network split // between nodes running old code and nodes running // new code. - static void ClearBanned(); // needed for unit testing - static bool IsBanned(CNetAddr ip); + // static void ClearBanned(); // needed for unit testing + // static bool IsBanned(CNetAddr ip); bool Misbehaving(int howmuch); // 1 == a little, 100 == a lot void copyStats(CNodeStats &stats); diff --git a/src/netbase.cpp b/src/netbase.cpp index 92044c9298..c20674b94b 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -159,6 +159,43 @@ bool LookupNumeric(const char *pszName, CService& addr, int portDefault) return Lookup(pszName, addr, portDefault, false); } +bool LookupSubNet(const char* pszName, CSubNet& ret) +{ + std::string strSubnet(pszName); + size_t slash = strSubnet.find_last_of('/'); + std::vector vIP; + + std::string strAddress = strSubnet.substr(0, slash); + if (LookupHost(strAddress.c_str(), vIP, 1, false)) + { + CNetAddr network = vIP[0]; + if (slash != strSubnet.npos) + { + std::string strNetmask = strSubnet.substr(slash + 1); + int32_t n; + // IPv4 addresses start at offset 12, and first 12 bytes must match, so just offset n + if (ParseInt32(strNetmask, &n)) { // If valid number, assume /24 syntax + ret = CSubNet(network, n); + return ret.IsValid(); + } + else // If not a valid number, try full netmask syntax + { + // Never allow lookup for netmask + if (LookupHost(strNetmask.c_str(), vIP, 1, false)) { + ret = CSubNet(network, vIP[0]); + return ret.IsValid(); + } + } + } + 
else + { + ret = CSubNet(network); + return ret.IsValid(); + } + } + return false; +} + bool static Socks4(const CService &addrDest, SOCKET& hSocket) { LogPrintf("SOCKS4 connecting %s", addrDest.ToString()); @@ -1151,3 +1188,146 @@ void CService::SetPort(unsigned short portIn) { port = portIn; } + +CSubNet::CSubNet(): + valid(false) +{ + memset(netmask, 0, sizeof(netmask)); +} + +CSubNet::CSubNet(const CNetAddr &addr, int32_t mask) +{ + valid = true; + network = addr; + // Default to /32 (IPv4) or /128 (IPv6), i.e. match single address + memset(netmask, 255, sizeof(netmask)); + + // IPv4 addresses start at offset 12, and first 12 bytes must match, so just offset n + const int astartofs = network.IsIPv4() ? 12 : 0; + + int32_t n = mask; + if(n >= 0 && n <= (128 - astartofs*8)) // Only valid if in range of bits of address + { + n += astartofs*8; + // Clear bits [n..127] + for (; n < 128; ++n) + netmask[n>>3] &= ~(1<<(7-(n&7))); + } else + valid = false; + + // Normalize network according to netmask + for(int x=0; x<16; ++x) + network.ip[x] &= netmask[x]; +} + +CSubNet::CSubNet(const CNetAddr &addr, const CNetAddr &mask) +{ + valid = true; + network = addr; + // Default to /32 (IPv4) or /128 (IPv6), i.e. match single address + memset(netmask, 255, sizeof(netmask)); + + // IPv4 addresses start at offset 12, and first 12 bytes must match, so just offset n + const int astartofs = network.IsIPv4() ? 12 : 0; + + for(int x=astartofs; x<16; ++x) + netmask[x] = mask.ip[x]; + + // Normalize network according to netmask + for(int x=0; x<16; ++x) + network.ip[x] &= netmask[x]; +} + +CSubNet::CSubNet(const CNetAddr &addr): + valid(addr.IsValid()) +{ + memset(netmask, 255, sizeof(netmask)); + network = addr; +} + +/** + * @returns True if this subnet is valid, the specified address is valid, and + * the specified address belongs in this subnet. 
+ */ +bool CSubNet::Match(const CNetAddr &addr) const +{ + if (!valid || !addr.IsValid()) + return false; + for(int x=0; x<16; ++x) + if ((addr.ip[x] & netmask[x]) != network.ip[x]) + return false; + return true; +} + +/** + * @returns The number of 1-bits in the prefix of the specified subnet mask. If + * the specified subnet mask is not a valid one, -1. + */ +static inline int NetmaskBits(uint8_t x) +{ + switch(x) { + case 0x00: return 0; + case 0x80: return 1; + case 0xc0: return 2; + case 0xe0: return 3; + case 0xf0: return 4; + case 0xf8: return 5; + case 0xfc: return 6; + case 0xfe: return 7; + case 0xff: return 8; + default: return -1; + } +} + +std::string CSubNet::ToString() const +{ + /* Parse binary 1{n}0{N-n} to see if mask can be represented as /n */ + int cidr = 0; + bool valid_cidr = true; + int n = network.IsIPv4() ? 12 : 0; + for (; n < 16 && netmask[n] == 0xff; ++n) + cidr += 8; + if (n < 16) { + int bits = NetmaskBits(netmask[n]); + if (bits < 0) + valid_cidr = false; + else + cidr += bits; + ++n; + } + for (; n < 16 && valid_cidr; ++n) + if (netmask[n] != 0x00) + valid_cidr = false; + + /* Format output */ + std::string strNetmask; + if (valid_cidr) { + strNetmask = strprintf("%u", cidr); + } else { + if (network.IsIPv4()) + strNetmask = strprintf("%u.%u.%u.%u", netmask[12], netmask[13], netmask[14], netmask[15]); + else + strNetmask = strprintf("%x:%x:%x:%x:%x:%x:%x:%x", + netmask[0] << 8 | netmask[1], netmask[2] << 8 | netmask[3], + netmask[4] << 8 | netmask[5], netmask[6] << 8 | netmask[7], + netmask[8] << 8 | netmask[9], netmask[10] << 8 | netmask[11], + netmask[12] << 8 | netmask[13], netmask[14] << 8 | netmask[15]); + } + + return network.ToString() + "/" + strNetmask; +} + +bool CSubNet::IsValid() const +{ + return valid; +} + +bool operator==(const CSubNet& a, const CSubNet& b) +{ + return a.valid == b.valid && a.network == b.network && !memcmp(a.netmask, b.netmask, 16); +} + +bool operator<(const CSubNet& a, const CSubNet& b) +{ + 
return (a.network < b.network || (a.network == b.network && memcmp(a.netmask, b.netmask, 16) < 0)); +} diff --git a/src/netbase.h b/src/netbase.h index 42570750c9..bb10e0bfce 100644 --- a/src/netbase.h +++ b/src/netbase.h @@ -82,8 +82,50 @@ class CNetAddr ( READWRITE(FLATDATA(ip)); ) + + friend class CSubNet; +}; + + + +class CSubNet +{ + protected: + /// Network (base) address + CNetAddr network; + /// Netmask, in network byte order + uint8_t netmask[16]; + /// Is this value valid? (only used to signal parse errors) + bool valid; + + public: + CSubNet(); + CSubNet(const CNetAddr &addr, int32_t mask); + CSubNet(const CNetAddr &addr, const CNetAddr &mask); + + //constructor for single ip subnet (/32 or /128) + explicit CSubNet(const CNetAddr &addr); + + bool Match(const CNetAddr &addr) const; + + std::string ToString() const; + bool IsValid() const; + + friend bool operator==(const CSubNet& a, const CSubNet& b); + friend bool operator!=(const CSubNet& a, const CSubNet& b) { return !(a == b); } + friend bool operator<(const CSubNet& a, const CSubNet& b); + + ADD_SERIALIZE_METHODS; + + template + inline void SerializationOp(Stream& s, Operation ser_action) { + READWRITEVARIADIC(network); + READWRITEVARIADIC(netmask); + READWRITEVARIADIC(valid); + } }; + /** A combination of a network address (CNetAddr) and a (TCP) port */ class CService : public CNetAddr { @@ -140,6 +182,7 @@ bool LookupHost(const char *pszName, std::vector& vIP, unsigned int nM bool Lookup(const char *pszName, CService& addr, int portDefault = 0, bool fAllowLookup = true); bool Lookup(const char *pszName, std::vector& vAddr, int portDefault = 0, bool fAllowLookup = true, unsigned int nMaxSolutions = 0); bool LookupNumeric(const char *pszName, CService& addr, int portDefault = 0); +bool LookupSubNet(const char *pszName, CSubNet& subnet); bool ConnectSocket(const CService &addr, SOCKET& hSocketRet, int nTimeout = nConnectTimeout); bool ConnectSocketByName(CService &addr, SOCKET& hSocketRet, const char 
*pszDest, int portDefault = 0, int nTimeout = nConnectTimeout); diff --git a/src/neuralnet/neuralnet_native.cpp b/src/neuralnet/neuralnet_native.cpp index 5e15ce1d0b..e9ab801b38 100644 --- a/src/neuralnet/neuralnet_native.cpp +++ b/src/neuralnet/neuralnet_native.cpp @@ -13,7 +13,6 @@ extern std::string ExplainMagnitude(std::string sCPID); using namespace NN; extern Superblock ScraperGetSuperblockContract(bool bStoreConvergedStats = false, bool bContractDirectFromStatsUpdate = false); -extern QuorumHash ScraperGetSuperblockHash(); bool NeuralNetNative::IsEnabled() { @@ -34,7 +33,7 @@ std::string NeuralNetNative::GetNeuralHash() QuorumHash NeuralNetNative::GetSuperblockHash() { - return ScraperGetSuperblockHash(); + return GetSuperblockContract().GetHash(); } std::string NeuralNetNative::GetNeuralContract() diff --git a/src/neuralnet/superblock.cpp b/src/neuralnet/superblock.cpp index f7ea32af2b..2d661f84b5 100644 --- a/src/neuralnet/superblock.cpp +++ b/src/neuralnet/superblock.cpp @@ -1,5 +1,7 @@ #include "compat/endian.h" #include "neuralnet/superblock.h" +#include "scraper_net.h" +#include "sync.h" #include "util.h" #include @@ -10,7 +12,710 @@ using namespace NN; std::string ExtractXML(const std::string& XMLdata, const std::string& key, const std::string& key_end); std::string ExtractValue(std::string data, std::string delimiter, int pos); +// TODO: use a header +ScraperStats GetScraperStatsByConvergedManifest(ConvergedManifest& StructConvergedManifest); +ScraperStats GetScraperStatsFromSingleManifest(CScraperManifest &manifest); +unsigned int NumScrapersForSupermajority(unsigned int nScraperCount); +mmCSManifestsBinnedByScraper ScraperDeleteCScraperManifests(); +Superblock ScraperGetSuperblockContract(bool bStoreConvergedStats = false, bool bContractDirectFromStatsUpdate = false); + +extern CCriticalSection cs_ConvergedScraperStatsCache; +extern ConvergedScraperStats ConvergedScraperStatsCache; + namespace { +//! +//! 
//!
//! \brief Validates received superblocks against the local scraper manifest
//! data to prevent superblock statistics spoofing attacks.
//!
//! When a node publishes a superblock in a generated block, every node in the
//! network validates the superblock by comparing it to manifest data received
//! from the scrapers. For validation to pass, a node must generate a matching
//! superblock from the local manifest data.
//!
class SuperblockValidator
{
public:
    //!
    //! \brief Describes a superblock validation outcome.
    //!
    enum class Result
    {
        UNKNOWN,           //!< Not enough manifest data to try validation.
        INVALID,           //!< It does not match a valid set of manifest data.
        VALID_CURRENT,     //!< It matches the current cached convergence.
        VALID_PAST,        //!< It matches a cached past convergence.
        VALID_BY_MANIFEST, //!< It matches a single manifest supermajority.
        VALID_BY_PROJECT,  //!< It matches by-project fallback supermajority.
    };

    //!
    //! \brief Create a new validator for the provided superblock.
    //!
    //! \param superblock The superblock data to validate.
    //!
    SuperblockValidator(const Superblock& superblock)
        : m_superblock(superblock)
        , m_quorum_hash(superblock.GetHash())
    {
    }

    //!
    //! \brief Perform the validation of the superblock.
    //!
    //! Tries the cheapest comparisons first (cached current and recent-past
    //! convergences), then falls back to whole-manifest and finally by-project
    //! part resolution.
    //!
    //! \param use_cache If \c false, skip attempts to validate the superblock
    //! using the cached scraper convergences.
    //!
    //! \return A value that describes the outcome of the validation.
    //!
    Result Validate(const bool use_cache = true) const
    {
        const Superblock local_contract = ScraperGetSuperblockContract(true);

        // If we cannot produce a superblock for comparison from the local set
        // of manifest data, we don't have enough context to try to validate a
        // superblock. Skip the validation and rely on peers to validate it.
        //
        if (!local_contract.WellFormed()) {
            return Result::UNKNOWN;
        }

        if (use_cache) {
            if (m_quorum_hash == local_contract.GetHash()) {
                return Result::VALID_CURRENT;
            }

            LogPrintf("ValidateSuperblock(): No match to current convergence.");

            if (TryRecentPastConvergence()) {
                return Result::VALID_PAST;
            }
        }

        LogPrintf("ValidateSuperblock(): No match using cached convergence.");

        if (!m_superblock.ConvergedByProject() && TryByManifest()) {
            return Result::VALID_BY_MANIFEST;
        }

        LogPrintf("ValidateSuperblock(): No match by manifest.");

        if (m_superblock.ConvergedByProject() && TryProjectFallback()) {
            return Result::VALID_BY_PROJECT;
        }

        LogPrintf("ValidateSuperblock(): No match by project.");

        return Result::INVALID;
    }

private: // SuperblockValidator classes

    //!
    //! \brief Maintains the context of a whitelisted project for validating
    //! fallback-to-project convergence scenarios.
    //!
    struct ResolvedProject
    {
        //!
        //! \brief The manifest part hashes procured from the convergence hints
        //! in the superblock that may correspond to the parts of this project.
        //!
        //! The \c ProjectResolver will attempt to match these hashes to a part
        //! contained in a manifest for each scraper to find a supermajority.
        //!
        std::set<uint256> m_candidate_hashes;

        //!
        //! \brief The set of scrapers that produced the matching manifest part
        //! for the project.
        //!
        //! This set must hold at least the mininum number of scraper IDs for a
        //! supermajority for each project or the superblock validation fails.
        //!
        std::set<ScraperID> m_scrapers;

        //!
        //! \brief The manifest part hashes found in a manifest published by a
        //! scraper used to retrieve the part for the project to construct the
        //! convergence for comparison to the superblock.
        //!
        //! Keyed by timestamp.
        //!
        //! After successfully matching each of the convergence hints in the
        //! superblock to a manifest project, the \c ProjectResolver selects
        //! the most recent part of each \c ResolvedProject to construct the
        //! final convergence.
        //!
        std::map<int64_t, uint256> m_resolved_parts;

        //!
        //! \brief Initialize a new project context object.
        //!
        ResolvedProject()
        {
        }

        //!
        //! \brief Initialize a new project context object.
        //!
        //! \param candidate_hashes The manifest part hashes procured from the
        //! convergence hints in the superblock.
        //!
        ResolvedProject(std::set<uint256> candidate_hashes)
            : m_candidate_hashes(std::move(candidate_hashes))
        {
        }

        //!
        //! \brief Determine whether the supplied manifest part matches a part
        //! for the convergence of this project.
        //!
        //! \param part_hash The hash of a project part from a manifest.
        //!
        //! \return \c true if the part hash matches a part annotated for this
        //! project by a hint in the validated superblock.
        //!
        bool Expects(const uint256& part_hash) const
        {
            return m_candidate_hashes.count(part_hash);
        }

        //!
        //! \brief Get the most recent part resolved from a manifest for this
        //! project.
        //!
        //! Callers only invoke this after at least one part resolved, so the
        //! map is non-empty; rbegin() yields the newest timestamp.
        //!
        //! \return The hash of the part for this project used to construct a
        //! convergence for the validated superblock.
        //!
        uint256 MostRecentPartHash() const
        {
            return m_resolved_parts.rbegin()->second;
        }

        //!
        //! \brief Commit the part hash to this project and record the timestamp
        //! of the manifest.
        //!
        //! \param part_hash Hash of the candidate part to commit.
        //! \param time      Timestamp of the manifest that contains the part.
        //!
        void LinkPart(const uint256& part_hash, const int64_t time)
        {
            m_resolved_parts.emplace(time, part_hash);
        }
    };

    //!
    //! \brief Reconstructs by-project convergence from local manifest data
    //! based on project convergence hints from the superblock to produce a
    //! new superblock used for comparison.
    //!
    class ProjectResolver
    {
    public:
        //!
        //! \brief Initialize a new project resolver.
        //!
        //! \param candidate_parts      Map of project names to manifest part
        //! hashes produced from superblock convergence hints.
        //! \param manifests_by_scraper Manifest hashes grouped by scraper.
        //!
        ProjectResolver(
            std::map<std::string, std::set<uint256>> candidate_parts,
            mmCSManifestsBinnedByScraper manifests_by_scraper)
            : m_manifests_by_scraper(std::move(manifests_by_scraper))
            , m_supermajority(NumScrapersForSupermajority(m_manifests_by_scraper.size()))
            , m_latest_manifest_timestamp(0)
        {
            for (auto&& project_part_pair : candidate_parts) {
                m_resolved_projects.emplace(
                    std::move(project_part_pair.first),
                    ResolvedProject(std::move(project_part_pair.second)));
            }
        }

        //!
        //! \brief Scan the local manifest data to reconstruct a supermajority
        //! of manifest parts that match the candidate parts in the superblock.
        //!
        //! \return \c false after failing to resolve a matching part for each
        //! of the projects in the superblock, or when the superblock contains
        //! only some of the converged projects.
        //!
        bool ResolveProjectParts()
        {
            // Collect the manifest parts that match the hints in the superblock
            // and record missing projects, if any:
            //
            for (const auto& by_scraper : m_manifests_by_scraper) {
                const ScraperID& scraper_id = by_scraper.first;
                const mCSManifest& hashes_by_time = by_scraper.second;

                for (const auto& hashes : hashes_by_time) {
                    const uint256& manifest_hash = hashes.second.first;

                    ResolvePartsFor(manifest_hash, scraper_id);
                }
            }

            // Check that superblock is not missing a project that the scrapers
            // successfully converged on:
            //
            for (const auto& project_pair : m_other_projects) {
                if (project_pair.second.size() >= m_supermajority) {
                    LogPrintf(
                        "ValidateSuperblock(): fallback by-project resolution "
                        "failed. Converged project %s missing from superblock.",
                        project_pair.first);

                    return false;
                }
            }

            // Check that each of the project parts hinted in the superblock
            // matched a manifest part that the scrapers converged on:
            //
            for (const auto& project_pair : m_resolved_projects) {
                if (project_pair.second.m_resolved_parts.empty()) {
                    LogPrintf(
                        "ValidateSuperblock(): fallback by-project resolution "
                        "failed. No manifest parts matched project %s.",
                        project_pair.first);

                    return false;
                }

                if (project_pair.second.m_scrapers.size() < m_supermajority) {
                    LogPrintf(
                        "ValidateSuperblock(): fallback by-project resolution "
                        "failed. No supermajority exists for project %s.",
                        project_pair.first);

                    return false;
                }
            }

            return true;
        }

        //!
        //! \brief Construct a superblock from the local manifest data that
        //! matches the most recent set of resolved project parts.
        //!
        //! \return A new superblock instance to compare to the superblock
        //! under validation.
        //!
        Superblock BuildSuperblock() const
        {
            ConvergedManifest convergence;

            {
                LOCK(CScraperManifest::cs_mapManifest);

                const auto iter = CScraperManifest::mapManifest.find(m_latest_manifest_hash);

                // If the manifest for the beacon list disappeared, we cannot
                // proceed, but the most recent manifest should always exist:
                if (iter == CScraperManifest::mapManifest.end()) {
                    LogPrintf("ValidateSuperblock(): beacon list manifest disappeared.");
                    return Superblock();
                }

                convergence.ConvergedManifestPartsMap.emplace(
                    "BeaconList",
                    iter->second->vParts[0]->data);
            }

            {
                LOCK(CSplitBlob::cs_mapParts);

                for (const auto& project_pair : m_resolved_projects) {
                    const auto iter = CSplitBlob::mapParts.find(
                        project_pair.second.MostRecentPartHash());

                    // If the resolved part disappeared, we cannot proceed, but
                    // the most recent project part should always exist:
                    if (iter == CSplitBlob::mapParts.end()) {
                        LogPrintf("ValidateSuperblock(): project part disappeared.");
                        return Superblock();
                    }

                    convergence.ConvergedManifestPartsMap.emplace(
                        project_pair.first,  // project name
                        iter->second.data);  // serialized part data
                }
            }

            return Superblock::FromStats(GetScraperStatsByConvergedManifest(convergence));
        }

    private:
        //!
        //! \brief Manifest hashes grouped by scraper to resolve a by-project
        //! fallback convergence from.
        //!
        const mmCSManifestsBinnedByScraper m_manifests_by_scraper;

        //!
        //! \brief The number of scrapers that must agree about a project to
        //! consider it valid in a superblock.
        //!
        const size_t m_supermajority;

        //!
        //! \brief Contains the project resolution context for each of the
        //! projects hinted in the superblock.
        //!
        //! Keyed by project name.
        //!
        std::map<std::string, ResolvedProject> m_resolved_projects;

        //!
        //! \brief Contains the scrapers that published manifest data for any
        //! projects not hinted in the superblock.
        //!
        //! Keyed by project name. A scraper supermajority for any project
        //! disqualifies the superblock (it failed to include the project).
        //!
        std::map<std::string, std::set<ScraperID>> m_other_projects;

        //!
        //! \brief Timestamp of the most recent manifest matched to a project
        //! in the superblock.
        //!
        int64_t m_latest_manifest_timestamp;

        //!
        //! \brief Hash of the most recent manifest matched to a project in the
        //! superblock. The beacon list part of the resolved convergence result
        //! comes from this manifest.
        //!
        uint256 m_latest_manifest_hash;

        //!
        //! \brief Record the supplied scraper ID for the specified project to
        //! track supermajority status.
        //!
        //! \return A reference to the project resolution state if the project
        //! exists in the superblock.
        //!
        boost::optional<ResolvedProject&>
        TallyProject(const std::string& project, const ScraperID& scraper_id)
        {
            if (m_resolved_projects.count(project)) {
                ResolvedProject& resolved = m_resolved_projects.at(project);

                resolved.m_scrapers.emplace(scraper_id);

                return resolved;
            }

            // Not hinted in the superblock--track it so a supermajority can
            // disqualify the superblock later:
            m_other_projects[project].emplace(scraper_id);

            return boost::none;
        }

        //!
        //! \brief Store the hash and timestamp of the specified manifest if
        //! the timestamp is more recent than the last seen manifest.
        //!
        //! After resolving each project part, the most recent matching manifest
        //! will provide the manifest part for the convergence beacon list.
        //!
        //! \param hash The manifest hash to store.
        //! \param time Timestamp of the manifest to compare.
        //!
        void RecordLatestManifest(const uint256& hash, const int64_t time)
        {
            if (time > m_latest_manifest_timestamp) {
                m_latest_manifest_timestamp = time;
                m_latest_manifest_hash = hash;
            }
        }

        //!
        //! \brief Match each part hinted by the superblock to a local manifest
        //! published by each scraper.
        //!
        //! \param manifest_hash Hash of the manifest to match parts for.
        //! \param scraper_id    Used to establish supermajority for a manifest.
        //!
        void ResolvePartsFor(const uint256& manifest_hash, const ScraperID& scraper_id)
        {
            LOCK(CScraperManifest::cs_mapManifest);

            const auto iter = CScraperManifest::mapManifest.find(manifest_hash);

            if (iter == CScraperManifest::mapManifest.end()) {
                return;
            }

            const CScraperManifest& manifest = *iter->second;

            for (const auto& entry : manifest.projects) {
                auto project_option = TallyProject(entry.project, scraper_id);

                // If this project does not exist in the superblock, skip the
                // attempt to associate its parts:
                //
                if (!project_option) {
                    continue;
                }

                for (const auto& part : manifest.vParts) {
                    if (project_option->Expects(part->hash)) {
                        project_option->LinkPart(part->hash, entry.LastModified);
                        RecordLatestManifest(manifest_hash, entry.LastModified);
                    }
                }
            }
        }
    }; // ProjectResolver

private: // SuperblockValidator fields

    const Superblock& m_superblock; //!< Points to the superblock to validate.
    const QuorumHash m_quorum_hash; //!< Hash of the superblock to validate.

private: // SuperblockValidator methods

    //!
    //! \brief Validate the superblock by comparing it to recent past converged
    //! manifests in the cache.
    //!
    //! The scraper convergence cache stores a set of previous convergences for
    //! the current superblock cycle with matching superblock hashes. This step
    //! accepts superblocks if the embedded converged manifest hash hints match
    //! a hash of a past convergence in this cache.
    //!
    //! \return \c true if the hinted convergence's superblock hash matches the
    //! hash of the validated superblock.
    //!
    bool TryRecentPastConvergence() const
    {
        LOCK(cs_ConvergedScraperStatsCache);

        const uint32_t hint = m_superblock.m_convergence_hint;
        const auto iter = ConvergedScraperStatsCache.PastConvergences.find(hint);

        return iter != ConvergedScraperStatsCache.PastConvergences.end()
            && iter->second.first == m_quorum_hash;
    }

    //!
    //! \brief Validate the superblock by comparing it to all of the manifests
    //! stored locally on the node with a content hash that matches the hint.
    //!
    //! When no cached manifest matches the superblock, we can try to match it
    //! to a single manifest with a content hash that the supermajority of the
    //! scrapers agree on. This routine only selects the manifests that have a
    //! content hash corresponding to the convergence hint in the superblock.
    //!
    //! \return \c true when the hinted manifest's superblock hash matches the
    //! hash of the validated superblock.
    //!
    bool TryByManifest() const
    {
        const mmCSManifestsBinnedByScraper manifests_by_scraper = ScraperDeleteCScraperManifests();
        const size_t supermajority = NumScrapersForSupermajority(manifests_by_scraper.size());

        std::map<uint256, size_t> content_hash_tally;

        for (const auto& by_scraper : manifests_by_scraper) {
            const mCSManifest& hashes_by_time = by_scraper.second;

            for (const auto& hashes : hashes_by_time) {
                const uint256& manifest_hash = hashes.second.first;
                const uint256& content_hash = hashes.second.second;

                // The hint is the high 32 bits of the content hash:
                if (content_hash.Get64() >> 32 == m_superblock.m_manifest_content_hint) {
                    // emplace() is a no-op when the key exists; its .second
                    // tells us whether to increment instead:
                    if (!content_hash_tally.emplace(content_hash, 1).second) {
                        content_hash_tally[content_hash]++;
                    }

                    if (fDebug) {
                        LogPrintf(
                            "ValidateSuperblock(): manifest content hash %s "
                            "matched convergence hint. Matches %" PRIszu,
                            content_hash.ToString(),
                            content_hash_tally[content_hash]);
                    }

                    if (content_hash_tally[content_hash] >= supermajority) {
                        if (fDebug) {
                            LogPrintf(
                                "ValidateSuperblock(): supermajority found for "
                                "manifest content hash: %s. Trying validation.",
                                content_hash.ToString());
                        }

                        if (TryManifest(manifest_hash)) {
                            return true;
                        } else {
                            // Disqualify this content hash:
                            content_hash_tally[content_hash] = 0;
                        }
                    }
                }
            }
        }

        return false;
    }

    //!
    //! \brief Validate the superblock by comparing the specified manifest.
    //!
    //! \return \c true if the specified manifest builds a superblock that
    //! matches the validated superblock.
    //!
    bool TryManifest(const uint256& manifest_hash) const
    {
        CScraperManifest manifest;

        {
            LOCK(CScraperManifest::cs_mapManifest);

            const auto iter = CScraperManifest::mapManifest.find(manifest_hash);

            if (iter == CScraperManifest::mapManifest.end()) {
                LogPrintf("ValidateSuperblock(): manifest not found");
                return false;
            }

            // This is a copy on purpose to minimize lock time.
            manifest = *iter->second;
        }

        return TryManifest(manifest);
    }

    //!
    //! \brief Validate the superblock by comparing the provided manifest.
    //!
    //! \return \c true if the provided manifest builds a superblock that
    //! matches the validated superblock.
    //!
    bool TryManifest(CScraperManifest& manifest) const
    {
        const ScraperStats stats = GetScraperStatsFromSingleManifest(manifest);

        return Superblock::FromStats(stats).GetHash() == m_quorum_hash;
    }

    //!
    //! \brief Validate the superblock by comparing it to a convergence created
    //! by matching the project convergence hints in that superblock to project
    //! parts agreed upon by a supermajority of the scrapers.
    //!
    //! In the fallback-to-project-level convergence scenario, superblocks will
    //! contain project convergence hints for the manifest parts used to create
    //! the superblock. This routine matches each project convergence hint to a
    //! manifest part for each project. If a node can reconstruct a convergence
    //! from those parts agreed upon by a supermajority of scrapers using local
    //! manifest data, validation passes for the superblock.
    //!
    //! Validation will fail if the reconstructed convergence contains projects
    //! not present in the superblock.
    //!
    //! \return \c true if the reconstructed convergence by project builds a
    //! superblock that matches the validated superblock.
    //!
    bool TryProjectFallback() const
    {
        const std::map<uint32_t, std::string> hints = CollectPartHints();
        std::map<std::string, std::set<uint256>> candidates = CollectCandidateParts(hints);

        if (candidates.size() != hints.size()) {
            LogPrintf(
                "ValidateSuperblock(): fallback by-project resolution failed. "
                "Could not match every project convergence hint to a part.");

            return false;
        }

        ProjectResolver resolver(std::move(candidates), ScraperDeleteCScraperManifests());

        if (!resolver.ResolveProjectParts()) {
            return false;
        }

        return resolver.BuildSuperblock().GetHash() == m_quorum_hash;
    }

    //!
    //! \brief Build a collection of project-level convergence hints from the
    //! superblock.
    //!
    //! \return A map of manifest project part hints to project names.
    //!
    std::map<uint32_t, std::string> CollectPartHints() const
    {
        std::map<uint32_t, std::string> hints;

        for (const auto& project_pair : m_superblock.m_projects) {
            hints.emplace(
                project_pair.second.m_convergence_hint,
                project_pair.first); // Project name
        }

        return hints;
    }

    //!
    //! \brief Build a collection of manifest project part hashes from the
    //! supplied convergence hints.
    //!
    //! \return A map of project names to manifest project part hashes.
    //!
    std::map<std::string, std::set<uint256>>
    CollectCandidateParts(const std::map<uint32_t, std::string>& hints) const
    {
        struct Candidate
        {
            std::string m_project_name;
            std::set<uint256> m_part_hashes;
        };

        std::map<uint32_t, Candidate> candidates;

        {
            LOCK(CSplitBlob::cs_mapParts);

            for (const auto& part_pair : CSplitBlob::mapParts) {
                // The hint is the high 32 bits of the part hash:
                uint32_t hint = part_pair.second.hash.Get64() >> 32;

                const auto hint_iter = hints.find(hint);

                if (hint_iter != hints.end()) {
                    auto iter_pair = candidates.emplace(hint, Candidate());
                    Candidate& candidate = iter_pair.first->second;

                    // Set the project name if we just inserted a new candidate:
                    if (iter_pair.second) {
                        candidate.m_project_name = hint_iter->second;
                    }

                    candidate.m_part_hashes.emplace(part_pair.second.hash);
                }
            }
        }

        // Pivot the candidate part hashes that match the hints into a map keyed
        // by project names:
        //
        std::map<std::string, std::set<uint256>> candidates_by_project;

        for (auto&& candidate_pair : candidates) {
            candidates_by_project.emplace(
                std::move(candidate_pair.second.m_project_name),
                std::move(candidate_pair.second.m_part_hashes));
        }

        return candidates_by_project;
    }
}; // SuperblockValidator
@@ -231,6 +936,43 @@ static_assert(offsetof(struct BinaryResearcher, magnitude) == // Functions // ----------------------------------------------------------------------------- +bool NN::ValidateSuperblock(const Superblock& superblock, const bool use_cache) +{ + using Result = SuperblockValidator::Result; + + const Result result = SuperblockValidator(superblock).Validate(use_cache); + std::string message; + + switch (result) { + case Result::UNKNOWN: + message = "UNKNOWN - Waiting for manifest data"; + break; + case Result::INVALID: + message = "INVALID - Validation failed"; + break; + case Result::VALID_CURRENT: + message = "VALID_CURRENT - Matched current cached convergence"; + break; + case Result::VALID_PAST: + message = "VALID_PAST - Matched past cached convergence"; + break; + case Result::VALID_BY_MANIFEST: + message = "VALID_BY_MANIFEST - Matched supermajority by manifest"; + break; + case Result::VALID_BY_PROJECT: + message = "VALID_BY_PROJECT - Matched supermajority by project"; + break; + } + + LogPrintf("ValidateSuperblock(): %s.", message); + + return result != Result::INVALID; +} + +// ----------------------------------------------------------------------------- +// Legacy Functions +// ----------------------------------------------------------------------------- + std::string UnpackBinarySuperblock(std::string sBlock) { // 12-21-2015: R HALFORD: If the block is not binary, return the legacy format for backward compatibility @@ -313,6 +1055,8 @@ std::string PackBinarySuperblock(std::string sBlock) Superblock::Superblock() : m_version(Superblock::CURRENT_VERSION) + , m_convergence_hint(0) + , m_manifest_content_hint(0) , m_height(0) , m_timestamp(0) { @@ -320,11 +1064,39 @@ Superblock::Superblock() Superblock::Superblock(uint32_t version) : m_version(version) + , m_convergence_hint(0) + , m_manifest_content_hint(0) , m_height(0) , m_timestamp(0) { } +Superblock Superblock::FromConvergence(const ConvergedScraperStats& stats) +{ + Superblock superblock = 
Superblock::FromStats(stats.mScraperConvergedStats); + + superblock.m_convergence_hint = stats.Convergence.nContentHash.Get64() >> 32; + + if (!stats.Convergence.bByParts) { + superblock.m_manifest_content_hint = stats.Convergence.nUnderlyingManifestContentHash.Get64() >> 32; + return superblock; + } + + ProjectIndex& projects = superblock.m_projects; + + // Add hints created from the hashes of converged manifest parts to each + // superblock project section to assist receiving nodes with validation: + // + for (const auto& part_pair : stats.Convergence.ConvergedManifestPartsMap) { + const std::string& project_name = part_pair.first; + const CSerializeData& part_data = part_pair.second; + + projects.SetHint(project_name, part_data); + } + + return superblock; +} + Superblock Superblock::FromStats(const ScraperStats& stats) { // The loop below depends on the relative value of these enum types: @@ -379,6 +1151,10 @@ Superblock Superblock::FromStats(const ScraperStats& stats) Superblock Superblock::UnpackLegacy(const std::string& packed) { + if (packed.empty()) { + return Superblock(1); + } + // Legacy-packed superblocks always initialize to version 1: Superblock superblock(1); LegacySuperblockParser legacy(packed); @@ -418,11 +1194,32 @@ std::string Superblock::PackLegacy() const return out.str(); } +bool Superblock::WellFormed() const +{ + return m_version > 0 && m_version <= Superblock::CURRENT_VERSION + && !m_cpids.empty() + && !m_projects.empty(); +} + +bool Superblock::ConvergedByProject() const +{ + return m_projects.m_converged_by_project; +} + int64_t Superblock::Age() const { return GetAdjustedTime() - m_timestamp; } +QuorumHash Superblock::GetHash(const bool regenerate) const +{ + if (!m_hash_cache.Valid() || regenerate) { + m_hash_cache = QuorumHash::Hash(*this); + } + + return m_hash_cache; +} + // ----------------------------------------------------------------------------- // Class: Superblock::CpidIndex // 
----------------------------------------------------------------------------- @@ -545,6 +1342,7 @@ Superblock::ProjectStats::ProjectStats() : m_total_credit(0) , m_average_rac(0) , m_rac(0) + , m_convergence_hint(0) { } @@ -555,6 +1353,7 @@ Superblock::ProjectStats::ProjectStats( : m_total_credit(total_credit) , m_average_rac(average_rac) , m_rac(rac) + , m_convergence_hint(0) { } @@ -562,6 +1361,7 @@ Superblock::ProjectStats::ProjectStats(uint64_t average_rac, uint64_t rac) : m_total_credit(0) , m_average_rac(average_rac) , m_rac(rac) + , m_convergence_hint(0) { } @@ -569,7 +1369,9 @@ Superblock::ProjectStats::ProjectStats(uint64_t average_rac, uint64_t rac) // Class: Superblock::ProjectIndex // ----------------------------------------------------------------------------- -Superblock::ProjectIndex::ProjectIndex() : m_total_rac(0) +Superblock::ProjectIndex::ProjectIndex() + : m_converged_by_project(false) + , m_total_rac(0) { } @@ -632,6 +1434,22 @@ void Superblock::ProjectIndex::Add(std::string name, const ProjectStats& stats) } } +void Superblock::ProjectIndex::SetHint( + const std::string& name, + const CSerializeData& part_data) +{ + auto iter = m_projects.find(name); + + if (iter == m_projects.end()) { + return; + } + + const uint256 part_hash = Hash(part_data.begin(), part_data.end()); + iter->second.m_convergence_hint = part_hash.Get64() >> 32; + + m_converged_by_project = true; +} + // ----------------------------------------------------------------------------- // Class: QuorumHash // ----------------------------------------------------------------------------- @@ -787,3 +1605,15 @@ std::string QuorumHash::ToString() const { return boost::apply_visitor(QuorumHashToStringVisitor(), m_hash); } + +unsigned int QuorumHash::GetSerializeSize(int nType, int nVersion) const +{ + switch (Which()) { + case Kind::SHA256: return 1 + sizeof(uint256); + case Kind::MD5: return 1 + sizeof(Md5Sum); + + // For variants without any associated data, we serialize the variant + 
// tag only as a single byte: + default: return 1; + } +} diff --git a/src/neuralnet/superblock.h b/src/neuralnet/superblock.h index 889bef31ca..ef54cd619d 100644 --- a/src/neuralnet/superblock.h +++ b/src/neuralnet/superblock.h @@ -9,11 +9,224 @@ #include #include +extern int64_t SCRAPER_CMANIFEST_RETENTION_TIME; + std::string UnpackBinarySuperblock(std::string block); std::string PackBinarySuperblock(std::string sBlock); +class ConvergedScraperStats; // Forward for Superblock + namespace NN { -class QuorumHash; // Forward for Superblock +class Superblock; // Forward for QuorumHash + +//! +//! \brief Hashes and stores the digest of a superblock. +//! +class QuorumHash +{ +public: + //! + //! \brief Internal representation of the result of a legacy MD5-based + //! superblock hash. + //! + typedef std::array Md5Sum; + + //! + //! \brief Describes the kind of hash contained in a \c QuorumHash object. + //! + enum class Kind + { + INVALID, //!< An empty or invalid quorum hash. + SHA256, //!< Hash created for superblocks version 2 and greater. + MD5, //!< Legacy hash created for superblocks before version 2. + }; + + //! + //! \brief A tag type that describes an empty or invalid quorum hash. + //! + struct Invalid { }; + + //! + //! \brief Initialize an invalid quorum hash object. + //! + QuorumHash(); + + //! + //! \brief Initialize a SHA256 quorum hash object variant. + //! + //! \param hash Contains the bytes of the superblock digest produced by + //! applying a SHA256 hashing algorithm to the significant data. + //! + QuorumHash(uint256 hash); + + //! + //! \brief Initialize an MD5 quorum hash object variant. + //! + //! \param hash Contains the bytes of the superblock digest produced by the + //! legacy MD5-based superblock hashing algorithm ("neural hash"). + //! + QuorumHash(Md5Sum legacy_hash); + + //! + //! \brief Initialize the appropriate quorum hash variant from the supplied + //! bytes. + //! + //! 
Initializes to an invalid hash variant when the bytes do not represent + //! a valid quorum hash. + //! + //! \param bytes 32 bytes for a SHA256 hash or 16 bytes for a legacy MD5 + //! hash. + //! + QuorumHash(const std::vector& bytes); + + //! + //! \brief Hash the provided superblock. + //! + //! \param superblock Superblock object containing the data to hash. + //! + //! \return The appropriate quorum hash variant digest depending on the + //! version number of the superblock. + //! + static QuorumHash Hash(const Superblock& superblock); + + //! + //! \brief Initialize a quorum hash object by parsing the supplied string + //! representation of a hash. + //! + //! \param hex A 64-character hex-encoded string for a SHA256 hash, or a + //! 32-character hex-encoded string for a legacy MD5 hash. + //! + //! \return A quorum hash object that contains the bytes of the hash value + //! represented by the string or an invalid quorum hash if the string does + //! not contain a well-formed MD5 or SHA256 hash. + //! + static QuorumHash Parse(const std::string& hex); + + bool operator==(const QuorumHash& other) const; + bool operator!=(const QuorumHash& other) const; + bool operator==(const uint256& other) const; + bool operator!=(const uint256& other) const; + bool operator==(const std::string& other) const; + bool operator!=(const std::string& other) const; + + //! + //! \brief Describe the type of hash contained. + //! + //! \return A value enumerated on \c QuorumHash::Kind . + //! + Kind Which() const; + + //! + //! \brief Determine whether the object contains a valid superblock hash. + //! + //! \return \c true if the object contains a SHA256 or legacy MD5 hash. + //! + bool Valid() const; + + //! + //! \brief Get a pointer to the bytes in the hash. + //! + //! \return A pointer to the beginning of the bytes in the hash, or a + //! \c nullptr value if the object contains an invalid hash. + //! + const unsigned char* Raw() const; + + //! + //! 
\brief Get the string representation of the hash. + //! + //! \return A 64-character hex-encoded string for a SHA256 hash, or a + //! 32-character hex-encoded string for a legacy MD5 hash. Returns an + //! empty string for an invalid hash. + //! + std::string ToString() const; + + //! + //! \brief Get the size of the data to serialize. + //! + //! \param nType Target protocol type (network, disk, etc.). + //! \param nVersion Protocol version. + //! + //! \return Size of the data in bytes. + //! + unsigned int GetSerializeSize(int nType, int nVersion) const; + + //! + //! \brief Serialize the object to the provided stream. + //! + //! \param stream The output stream. + //! \param nType Target protocol type (network, disk, etc.). + //! \param nVersion Protocol version. + //! + template + void Serialize(Stream& stream, int nType, int nVersion) const + { + unsigned char kind = m_hash.which(); + + ::Serialize(stream, kind, nType, nVersion); + + switch (static_cast(kind)) { + case Kind::INVALID: + break; // Suppress warning. + + case Kind::SHA256: + boost::get(m_hash).Serialize(stream, nType, nVersion); + break; + + case Kind::MD5: { + const Md5Sum& hash = boost::get(m_hash); + + FLATDATA(hash).Serialize(stream, nType, nVersion); + break; + } + } + } + + //! + //! \brief Deserialize the object from the provided stream. + //! + //! \param stream The input stream. + //! \param nType Target protocol type (network, disk, etc.). + //! \param nVersion Protocol version. + //! + template + void Unserialize(Stream& stream, int nType, int nVersion) + { + unsigned char kind; + + ::Unserialize(stream, kind, nType, nVersion); + + switch (static_cast(kind)) { + case Kind::SHA256: { + uint256 hash; + hash.Unserialize(stream, nType, nVersion); + + m_hash = hash; + break; + } + + case Kind::MD5: { + Md5Sum hash; + FLATDATA(hash).Unserialize(stream, nType, nVersion); + + m_hash = hash; + break; + } + + default: + m_hash = Invalid(); + break; + } + } + +private: + //! + //! 
\brief Contains the bytes of a SHA256 or MD5 digest. + //! + //! CONSENSUS: Do not remove or reorder the types in this variant. This + //! class relies on the type ordinality to tag serialized values. + //! + boost::variant m_hash; +}; // QuorumHash //! //! \brief A snapshot of BOINC statistics used to calculate and verify research @@ -320,11 +533,28 @@ class Superblock uint64_t m_average_rac; //!< Average project recent average credit. uint64_t m_rac; //!< Sum of the RAC of all the project CPIDs. + //! + //! \brief A truncated hash of the converged manifest part that forms + //! the project statistics. + //! + //! For fallback-to-project-level convergence scenarios, \c ProjectStats + //! objects include the hash of the manifest part to aid receiving nodes + //! with superblock validation. The hash is truncated to conserve space. + //! + uint32_t m_convergence_hint; + IMPLEMENT_SERIALIZE ( READWRITE(VARINT(m_total_credit)); READWRITE(VARINT(m_average_rac)); READWRITE(VARINT(m_rac)); + + // Only serialize and deserialize the convegence hint in fallback- + // to-project-level convergences: + // + if (nType & ProjectIndex::SER_CONVERGED_BY_PROJECT) { + READWRITE(m_convergence_hint); + } ) }; @@ -344,6 +574,21 @@ class Superblock typedef std::map::iterator iterator; typedef std::map::const_iterator const_iterator; + //! + //! \brief A serialization flag used to pass fallback-to-project-level + //! convergence context to \c ProjectStats serialization routines. + //! + //! The selected serialization modifier value will not conflict with + //! those enumerated in serialize.h. + //! + static constexpr int SER_CONVERGED_BY_PROJECT = 24; + + //! + //! \brief Indicates that the superblock was generated from a fallback- + //! to-project-level convergence. + //! + bool m_converged_by_project; + //! //! \brief Initialize an empty project index. //! @@ -407,8 +652,26 @@ class Superblock //! void Add(std::string name, const ProjectStats& stats); + //! + //! 
\brief Set the convergence part hint for the specified project. + //! + //! \param part_data The convergence part to create the hint from. + //! + void SetHint(const std::string& name, const CSerializeData& part_data); + IMPLEMENT_SERIALIZE ( + if (!(nType & SER_GETHASH)) { + READWRITE(m_converged_by_project); + + // Trigger serialization of ProjectStats convergence hints for + // superblocks generated by a fallback-to-project convergence: + // + if (m_converged_by_project) { + nType |= SER_CONVERGED_BY_PROJECT; + } + } + READWRITE(m_projects); // Tally up the recent average credit after deserializing. @@ -454,6 +717,16 @@ class Superblock //! uint32_t m_version = CURRENT_VERSION; + //! + //! \brief The truncated scraper convergence content hash and underlying + //! manifest content hash (they are computed differently). + //! + //! These values aid receiving nodes with validation for superblocks created + //! from past convergence data. + //! + uint32_t m_convergence_hint; + uint32_t m_manifest_content_hint; + CpidIndex m_cpids; //!< Maps superblock CPIDs to magntudes. ProjectIndex m_projects; //!< Whitelisted projects statistics. //std::vector m_verified_beacons; @@ -465,6 +738,8 @@ class Superblock ( if (!(nType & SER_GETHASH)) { READWRITE(m_version); + READWRITE(m_convergence_hint); + READWRITE(m_manifest_content_hint); } nVersion = m_version; @@ -486,6 +761,18 @@ class Superblock //! Superblock(uint32_t version); + //! + //! \brief Initialize a superblock from the provided converged scraper + //! statistics. + //! + //! \param stats Converged statistics containing CPID and project credit + //! data. + //! + //! \return A new superblock instance that contains the imported scraper + //! statistics. + //! + static Superblock FromConvergence(const ConvergedScraperStats& stats); + //! //! \brief Initialize a superblock from the provided scraper statistics. //! @@ -524,140 +811,66 @@ class Superblock std::string PackLegacy() const; //! - //! 
\brief Get the current age of the superblock. - //! - //! \return Superblock age in seconds. - //! - int64_t Age() const; -}; // Superblock - -//! -//! \brief Hashes and stores the digest of a superblock. -//! -class QuorumHash -{ -public: - //! - //! \brief Internal representation of the result of a legacy MD5-based - //! superblock hash. - //! - typedef std::array Md5Sum; - - //! - //! \brief Describes the kind of hash contained in a \c QuorumHash object. + //! \brief Determine whether the instance represents a complete superblock. //! - enum class Kind - { - INVALID, //!< An empty or invalid quorum hash. - SHA256, //!< Hash created for superblocks version 2 and greater. - MD5, //!< Legacy hash created for superblocks before version 2. - }; - + //! \return \c true if the superblock contains all of the required elements. //! - //! \brief A tag type that describes an empty or invalid quorum hash. - //! - struct Invalid { }; + bool WellFormed() const; //! - //! \brief Initialize an invalid quorum hash object. - //! - QuorumHash(); - + //! \brief Determine whether the superblock was generated from a fallback- + //! to-project-level scraper convergence. //! - //! \brief Initialize a SHA256 quorum hash object variant. + //! \return \c true if the ProjectIndex fallback convergence flag is set. //! - //! \param hash Contains the bytes of the superblock digest produced by - //! applying a SHA256 hashing algorithm to the significant data. - //! - QuorumHash(uint256 hash); + bool ConvergedByProject() const; //! - //! \brief Initialize an MD5 quorum hash object variant. - //! - //! \param hash Contains the bytes of the superblock digest produced by the - //! legacy MD5-based superblock hashing algorithm ("neural hash"). - //! - QuorumHash(Md5Sum legacy_hash); - - //! - //! \brief Initialize the appropriate quorum hash variant from the supplied - //! bytes. - //! - //! Initializes to an invalid hash variant when the bytes do not represent - //! a valid quorum hash. + //! 
\brief Get the current age of the superblock. //! - //! \param bytes 32 bytes for a SHA256 hash or 16 bytes for a legacy MD5 - //! hash. + //! \return Superblock age in seconds. //! - QuorumHash(const std::vector& bytes); + int64_t Age() const; //! - //! \brief Hash the provided superblock. + //! \brief Get a hash of the significant data in the superblock. //! - //! \param superblock Superblock object containing the data to hash. + //! \param regenerate If \c true, skip selection of any cached hash value + //! and recompute the hash. //! - //! \return The appropriate quorum hash variant digest depending on the - //! version number of the superblock. + //! \return A quorum hash object that contiains a SHA256 hash for version + //! 2+ superblocks or an MD5 hash for legacy version 1 superblocks. //! - static QuorumHash Hash(const Superblock& superblock); + QuorumHash GetHash(const bool regenerate = false) const; +private: //! - //! \brief Initialize a quorum hash object by parsing the supplied string - //! representation of a hash. - //! - //! \param hex A 64-character hex-encoded string for a SHA256 hash, or a - //! 32-character hex-encoded string for a legacy MD5 hash. - //! - //! \return A quorum hash object that contains the bytes of the hash value - //! represented by the string or an invalid quorum hash if the string does - //! not contain a well-formed MD5 or SHA256 hash. - //! - static QuorumHash Parse(const std::string& hex); - - bool operator==(const QuorumHash& other) const; - bool operator!=(const QuorumHash& other) const; - bool operator==(const uint256& other) const; - bool operator!=(const uint256& other) const; - bool operator==(const std::string& other) const; - bool operator!=(const std::string& other) const; - - //! - //! \brief Describe the type of hash contained. - //! - //! \return A value enumerated on \c QuorumHash::Kind . - //! - Kind Which() const; - - //! - //! \brief Determine whether the object contains a valid superblock hash. - //! - //! 
\return \c true if the object contains a SHA256 or legacy MD5 hash. - //! - bool Valid() const; - + //! \brief The most recently-regenerated quorum hash of the superblock. //! - //! \brief Get a pointer to the bytes in the hash. + //! Because of their size, superblocks are expensive to hash. A superblock + //! caches its quorum hash when calling the GetHash() method to speed up a + //! subsequent hash request. The block acceptance pipeline may need a hash + //! value in several places when processing a superblock. //! - //! \return A pointer to the beginning of the bytes in the hash, or a - //! \c nullptr value if the object contains an invalid hash. + //! The cached value is NOT invalidated when modifying a superblock. Call + //! the GetHash() method with the argument set to \c true to regenerate a + //! cached quorum hash. A received superblock's significant data is never + //! modified, so the need to regenerate a cached hash will rarely occur. //! - const unsigned char* Raw() const; + mutable QuorumHash m_hash_cache; +}; // Superblock - //! - //! \brief Get the string representation of the hash. - //! - //! \return A 64-character hex-encoded string for a SHA256 hash, or a - //! 32-character hex-encoded string for a legacy MD5 hash. Returns an - //! empty string for an invalid hash. - //! - std::string ToString() const; -private: - //! - //! \brief Contains the bytes of a SHA256 or MD5 digest. - //! - boost::variant m_hash; -}; // QuorumHash +//! +//! \brief Validate the supplied superblock by comparing it to local manifest +//! data. +//! +//! \param superblock The superblock to validate. +//! \param use_cache If \c false, skip validation with the scraper cache. +//! +//! \return \c True if the local manifest data produces a matching superblock. +//! 
+bool ValidateSuperblock(const Superblock& superblock, const bool use_cache = true); } // namespace NN namespace std { @@ -721,13 +934,17 @@ struct ConvergedScraperStats ScraperStats mScraperConvergedStats; ConvergedManifest Convergence; + // There is a small chance of collision on the key, but given this is really a hint map, + // It is okay. + // reduced nContentHash ------ SB Hash ---- Converged Manifest object + std::map> PastConvergences; + // Legacy superblock contract and hash. std::string sContractHash; std::string sContract; // New superblock object and hash. NN::Superblock NewFormatSuperblock; - NN::QuorumHash nNewFormatSuperblockHash; uint32_t GetVersion() { @@ -738,4 +955,41 @@ struct ConvergedScraperStats return nVersion; } + void AddConvergenceToPastConvergencesMap() + { + uint32_t nReducedContentHash = Convergence.nContentHash.Get64() >> 32; + + if (Convergence.nContentHash != uint256() && PastConvergences.find(nReducedContentHash) == PastConvergences.end()) + { + // This is specifically this form of insert to insure that if there is a hint "collision" the referenced + // SB Hash and Convergence stored will be the LATER one. + PastConvergences[nReducedContentHash] = std::make_pair(NewFormatSuperblock.GetHash(), Convergence); + } + } + + unsigned int DeleteOldConvergenceFromPastConvergencesMap() + { + unsigned int nDeleted = 0; + + std::map>::iterator iter; + for (iter = PastConvergences.begin(); iter != PastConvergences.end(); ) + { + // If the convergence entry is older than CManifest retention time, then delete the past convergence + // entry, because the underlying CManifest will be deleted by the housekeeping loop using the same + // aging. The erase advances the iterator in C++11. 
+ if (iter->second.second.timestamp < GetAdjustedTime() - SCRAPER_CMANIFEST_RETENTION_TIME) + { + iter = PastConvergences.erase(iter); + + ++nDeleted; + } + else + { + ++iter; + } + } + + return nDeleted; + } + }; diff --git a/src/reverselock.h b/src/reverselock.h new file mode 100644 index 0000000000..9d9cc9fd77 --- /dev/null +++ b/src/reverselock.h @@ -0,0 +1,34 @@ +// Copyright (c) 2015-2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_REVERSELOCK_H +#define BITCOIN_REVERSELOCK_H + +/** + * An RAII-style reverse lock. Unlocks on construction and locks on destruction. + */ +template +class reverse_lock +{ +public: + + explicit reverse_lock(Lock& _lock) : lock(_lock) { + _lock.unlock(); + _lock.swap(templock); + } + + ~reverse_lock() { + templock.lock(); + templock.swap(lock); + } + +private: + reverse_lock(reverse_lock const&); + reverse_lock& operator=(reverse_lock const&); + + Lock& lock; + Lock templock; +}; + +#endif // BITCOIN_REVERSELOCK_H diff --git a/src/rpcclient.cpp b/src/rpcclient.cpp index 00559b6dfe..643534fb31 100644 --- a/src/rpcclient.cpp +++ b/src/rpcclient.cpp @@ -184,6 +184,8 @@ static const CRPCConvertParam vRPCConvertParams[] = { "getblockbynumber" , 1 }, { "getblockhash" , 0 }, { "listpollresults" , 1 }, + { "setban" , 2 }, + { "setban" , 3 }, { "showblock" , 0 }, }; diff --git a/src/rpcnet.cpp b/src/rpcnet.cpp index 0f616eda1c..59ac8effe5 100644 --- a/src/rpcnet.cpp +++ b/src/rpcnet.cpp @@ -9,6 +9,7 @@ #include "db.h" #include "walletdb.h" #include "net.h" +#include "banman.h" using namespace std; extern std::string NeuralRequest(std::string MyNeuralRequest); @@ -228,6 +229,126 @@ UniValue getaddednodeinfo(const UniValue& params, bool fHelp) return ret; } +UniValue setban(const UniValue& params, bool fHelp) +{ + std::string strCommand; + + if (!params[1].isNull()) + strCommand = params[1].get_str(); 
+ + if (fHelp || params.size() < 2 || params.size() > 4 || (strCommand != "add" && strCommand != "remove")) + { + throw runtime_error( + "setban [bantime] [absolute]: add or remove an IP/Subnet from the banned list.\n" + "subnet: The IP/Subnet (see getpeerinfo for nodes IP) with an optional netmask (default is /32 = single IP) \n" + "command: 'add' to add an IP/Subnet to the list, 'remove' to remove an IP/Subnet from the list \n" + "bantime: time in seconds how long (or until when if [absolute] is set) the IP is banned \n" + " (0 or empty means using the default time of 24h which can also be overwritten by the -bantime startup argument)\n" + "absolute: Defaults to false. If set, the bantime must be an absolute timestamp in seconds since epoch (Jan 1 1970 GMT).\n" + ); + } + + if (!g_banman) { + throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded"); + } + + CSubNet subNet; + CNetAddr netAddr; + bool isSubnet = false; + + if (params[0].get_str().find('/') != std::string::npos) + isSubnet = true; + + if (!isSubnet) { + std::vector resolved; + LookupHost(params[0].get_str().c_str(), resolved, 1, false); + netAddr = resolved[0]; + } + else + LookupSubNet(params[0].get_str().c_str(), subNet); + + if (! (isSubnet ? subNet.IsValid() : netAddr.IsValid()) ) + throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET, "Error: Invalid IP/Subnet"); + + if (strCommand == "add") + { + if (isSubnet ? 
g_banman->IsBanned(subNet) : g_banman->IsBanned(netAddr)) { + throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED, "Error: IP/Subnet already banned"); + } + + int64_t banTime = 0; //use standard bantime if not specified + if (!params[2].isNull()) + banTime = params[2].get_int64(); + + bool absolute = false; + if (params[3].isTrue()) + absolute = true; + + if (isSubnet) { + g_banman->Ban(subNet, BanReasonManuallyAdded, banTime, absolute); + CNode::DisconnectNode(subNet); + } else { + g_banman->Ban(netAddr, BanReasonManuallyAdded, banTime, absolute); + CNode::DisconnectNode(netAddr); + } + } + else if(strCommand == "remove") + { + if (!( isSubnet ? g_banman->Unban(subNet) : g_banman->Unban(netAddr) )) { + throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET, "Error: Unban failed. Requested address/subnet was not previously banned."); + } + } + return NullUniValue; +} + +UniValue listbanned(const UniValue& params, bool fHelp) +{ + if (fHelp || params.size() != 0) + { + throw runtime_error( + "listbanned: List all banned IPs/subnets.\n" + ); + } + + if(!g_banman) { + throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded"); + } + + banmap_t banMap; + g_banman->GetBanned(banMap); + + UniValue bannedAddresses(UniValue::VARR); + for (const auto& entry : banMap) + { + const CBanEntry& banEntry = entry.second; + UniValue rec(UniValue::VOBJ); + rec.pushKV("address", entry.first.ToString()); + rec.pushKV("banned_until", banEntry.nBanUntil); + rec.pushKV("ban_created", banEntry.nCreateTime); + rec.pushKV("ban_reason", banEntry.banReasonToString()); + + bannedAddresses.push_back(rec); + } + + return bannedAddresses; +} + +UniValue clearbanned(const UniValue& params, bool fHelp) +{ + if (fHelp || params.size() != 0) + throw runtime_error( + "clearbanned: Clear all banned IPs.\n" + ); + + if (!g_banman) { + throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded"); + } + + g_banman->ClearBanned(); + + return NullUniValue; +} + bool 
AsyncNeuralRequest(std::string command_name,std::string cpid,int NodeLimit) { diff --git a/src/rpcprotocol.h b/src/rpcprotocol.h index df097ff917..5ee8294a29 100644 --- a/src/rpcprotocol.h +++ b/src/rpcprotocol.h @@ -50,8 +50,13 @@ enum RPCErrorCode RPC_DEPRECATED = -23, // Use for deprecated commands // P2P client errors - RPC_CLIENT_NOT_CONNECTED = -9, // Bitcoin is not connected + RPC_CLIENT_NOT_CONNECTED = -9, // Gridcoin is not connected RPC_CLIENT_IN_INITIAL_DOWNLOAD = -10, // Still downloading initial blocks + RPC_CLIENT_NODE_ALREADY_ADDED = -23, // Node is already added + RPC_CLIENT_NODE_NOT_ADDED = -24, // Node has not been added before + RPC_CLIENT_NODE_NOT_CONNECTED = -29, // Node to disconnect not found in connected nodes + RPC_CLIENT_INVALID_IP_OR_SUBNET = -30, // Invalid IP/Subnet + RPC_CLIENT_P2P_DISABLED = -31, // No valid connection manager instance found // Wallet errors RPC_WALLET_ERROR = -4, // Unspecified problem with wallet (key not found etc.) diff --git a/src/rpcserver.cpp b/src/rpcserver.cpp index e916e62d40..1b2f56a008 100644 --- a/src/rpcserver.cpp +++ b/src/rpcserver.cpp @@ -408,6 +408,7 @@ static const CRPCCommand vRPCCommands[] = { "askforoutstandingblocks", &askforoutstandingblocks, cat_network }, { "getblockchaininfo", &getblockchaininfo, cat_network }, { "getnetworkinfo", &getnetworkinfo, cat_network }, + { "clearbanned", &clearbanned, cat_network }, { "currenttime", ¤ttime, cat_network }, { "getaddednodeinfo", &getaddednodeinfo, cat_network }, { "getbestblockhash", &getbestblockhash, cat_network }, @@ -424,12 +425,14 @@ static const CRPCCommand vRPCCommands[] = { "getrawmempool", &getrawmempool, cat_network }, { "listallpolls", &listallpolls, cat_network }, { "listallpolldetails", &listallpolldetails, cat_network }, + { "listbanned", &listbanned, cat_network }, { "listpolldetails", &listpolldetails, cat_network }, { "listpollresults", &listpollresults, cat_network }, { "listpolls", &listpolls, cat_network }, { "memorypool", 
&memorypool, cat_network }, { "networktime", &networktime, cat_network }, { "ping", &ping, cat_network }, + { "setban", &setban, cat_network }, { "showblock", &showblock, cat_network }, { "stop", &stop, cat_network }, { "vote", &vote, cat_network }, diff --git a/src/rpcserver.h b/src/rpcserver.h index b2ef165559..f9b99d47d7 100644 --- a/src/rpcserver.h +++ b/src/rpcserver.h @@ -225,6 +225,7 @@ extern UniValue testnewsb(const UniValue& params, bool fHelp); extern UniValue addnode(const UniValue& params, bool fHelp); extern UniValue addpoll(const UniValue& params, bool fHelp); extern UniValue askforoutstandingblocks(const UniValue& params, bool fHelp); +extern UniValue clearbanned(const UniValue& params, bool fHelp); extern UniValue currenttime(const UniValue& params, bool fHelp); extern UniValue getaddednodeinfo(const UniValue& params, bool fHelp); extern UniValue getbestblockhash(const UniValue& params, bool fHelp); @@ -243,6 +244,7 @@ extern UniValue getpeerinfo(const UniValue& params, bool fHelp); extern UniValue getrawmempool(const UniValue& params, bool fHelp); extern UniValue listallpolls(const UniValue& params, bool fHelp); extern UniValue listallpolldetails(const UniValue& params, bool fHelp); +extern UniValue listbanned(const UniValue& params, bool fHelp); extern UniValue listpolldetails(const UniValue& params, bool fHelp); extern UniValue listpollresults(const UniValue& params, bool fHelp); extern UniValue listpolls(const UniValue& params, bool fHelp); @@ -252,6 +254,7 @@ extern UniValue ping(const UniValue& params, bool fHelp); extern UniValue rpc_getsupervotes(const UniValue& params, bool fHelp); extern UniValue rpc_exportstats(const UniValue& params, bool fHelp); extern UniValue rpc_getrecentblocks(const UniValue& params, bool fHelp); +extern UniValue setban(const UniValue& params, bool fHelp); extern UniValue showblock(const UniValue& params, bool fHelp); extern UniValue vote(const UniValue& params, bool fHelp); extern UniValue votedetails(const 
UniValue& params, bool fHelp); diff --git a/src/scheduler.cpp b/src/scheduler.cpp new file mode 100644 index 0000000000..ebaadb9921 --- /dev/null +++ b/src/scheduler.cpp @@ -0,0 +1,214 @@ +// Copyright (c) 2015-2018 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include + +//#include +#include "util.h" +#include + +#include +#include + +CScheduler::CScheduler() : nThreadsServicingQueue(0), stopRequested(false), stopWhenEmpty(false) +{ +} + +CScheduler::~CScheduler() +{ + assert(nThreadsServicingQueue == 0); +} + + +#if BOOST_VERSION < 105000 +static boost::system_time toPosixTime(const boost::chrono::system_clock::time_point& t) +{ + // Creating the posix_time using from_time_t loses sub-second precision. So rather than exporting the time_point to time_t, + // start with a posix_time at the epoch (0) and add the milliseconds that have passed since then. + return boost::posix_time::from_time_t(0) + boost::posix_time::milliseconds(boost::chrono::duration_cast(t.time_since_epoch()).count()); +} +#endif + +void CScheduler::serviceQueue() +{ + boost::unique_lock lock(newTaskMutex); + ++nThreadsServicingQueue; + + // newTaskMutex is locked throughout this loop EXCEPT + // when the thread is waiting or when the user's function + // is called. + while (!shouldStop()) { + try { + if (!shouldStop() && taskQueue.empty()) { + reverse_lock > rlock(lock); + // Use this chance to get more entropy - this is when more rebasing to Bitcoin is done... + //RandAddSeedSleep(); + MilliSleep(1); + } + while (!shouldStop() && taskQueue.empty()) { + // Wait until there is something to do. 
+ newTaskScheduled.wait(lock); + } + + // Wait until either there is a new task, or until + // the time of the first item on the queue: + +// wait_until needs boost 1.50 or later; older versions have timed_wait: +#if BOOST_VERSION < 105000 + while (!shouldStop() && !taskQueue.empty() && + newTaskScheduled.timed_wait(lock, toPosixTime(taskQueue.begin()->first))) { + // Keep waiting until timeout + } +#else + // Some boost versions have a conflicting overload of wait_until that returns void. + // Explicitly use a template here to avoid hitting that overload. + while (!shouldStop() && !taskQueue.empty()) { + boost::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first; + if (newTaskScheduled.wait_until<>(lock, timeToWaitFor) == boost::cv_status::timeout) + break; // Exit loop after timeout, it means we reached the time of the event + } +#endif + // If there are multiple threads, the queue can empty while we're waiting (another + // thread may service the task we were waiting on). + if (shouldStop() || taskQueue.empty()) + continue; + + Function f = taskQueue.begin()->second; + taskQueue.erase(taskQueue.begin()); + + { + // Unlock before calling f, so it can reschedule itself or another task + // without deadlocking: + reverse_lock > rlock(lock); + f(); + } + } catch (...) 
{ + --nThreadsServicingQueue; + throw; + } + } + --nThreadsServicingQueue; + newTaskScheduled.notify_one(); +} + +void CScheduler::stop(bool drain) +{ + { + boost::unique_lock lock(newTaskMutex); + if (drain) + stopWhenEmpty = true; + else + stopRequested = true; + } + newTaskScheduled.notify_all(); +} + +void CScheduler::schedule(CScheduler::Function f, boost::chrono::system_clock::time_point t) +{ + { + boost::unique_lock lock(newTaskMutex); + taskQueue.insert(std::make_pair(t, f)); + } + newTaskScheduled.notify_one(); +} + +void CScheduler::scheduleFromNow(CScheduler::Function f, int64_t deltaMilliSeconds) +{ + schedule(f, boost::chrono::system_clock::now() + boost::chrono::milliseconds(deltaMilliSeconds)); +} + +static void Repeat(CScheduler* s, CScheduler::Function f, int64_t deltaMilliSeconds) +{ + f(); + s->scheduleFromNow(std::bind(&Repeat, s, f, deltaMilliSeconds), deltaMilliSeconds); +} + +void CScheduler::scheduleEvery(CScheduler::Function f, int64_t deltaMilliSeconds) +{ + scheduleFromNow(std::bind(&Repeat, this, f, deltaMilliSeconds), deltaMilliSeconds); +} + +size_t CScheduler::getQueueInfo(boost::chrono::system_clock::time_point &first, + boost::chrono::system_clock::time_point &last) const +{ + boost::unique_lock lock(newTaskMutex); + size_t result = taskQueue.size(); + if (!taskQueue.empty()) { + first = taskQueue.begin()->first; + last = taskQueue.rbegin()->first; + } + return result; +} + +bool CScheduler::AreThreadsServicingQueue() const { + boost::unique_lock lock(newTaskMutex); + return nThreadsServicingQueue; +} + + +void SingleThreadedSchedulerClient::MaybeScheduleProcessQueue() { + { + LOCK(m_cs_callbacks_pending); + // Try to avoid scheduling too many copies here, but if we + // accidentally have two ProcessQueue's scheduled at once its + // not a big deal. 
+ if (m_are_callbacks_running) return; + if (m_callbacks_pending.empty()) return; + } + m_pscheduler->schedule(std::bind(&SingleThreadedSchedulerClient::ProcessQueue, this)); +} + +void SingleThreadedSchedulerClient::ProcessQueue() { + std::function callback; + { + LOCK(m_cs_callbacks_pending); + if (m_are_callbacks_running) return; + if (m_callbacks_pending.empty()) return; + m_are_callbacks_running = true; + + callback = std::move(m_callbacks_pending.front()); + m_callbacks_pending.pop_front(); + } + + // RAII the setting of fCallbacksRunning and calling MaybeScheduleProcessQueue + // to ensure both happen safely even if callback() throws. + struct RAIICallbacksRunning { + SingleThreadedSchedulerClient* instance; + explicit RAIICallbacksRunning(SingleThreadedSchedulerClient* _instance) : instance(_instance) {} + ~RAIICallbacksRunning() { + { + LOCK(instance->m_cs_callbacks_pending); + instance->m_are_callbacks_running = false; + } + instance->MaybeScheduleProcessQueue(); + } + } raiicallbacksrunning(this); + + callback(); +} + +void SingleThreadedSchedulerClient::AddToProcessQueue(std::function func) { + assert(m_pscheduler); + + { + LOCK(m_cs_callbacks_pending); + m_callbacks_pending.emplace_back(std::move(func)); + } + MaybeScheduleProcessQueue(); +} + +void SingleThreadedSchedulerClient::EmptyQueue() { + assert(!m_pscheduler->AreThreadsServicingQueue()); + bool should_continue = true; + while (should_continue) { + ProcessQueue(); + LOCK(m_cs_callbacks_pending); + should_continue = !m_callbacks_pending.empty(); + } +} + +size_t SingleThreadedSchedulerClient::CallbacksPending() { + LOCK(m_cs_callbacks_pending); + return m_callbacks_pending.size(); +} diff --git a/src/scheduler.h b/src/scheduler.h new file mode 100644 index 0000000000..436f661c59 --- /dev/null +++ b/src/scheduler.h @@ -0,0 +1,126 @@ +// Copyright (c) 2015-2018 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or 
http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_SCHEDULER_H +#define BITCOIN_SCHEDULER_H + +// +// NOTE: +// boost::thread / boost::chrono should be ported to std::thread / std::chrono +// when we support C++11. +// +#include +#include +#include + +#include + +// +// Simple class for background tasks that should be run +// periodically or once "after a while" +// +// Usage: +// +// CScheduler* s = new CScheduler(); +// s->scheduleFromNow(doSomething, 11); // Assuming a: void doSomething() { } +// s->scheduleFromNow(std::bind(Class::func, this, argument), 3); +// boost::thread* t = new boost::thread(std::bind(CScheduler::serviceQueue, s)); +// +// ... then at program shutdown, clean up the thread running serviceQueue: +// t->interrupt(); +// t->join(); +// delete t; +// delete s; // Must be done after thread is interrupted/joined. +// + +class CScheduler +{ +public: + CScheduler(); + ~CScheduler(); + + typedef std::function Function; + + // Call func at/after time t + void schedule(Function f, boost::chrono::system_clock::time_point t=boost::chrono::system_clock::now()); + + // Convenience method: call f once deltaMilliSeconds from now + void scheduleFromNow(Function f, int64_t deltaMilliSeconds); + + // Another convenience method: call f approximately + // every deltaMilliSeconds forever, starting deltaMilliSeconds from now. + // To be more precise: every time f is finished, it + // is rescheduled to run deltaMilliSeconds later. If you + // need more accurate scheduling, don't use this method. + void scheduleEvery(Function f, int64_t deltaMilliSeconds); + + // To keep things as simple as possible, there is no unschedule. + + // Services the queue 'forever'. 
Should be run in a thread, + // and interrupted using boost::interrupt_thread + void serviceQueue(); + + // Tell any threads running serviceQueue to stop as soon as they're + // done servicing whatever task they're currently servicing (drain=false) + // or when there is no work left to be done (drain=true) + void stop(bool drain=false); + + // Returns number of tasks waiting to be serviced, + // and first and last task times + size_t getQueueInfo(boost::chrono::system_clock::time_point &first, + boost::chrono::system_clock::time_point &last) const; + + // Returns true if there are threads actively running in serviceQueue() + bool AreThreadsServicingQueue() const; + +private: + std::multimap taskQueue; + boost::condition_variable newTaskScheduled; + mutable boost::mutex newTaskMutex; + int nThreadsServicingQueue; + bool stopRequested; + bool stopWhenEmpty; + bool shouldStop() const { return stopRequested || (stopWhenEmpty && taskQueue.empty()); } +}; + +/** + * Class used by CScheduler clients which may schedule multiple jobs + * which are required to be run serially. Jobs may not be run on the + * same thread, but no two jobs will be executed + * at the same time and memory will be release-acquire consistent + * (the scheduler will internally do an acquire before invoking a callback + * as well as a release at the end). In practice this means that a callback + * B() will be able to observe all of the effects of callback A() which executed + * before it. + */ +class SingleThreadedSchedulerClient { +private: + CScheduler *m_pscheduler; + + CCriticalSection m_cs_callbacks_pending; + std::list> m_callbacks_pending GUARDED_BY(m_cs_callbacks_pending); + bool m_are_callbacks_running GUARDED_BY(m_cs_callbacks_pending) = false; + + void MaybeScheduleProcessQueue(); + void ProcessQueue(); + +public: + explicit SingleThreadedSchedulerClient(CScheduler *pschedulerIn) : m_pscheduler(pschedulerIn) {} + + /** + * Add a callback to be executed. 
Callbacks are executed serially + * and memory is release-acquire consistent between callback executions. + * Practically, this means that callbacks can behave as if they are executed + * in order by a single thread. + */ + void AddToProcessQueue(std::function func); + + // Processes all remaining queue members on the calling thread, blocking until queue is empty + // Must be called after the CScheduler has no remaining processing threads! + void EmptyQueue(); + + size_t CallbacksPending(); +}; + +#endif diff --git a/src/scraper/fwd.h b/src/scraper/fwd.h index fe012c4e7d..79371d4003 100644 --- a/src/scraper/fwd.h +++ b/src/scraper/fwd.h @@ -30,6 +30,15 @@ enum class scrapereventtypes Sleep }; +enum class scraperSBvalidationtype +{ + Invalid, + Unknown, + CurrentCachedConvergence, + CachedPastConvergence, + ManifestLevelConvergence, + ProjectLevelConvergence +}; /********************* @@ -63,6 +72,11 @@ struct ConvergedManifest // Used when convergence is at the manifest level (normal) std::map mIncludedScraperManifests; + // The below is the manifest content hash for the underlying manifests that comprise the convergence. This + // will only be populated if the convergence is at the manifest level (bByParts == false). In that case, each + // manifest's content in the convergence must be the same. If the convergence is by project, this does not + // make sense to populate. See the above comment. + uint256 nUnderlyingManifestContentHash; // Used when convergence is at the manifest level (normal) and also at the part (project) level for // scrapers that are not part of any part (project) level convergence. diff --git a/src/scraper/scraper.cpp b/src/scraper/scraper.cpp index 338ed0e1e7..ddb73a01c6 100755 --- a/src/scraper/scraper.cpp +++ b/src/scraper/scraper.cpp @@ -1,3 +1,4 @@ +#include "main.h" #include "neuralnet/neuralnet.h" #include "scraper.h" #include "scraper_net.h" @@ -20,6 +21,7 @@ #include #include #include +#include // These are initialized empty. 
GetDataDir() cannot be called here. It is too early. fs::path pathDataDir = {}; @@ -117,6 +119,7 @@ unsigned int DeleteScraperFileManifestEntry(ScraperFileManifestEntry& entry); bool MarkScraperFileManifestEntryNonCurrent(ScraperFileManifestEntry& entry); void AlignScraperFileManifestEntries(const fs::path& file, const std::string& filetype, const std::string& sProject, const bool& excludefromcsmanifest); ScraperStats GetScraperStatsByConsensusBeaconList(); +ScraperStats GetScraperStatsFromSingleManifest(CScraperManifest &manifest); bool LoadProjectFileToStatsByCPID(const std::string& project, const fs::path& file, const double& projectmag, const BeaconMap& mBeaconMap, ScraperStats& mScraperStats); bool LoadProjectObjectToStatsByCPID(const std::string& project, const CSerializeData& ProjectData, const double& projectmag, const BeaconMap& mBeaconMap, ScraperStats& mScraperStats); bool ProcessProjectStatsFromStreamByCPID(const std::string& project, boostio::filtering_istream& sUncompressedIn, @@ -128,14 +131,12 @@ bool ScraperSendFileManifestContents(CBitcoinAddress& Address, CKey& Key); mmCSManifestsBinnedByScraper BinCScraperManifestsByScraper(); mmCSManifestsBinnedByScraper ScraperDeleteCScraperManifests(); unsigned int ScraperDeleteUnauthorizedCScraperManifests(); -bool ScraperDeleteCScraperManifest(uint256 nManifestHash); bool ScraperConstructConvergedManifest(ConvergedManifest& StructConvergedManifest); bool ScraperConstructConvergedManifestByProject(const NN::WhitelistSnapshot& projectWhitelist, mmCSManifestsBinnedByScraper& mMapCSManifestsBinnedByScraper, ConvergedManifest& StructConvergedManifest); std::string GenerateSBCoreDataFromScraperStats(ScraperStats& mScraperStats); // Overloaded. See alternative in scraper.h. 
std::string ScraperGetNeuralHash(std::string sNeuralContract); -NN::QuorumHash ScraperGetSuperblockHash(NN::Superblock& superblock); bool DownloadProjectHostFiles(const NN::WhitelistSnapshot& projectWhitelist); bool DownloadProjectTeamFiles(const NN::WhitelistSnapshot& projectWhitelist); @@ -1061,6 +1062,21 @@ bool ScraperHousekeeping() sSBCoreData = ScraperGetNeuralContract(true, false); } + { + LOCK(CScraperManifest::cs_mapManifest); + + unsigned int nPendingDeleted = 0; + + _log(logattribute::INFO, "ScraperHousekeeping", "Size of mapPendingDeletedManifest before delete = " + + std::to_string(CScraperManifest::mapPendingDeletedManifest.size())); + + // Make sure deleted manifests pending permanent deletion are culled. + nPendingDeleted = CScraperManifest::DeletePendingDeletedManifests(); + _log(logattribute::INFO, "ScraperHousekeeping", "Permanently deleted " + std::to_string(nPendingDeleted) + " manifest(s) pending permanent deletion."); + _log(logattribute::INFO, "ScraperHousekeeping", "Size of mapPendingDeletedManifest after delete = " + + std::to_string(CScraperManifest::mapPendingDeletedManifest.size())); + } + if (fDebug3 && !sSBCoreData.empty()) { UniValue dummy_params(UniValue::VARR); @@ -3054,6 +3070,107 @@ ScraperStats GetScraperStatsByConvergedManifest(ConvergedManifest& StructConverg } + +// This function should only be used as part of the superblock validation in bv11+. +ScraperStats GetScraperStatsFromSingleManifest(CScraperManifest& manifest) +{ + _log(logattribute::INFO, "GetScraperStatsFromSingleManifest", "Beginning stats processing."); + + // Create a dummy converged manifest + ConvergedManifest StructDummyConvergedManifest; + + ScraperStats mScraperStats {}; + + // Fill out the dummy ConvergedManifest structure. Note this assumes one-to-one part to project statistics BLOB. Needs to + // be fixed for more than one part per BLOB. This is easy in this case, because it is all from/referring to one manifest. 
+ + StructDummyConvergedManifest.ConsensusBlock = manifest.ConsensusBlock; + StructDummyConvergedManifest.timestamp = GetAdjustedTime(); + StructDummyConvergedManifest.bByParts = false; + + int iPartNum = 0; + CDataStream ss(SER_NETWORK,1); + WriteCompactSize(ss, manifest.vParts.size()); + uint256 nContentHashCheck; + + for (const auto& iter : manifest.vParts) + { + std::string sProject; + + if (iPartNum == 0) + sProject = "BeaconList"; + else + sProject = manifest.projects[iPartNum-1].project; + + // Copy the parts data into the map keyed by project. + StructDummyConvergedManifest.ConvergedManifestPartsMap.insert(std::make_pair(sProject, iter->data)); + + // Serialize the hash to doublecheck the content hash. + ss << iter->hash; + + iPartNum++; + } + ss << StructDummyConvergedManifest.ConsensusBlock; + + nContentHashCheck = Hash(ss.begin(), ss.end()); + + if (nContentHashCheck != manifest.nContentHash) + { + _log(logattribute::ERR, "GetScraperStatsFromSingleManifest", "Selected Manifest content hash check failed! nContentHashCheck = " + + nContentHashCheck.GetHex() + " and nContentHash = " + manifest.nContentHash.GetHex()); + // Content hash check failed. Return empty mScraperStats + return mScraperStats; + } + else // Content matches. + { + // The DummyConvergedManifest content hash is NOT the same as the hash above from the CScraper::manifest, because it needs to be in the order of the + // map key and on the data, not the order of vParts by the part hash. So, unfortunately, we have to walk through the map again to hash it correctly. + CDataStream ss2(SER_NETWORK,1); + for (const auto& iter : StructDummyConvergedManifest.ConvergedManifestPartsMap) + ss2 << iter.second; + + StructDummyConvergedManifest.nContentHash = Hash(ss2.begin(), ss2.end()); + } + + // Enumerate the count of active projects from the dummy converged manifest. One of the parts + // is the beacon list, is not a project, which is why there is a -1. 
+ unsigned int nActiveProjects = StructDummyConvergedManifest.ConvergedManifestPartsMap.size() - 1; + _log(logattribute::INFO, "GetScraperStatsFromSingleManifest", "Number of active projects in converged manifest = " + std::to_string(nActiveProjects)); + + double dMagnitudePerProject = NEURALNETWORKMULTIPLIER / nActiveProjects; + + //Get the Consensus Beacon map and initialize mScraperStats. + BeaconMap mBeaconMap; + LoadBeaconListFromConvergedManifest(StructDummyConvergedManifest, mBeaconMap); + + for (auto entry = StructDummyConvergedManifest.ConvergedManifestPartsMap.begin(); entry != StructDummyConvergedManifest.ConvergedManifestPartsMap.end(); ++entry) + { + std::string project = entry->first; + ScraperStats mProjectScraperStats; + + // Do not process the BeaconList itself as a project stats file. + if (project != "BeaconList") + { + _log(logattribute::INFO, "GetScraperStatsFromSingleManifest", "Processing stats for project: " + project); + + LoadProjectObjectToStatsByCPID(project, entry->second, dMagnitudePerProject, mBeaconMap, mProjectScraperStats); + + // Insert into overall map. + for (auto const& entry2 : mProjectScraperStats) + { + mScraperStats[entry2.first] = entry2.second; + } + } + } + + ProcessNetworkWideFromProjectStats(mBeaconMap, mScraperStats); + + _log(logattribute::INFO, "GetScraperStatsFromSingleManifest", "Completed stats processing"); + + return mScraperStats; +} + + std::string ExplainMagnitude(std::string sCPID) { // See if converged stats/contract update needed... @@ -3451,8 +3568,9 @@ unsigned int ScraperDeleteUnauthorizedCScraperManifests() else { _log(logattribute::WARNING, "ScraperDeleteUnauthorizedCScraperManifests", "Deleting unauthorized manifest with hash " + iter->first.GetHex()); - // Delete from CScraperManifest map (also advances iter to the next valid element). - iter = CScraperManifest::DeleteManifest(iter); + // Delete from CScraperManifest map (also advances iter to the next valid element). 
Immediate flag is set, because there should be + // no pending delete retention grace for this. + iter = CScraperManifest::DeleteManifest(iter, true); nDeleted++; } } @@ -3614,7 +3732,7 @@ bool ScraperSendFileManifestContents(CBitcoinAddress& Address, CKey& Key) if (bAddManifestSuccessful) _log(logattribute::INFO, "ScraperSendFileManifestContents", "addManifest (send) from this scraper (address " + sCManifestName + ") successful, timestamp " - + DateTimeStrFormat("%x %H:%M:%S", nTime)); + + DateTimeStrFormat("%x %H:%M:%S", nTime) + " with " + std::to_string(iPartNum) + " parts."); else _log(logattribute::ERR, "ScraperSendFileManifestContents", "addManifest (send) from this scraper (address " + sCManifestName + ") FAILED, timestamp " @@ -3810,6 +3928,9 @@ bool ScraperConstructConvergedManifest(ConvergedManifest& StructConvergedManifes } else // Content matches so we have a confirmed convergence. { + // Copy the MANIFEST content hash into the ConvergedManifest. + StructConvergedManifest.nUnderlyingManifestContentHash = convergence->first; + // The ConvergedManifest content hash is NOT the same as the hash above from the CScraper::manifest, because it needs to be in the order of the // map key and on the data, not the order of vParts by the part hash. So, unfortunately, we have to walk through the map again to hash it correctly. CDataStream ss2(SER_NETWORK,1); @@ -3823,7 +3944,7 @@ bool ScraperConstructConvergedManifest(ConvergedManifest& StructConvergedManifes { if (StructConvergedManifest.ConvergedManifestPartsMap.find(iProjects.m_name) == StructConvergedManifest.ConvergedManifestPartsMap.end()) { - _log(logattribute::WARNING, "ScraperConstructConvergedManifestByProject", "Project " + _log(logattribute::WARNING, "ScraperConstructConvergedManifest", "Project " + iProjects.m_name + " was excluded because the converged manifests from the scrapers all excluded the project. 
\n" + "Falling back to attempt convergence by project to try and recover excluded project."); @@ -3833,6 +3954,17 @@ bool ScraperConstructConvergedManifest(ConvergedManifest& StructConvergedManifes // Since we are falling back to project level and discarding this convergence, no need to process any more once one missed project is found. break; } + + if (StructConvergedManifest.ConvergedManifestPartsMap.find("BeaconList") == StructConvergedManifest.ConvergedManifestPartsMap.end()) + { + _log(logattribute::WARNING, "ScraperConstructConvergedManifest", "BeaconList was not found in the converged manifests from the scrapers. \n" + "Falling back to attempt convergence by project."); + + bConvergenceSuccessful = false; + + // Since we are falling back to project level and discarding this convergence, no need to process any more if BeaconList is missing. + break; + } } } @@ -3879,7 +4011,7 @@ bool ScraperConstructConvergedManifestByProject(const NN::WhitelistSnapshot& pro uint256 nManifestHashForConvergedBeaconList = 0; // We are going to do this for each project in the whitelist. - unsigned int iCountSuccesfulConvergedProjects = 0; + unsigned int iCountSuccessfulConvergedProjects = 0; unsigned int nScraperCount = mMapCSManifestsBinnedByScraper.size(); _log(logattribute::INFO, "ScraperConstructConvergedManifestByProject", "Number of Scrapers with manifests = " + std::to_string(nScraperCount)); @@ -3956,8 +4088,15 @@ bool ScraperConstructConvergedManifestByProject(const NN::WhitelistSnapshot& pro { // Insert into mProjectObjectsBinnedbyContent -------- content hash ------------------- ScraperID -------- Project. 
mProjectObjectsBinnedbyContent.insert(std::make_pair(nProjectObjectHash, std::make_pair(iter.first, iWhitelistProject.m_name))); - if (fDebug3) _log(logattribute::INFO, "ScraperConstructConvergedManifestByProject", "mManifestsBinnedbyContent insert " - + nProjectObjectHash.GetHex() + ", " + iter.first + ", " + iWhitelistProject.m_name); + //if (fDebug3) _log(logattribute::INFO, "ScraperConstructConvergedManifestByProject", "mProjectObjectsBinnedbyContent insert " + // + nProjectObjectHash.GetHex() + ", " + iter.first + ", " + iWhitelistProject.m_name); + if (fDebug3) _log(logattribute::INFO, "ScraperConstructConvergedManifestByProject", "mProjectObjectsBinnedbyContent insert, timestamp " + + DateTimeStrFormat("%x %H:%M:%S", manifest.nTime) + + ", content hash "+ nProjectObjectHash.GetHex() + + ", scraper ID " + iter.first + + ", project " + iWhitelistProject.m_name + + ", manifest hash " + nCSManifestHash.GetHex()); + } } } @@ -4022,7 +4161,7 @@ bool ScraperConstructConvergedManifestByProject(const NN::WhitelistSnapshot& pro nManifestHashForConvergedBeaconList = std::get<2>(iter.second); } - iCountSuccesfulConvergedProjects++; + iCountSuccessfulConvergedProjects++; // Note this break is VERY important, it prevents considering essentially the same project object that meets convergence multiple times. break; @@ -4031,7 +4170,7 @@ bool ScraperConstructConvergedManifestByProject(const NN::WhitelistSnapshot& pro } // If we meet the rule of CONVERGENCE_BY_PROJECT_RATIO, then proceed to fill out the rest of the map. - if ((double)iCountSuccesfulConvergedProjects / (double)projectWhitelist.size() >= CONVERGENCE_BY_PROJECT_RATIO) + if ((double)iCountSuccessfulConvergedProjects / (double)projectWhitelist.size() >= CONVERGENCE_BY_PROJECT_RATIO) { // Fill out the the rest of the ConvergedManifest structure. Note this assumes one-to-one part to project statistics BLOB. Needs to // be fixed for more than one part per BLOB. 
This is easy in this case, because it is all from/referring to one manifest. @@ -4048,93 +4187,108 @@ bool ScraperConstructConvergedManifestByProject(const NN::WhitelistSnapshot& pro auto pair = CScraperManifest::mapManifest.find(nManifestHashForConvergedBeaconList); CScraperManifest& manifest = *pair->second; - // The vParts[0] is always the BeaconList. - StructConvergedManifest.ConvergedManifestPartsMap.insert(std::make_pair("BeaconList", manifest.vParts[0]->data)); + // Bail if BeaconList is not found or empty. + if (pair == CScraperManifest::mapManifest.end() || manifest.vParts[0]->data.size() == 0) + { + _log(logattribute::WARNING, "ScraperConstructConvergedManifestByProject", "BeaconList was not found in the converged manifests from the scrapers. \n" + "Falling back to attempt convergence by project."); - StructConvergedManifest.ConsensusBlock = nConvergedConsensusBlock; + bConvergenceSuccessful = false; + } + else + { + // The vParts[0] is always the BeaconList. + StructConvergedManifest.ConvergedManifestPartsMap.insert(std::make_pair("BeaconList", manifest.vParts[0]->data)); - // The ConvergedManifest content hash is in the order of the map key and on the data. - for (const auto& iter : StructConvergedManifest.ConvergedManifestPartsMap) - ss << iter.second; + StructConvergedManifest.ConsensusBlock = nConvergedConsensusBlock; - StructConvergedManifest.nContentHash = Hash(ss.begin(), ss.end()); - StructConvergedManifest.timestamp = GetAdjustedTime(); - StructConvergedManifest.bByParts = true; + // The ConvergedManifest content hash is in the order of the map key and on the data. 
+ for (const auto& iter : StructConvergedManifest.ConvergedManifestPartsMap) + ss << iter.second; - bConvergenceSuccessful = true; + StructConvergedManifest.nContentHash = Hash(ss.begin(), ss.end()); + StructConvergedManifest.timestamp = GetAdjustedTime(); + StructConvergedManifest.bByParts = true; - _log(logattribute::INFO, "ScraperConstructConvergedManifestByProject", "Successful convergence by project: " - + std::to_string(iCountSuccesfulConvergedProjects) + " out of " + std::to_string(projectWhitelist.size()) - + " projects at " - + DateTimeStrFormat("%x %H:%M:%S", StructConvergedManifest.timestamp)); + bConvergenceSuccessful = true; - // Fill out the the excluded projects vector and the included scraper count (by project) map - for (const auto& iProjects : projectWhitelist) - { - if (StructConvergedManifest.ConvergedManifestPartsMap.find(iProjects.m_name) == StructConvergedManifest.ConvergedManifestPartsMap.end()) - { - // Project in whitelist was not in the map, so it goes in the exclusion vector. 
- StructConvergedManifest.vExcludedProjects.push_back(iProjects.m_name); - _log(logattribute::WARNING, "ScraperConstructConvergedManifestByProject", "Project " - + iProjects.m_name - + " was excluded because there was no convergence from the scrapers for this project at the project level."); + _log(logattribute::INFO, "ScraperConstructConvergedManifestByProject", "Successful convergence by project: " + + std::to_string(iCountSuccessfulConvergedProjects) + " out of " + std::to_string(projectWhitelist.size()) + + " projects at " + + DateTimeStrFormat("%x %H:%M:%S", StructConvergedManifest.timestamp)); - continue; - } + // Fill out the the excluded projects vector and the included scraper count (by project) map + for (const auto& iProjects : projectWhitelist) + { + if (StructConvergedManifest.ConvergedManifestPartsMap.find(iProjects.m_name) == StructConvergedManifest.ConvergedManifestPartsMap.end()) + { + // Project in whitelist was not in the map, so it goes in the exclusion vector. + StructConvergedManifest.vExcludedProjects.push_back(iProjects.m_name); + _log(logattribute::WARNING, "ScraperConstructConvergedManifestByProject", "Project " + + iProjects.m_name + + " was excluded because there was no convergence from the scrapers for this project at the project level."); - unsigned int nScraperConvergenceCount = StructConvergedManifest.mIncludedScrapersbyProject.count(iProjects.m_name); - StructConvergedManifest.mScraperConvergenceCountbyProject.insert(std::make_pair(iProjects.m_name, nScraperConvergenceCount)); + continue; + } - if (fDebug3) _log(logattribute::INFO, "ScraperConstructConvergedManifestByProject", "Project " + iProjects.m_name - + ": " + std::to_string(nScraperConvergenceCount) + " scraper(s) converged"); - } + unsigned int nScraperConvergenceCount = StructConvergedManifest.mIncludedScrapersbyProject.count(iProjects.m_name); + StructConvergedManifest.mScraperConvergenceCountbyProject.insert(std::make_pair(iProjects.m_name, nScraperConvergenceCount)); - 
// Fill out the included and excluded scraper vector for scrapers that did not participate in any project level convergence. - for (const auto& iScraper : mMapCSManifestsBinnedByScraper) - { - if (StructConvergedManifest.mIncludedProjectsbyScraper.count(iScraper.first)) - { - StructConvergedManifest.vIncludedScrapers.push_back(iScraper.first); - if (fDebug3) _log(logattribute::INFO, "ScraperConstructConvergedManifestByProject", "Scraper " - + iScraper.first - + " was included in one or more project level convergences."); + if (fDebug3) _log(logattribute::INFO, "ScraperConstructConvergedManifestByProject", "Project " + iProjects.m_name + + ": " + std::to_string(nScraperConvergenceCount) + " scraper(s) converged"); } - else + + // Fill out the included and excluded scraper vector for scrapers that did not participate in any project level convergence. + for (const auto& iScraper : mMapCSManifestsBinnedByScraper) { - StructConvergedManifest.vExcludedScrapers.push_back(iScraper.first); - _log(logattribute::INFO, "ScraperConstructConvergedManifestByProject", "Scraper " - + iScraper.first - + " was excluded because it was not included in any project level convergence."); + if (StructConvergedManifest.mIncludedProjectsbyScraper.count(iScraper.first)) + { + StructConvergedManifest.vIncludedScrapers.push_back(iScraper.first); + if (fDebug3) _log(logattribute::INFO, "ScraperConstructConvergedManifestByProject", "Scraper " + + iScraper.first + + " was included in one or more project level convergences."); + } + else + { + StructConvergedManifest.vExcludedScrapers.push_back(iScraper.first); + _log(logattribute::INFO, "ScraperConstructConvergedManifestByProject", "Scraper " + + iScraper.first + + " was excluded because it was not included in any project level convergence."); + } } - } - // Retrieve the complete list of scrapers from the AppCache to determine scrapers not publishing at all. 
- AppCacheSection mScrapers = ReadCacheSection(Section::SCRAPER); + // Retrieve the complete list of scrapers from the AppCache to determine scrapers not publishing at all. + AppCacheSection mScrapers = ReadCacheSection(Section::SCRAPER); - for (const auto& iScraper : mScrapers) - { - // Only include scrapers enabled in protocol. - if (iScraper.second.value == "true" || iScraper.second.value == "1") + for (const auto& iScraper : mScrapers) { - if (std::find(std::begin(StructConvergedManifest.vExcludedScrapers), std::end(StructConvergedManifest.vExcludedScrapers), iScraper.first) - == std::end(StructConvergedManifest.vExcludedScrapers) - && std::find(std::begin(StructConvergedManifest.vIncludedScrapers), std::end(StructConvergedManifest.vIncludedScrapers), iScraper.first) - == std::end(StructConvergedManifest.vIncludedScrapers)) + // Only include scrapers enabled in protocol. + if (iScraper.second.value == "true" || iScraper.second.value == "1") { - StructConvergedManifest.vScrapersNotPublishing.push_back(iScraper.first); - _log(logattribute::INFO, "ScraperConstructConvergedManifesByProject", "Scraper " + iScraper.first + " authorized but not publishing."); + if (std::find(std::begin(StructConvergedManifest.vExcludedScrapers), std::end(StructConvergedManifest.vExcludedScrapers), iScraper.first) + == std::end(StructConvergedManifest.vExcludedScrapers) + && std::find(std::begin(StructConvergedManifest.vIncludedScrapers), std::end(StructConvergedManifest.vIncludedScrapers), iScraper.first) + == std::end(StructConvergedManifest.vIncludedScrapers)) + { + StructConvergedManifest.vScrapersNotPublishing.push_back(iScraper.first); + _log(logattribute::INFO, "ScraperConstructConvergedManifesByProject", "Scraper " + iScraper.first + " authorized but not publishing."); + } } } - } - if (fDebug3) _log(logattribute::INFO, "ENDLOCK", "CScraperManifest::cs_mapManifest"); + if (fDebug3) _log(logattribute::INFO, "ENDLOCK", "CScraperManifest::cs_mapManifest"); + } } if 
(!bConvergenceSuccessful) + { + // Reinitialize StructConvergedManifest. + StructConvergedManifest = {}; + _log(logattribute::INFO, "ScraperConstructConvergedManifestByProject", "No convergence on manifests by projects."); + } return bConvergenceSuccessful; - } @@ -4214,7 +4368,7 @@ mmCSManifestsBinnedByScraper ScraperDeleteCScraperManifests() + " from scraper source " + iter->first); // Delete from CScraperManifest map - ScraperDeleteCScraperManifest(iter_inner->second.first); + CScraperManifest::DeleteManifest(iter_inner->second.first); } } } @@ -4226,7 +4380,7 @@ mmCSManifestsBinnedByScraper ScraperDeleteCScraperManifests() if (GetAdjustedTime() - manifest.nTime > SCRAPER_CMANIFEST_RETENTION_TIME) { - _log(logattribute::INFO, "Scraper", "Deleting old CScraperManifest with hash " + iter->first.GetHex()); + _log(logattribute::INFO, "ScraperDeleteCScraperManifests", "Deleting old CScraperManifest with hash " + iter->first.GetHex()); // Delete from CScraperManifest map iter = CScraperManifest::DeleteManifest(iter); } @@ -4234,6 +4388,28 @@ mmCSManifestsBinnedByScraper ScraperDeleteCScraperManifests() ++iter; } + // Also delete old entries that have exceeded retention time from the ConvergedScraperStatsCache. This follows + // SCRAPER_CMANIFEST_RETENTION_TIME as well. + { + LOCK(cs_ConvergedScraperStatsCache); + if (fDebug3) _log(logattribute::INFO, "LOCK", "cs_ConvergedScraperStatsCache"); + + ConvergedScraperStatsCache.DeleteOldConvergenceFromPastConvergencesMap(); + + if (fDebug3) _log(logattribute::INFO, "ENDLOCK", "cs_ConvergedScraperStatsCache"); + } + + unsigned int nPendingDeleted = 0; + + _log(logattribute::INFO, "ScraperDeleteCScraperManifests", "Size of mapPendingDeletedManifest before delete = " + + std::to_string(CScraperManifest::mapPendingDeletedManifest.size())); + + // Clear old CScraperManifests out of mapPendingDeletedManifest. 
+ nPendingDeleted = CScraperManifest::DeletePendingDeletedManifests(); + _log(logattribute::INFO, "ScraperDeleteCScraperManifests", "Permanently deleted " + std::to_string(nPendingDeleted) + " manifest(s) pending permanent deletion."); + _log(logattribute::INFO, "ScraperDeleteCScraperManifests", "Size of mapPendingDeletedManifest = " + + std::to_string(CScraperManifest::mapPendingDeletedManifest.size())); + // Reload mMapCSManifestsBinnedByScraper after deletions. This is not particularly efficient, but the map is not // that large. (The lock on CScraperManifest::cs_mapManifest is still held from above.) mMapCSManifestsBinnedByScraper = BinCScraperManifestsByScraper(); @@ -4251,6 +4427,9 @@ bool LoadBeaconListFromConvergedManifest(ConvergedManifest& StructConvergedManif // Find the beacon list. auto iter = StructConvergedManifest.ConvergedManifestPartsMap.find("BeaconList"); + // Bail if the beacon list is not found, or the part is zero size (missing referenced part) + if (iter == StructConvergedManifest.ConvergedManifestPartsMap.end() || iter->second.size() == 0) return false; + boostio::basic_array_source input_source(&iter->second[0], iter->second.size()); boostio::stream> ingzss(input_source); @@ -4297,16 +4476,6 @@ bool LoadBeaconListFromConvergedManifest(ConvergedManifest& StructConvergedManif } -// A lock should be taken on CScraperManifest::cs_mapManifest before calling this function. -bool ScraperDeleteCScraperManifest(uint256 nManifestHash) -{ - // This deletes a manifest from the map. - bool ret = CScraperManifest::DeleteManifest(nManifestHash); - - return ret; -} - - /*********************** * Neural Network * ************************/ @@ -4394,7 +4563,7 @@ std::string ScraperGetNeuralContract(bool bStoreConvergedStats, bool bContractDi // If not in sync then immediately bail with a empty string. if (fOutOfSyncByAge) return std::string(); - // Check the age of the ConvergedScraperStats cache. 
If less than nScraperSleep / 1000 old (for seconds), then simply report back the cache contents. + // Check the age of the ConvergedScraperStats cache. If less than nScraperSleep / 1000 old (for seconds) or clean, then simply report back the cache contents. // This prevents the relatively heavyweight stats computations from running too often. The time here may not exactly align with // the scraper loop if it is running, but that is ok. The scraper loop updates the time in the cache too. bool bConvergenceUpdateNeeded = true; @@ -4414,6 +4583,9 @@ std::string ScraperGetNeuralContract(bool bStoreConvergedStats, bool bContractDi BeaconMap mBeaconMap; std::string sSBCoreData; + // This is here to do new SB testing... + NN::Superblock superblock; + // if bConvergenceUpdate is needed, and... // If bContractDirectFromStatsUpdate is set to true, this means that this is being called from // ScraperSynchronizeDPOR() in fallback mode to force a single shot update of the stats files and @@ -4449,6 +4621,8 @@ std::string ScraperGetNeuralContract(bool bStoreConvergedStats, bool bContractDi LOCK(cs_ConvergedScraperStatsCache); if (fDebug3) _log(logattribute::INFO, "LOCK", "cs_ConvergedScraperStatsCache"); + ConvergedScraperStatsCache.AddConvergenceToPastConvergencesMap(); + std::string sSBCoreDataPrev = ConvergedScraperStatsCache.sContract; sSBCoreData = GenerateSBCoreDataFromScraperStats(mScraperConvergedStats); @@ -4459,6 +4633,10 @@ std::string ScraperGetNeuralContract(bool bStoreConvergedStats, bool bContractDi ConvergedScraperStatsCache.sContract = sSBCoreData; ConvergedScraperStatsCache.Convergence = StructConvergedManifest; + // This is here to do new SB testing... + superblock = NN::Superblock::FromConvergence(ConvergedScraperStatsCache); + ConvergedScraperStatsCache.NewFormatSuperblock = superblock; + // Mark the cache clean, because it was just updated. 
ConvergedScraperStatsCache.bClean = true; @@ -4555,10 +4733,10 @@ NN::Superblock ScraperGetSuperblockContract(bool bStoreConvergedStats, bool bCon // NOTE - OutOfSyncByAge calls PreviousBlockAge(), which takes a lock on cs_main. This is likely a deadlock culprit if called from here // and the scraper or neuralnet loop nearly simultaneously. So we use an atomic flag updated by the scraper or neuralnet loop. - // If not in sync then immediately bail with a empty string. + // If not in sync then immediately bail with an empty superblock. if (fOutOfSyncByAge) return empty_superblock; - // Check the age of the ConvergedScraperStats cache. If less than nScraperSleep / 1000 old (for seconds), then simply report back the cache contents. + // Check the age of the ConvergedScraperStats cache. If less than nScraperSleep / 1000 old (for seconds) or clean, then simply report back the cache contents. // This prevents the relatively heavyweight stats computations from running too often. The time here may not exactly align with // the scraper loop if it is running, but that is ok. The scraper loop updates the time in the cache too. bool bConvergenceUpdateNeeded = true; @@ -4581,7 +4759,7 @@ NN::Superblock ScraperGetSuperblockContract(bool bStoreConvergedStats, bool bCon // if bConvergenceUpdate is needed, and... // If bContractDirectFromStatsUpdate is set to true, this means that this is being called from // ScraperSynchronizeDPOR() in fallback mode to force a single shot update of the stats files and - // direct generation of the contract from the single shot run. This will return immediately with a blank if + // direct generation of the contract from the single shot run. This will return immediately with an empty SB if // IsScraperAuthorized() evaluates to false, because that means that by network policy, no non-scraper // stats downloads are allowed by unauthorized scraper nodes. // (If bConvergenceUpdate is not needed, then the scraper is operating by convergence already... 
@@ -4613,26 +4791,27 @@ NN::Superblock ScraperGetSuperblockContract(bool bStoreConvergedStats, bool bCon LOCK(cs_ConvergedScraperStatsCache); if (fDebug3) _log(logattribute::INFO, "LOCK", "cs_ConvergedScraperStatsCache"); - NN::Superblock superblock_Prev = ConvergedScraperStatsCache.NewFormatSuperblock; + ConvergedScraperStatsCache.AddConvergenceToPastConvergencesMap(); - superblock = NN::Superblock::FromStats(mScraperConvergedStats); + NN::Superblock superblock_Prev = ConvergedScraperStatsCache.NewFormatSuperblock; ConvergedScraperStatsCache.mScraperConvergedStats = mScraperConvergedStats; ConvergedScraperStatsCache.nTime = GetAdjustedTime(); - ConvergedScraperStatsCache.nNewFormatSuperblockHash = ScraperGetSuperblockHash(superblock); - ConvergedScraperStatsCache.NewFormatSuperblock = superblock; ConvergedScraperStatsCache.Convergence = StructConvergedManifest; + superblock = NN::Superblock::FromConvergence(ConvergedScraperStatsCache); + ConvergedScraperStatsCache.NewFormatSuperblock = superblock; + // Mark the cache clean, because it was just updated. ConvergedScraperStatsCache.bClean = true; // Signal UI of SBContract status - if (superblock.GetSerializeSize(SER_NETWORK, 1)) + if (superblock.WellFormed()) { - if (superblock_Prev.GetSerializeSize(SER_NETWORK, 1)) + if (superblock_Prev.WellFormed()) { // If the current is not empty and the previous is not empty and not the same, then there is an updated contract. 
- if (ScraperGetSuperblockHash(superblock) != ScraperGetSuperblockHash(superblock_Prev)) + if (superblock.GetHash() != superblock_Prev.GetHash()) uiInterface.NotifyScraperEvent(scrapereventtypes::SBContract, CT_UPDATED, {}); } else @@ -4640,7 +4819,7 @@ NN::Superblock ScraperGetSuperblockContract(bool bStoreConvergedStats, bool bCon uiInterface.NotifyScraperEvent(scrapereventtypes::SBContract, CT_NEW, {}); } else - if (superblock_Prev.GetSerializeSize(SER_NETWORK, 1)) + if (superblock_Prev.WellFormed()) // If the current is empty and the previous was not empty, then the contract has been deleted. uiInterface.NotifyScraperEvent(scrapereventtypes::SBContract, CT_DELETED, {}); @@ -4671,7 +4850,7 @@ NN::Superblock ScraperGetSuperblockContract(bool bStoreConvergedStats, bool bCon superblock = NN::Superblock::FromStats(mScraperStats); // Signal the UI there is a contract. - if(superblock.GetSerializeSize(SER_NETWORK, 1)) + if(superblock.WellFormed()) uiInterface.NotifyScraperEvent(scrapereventtypes::SBContract, CT_NEW, {}); _log(logattribute::INFO, "ScraperGetNeuralContract", "Superblock object generated from single shot"); @@ -4727,23 +4906,6 @@ std::string ScraperGetNeuralHash(std::string sNeuralContract) return sHash; } -// Note: This is the native hash for SB ver 2+ (bv11+). -NN::QuorumHash ScraperGetSuperblockHash() -{ - NN::QuorumHash nSuperblockContractHash = NN::QuorumHash::Hash(ScraperGetSuperblockContract(false, false)); - - return nSuperblockContractHash; -} - -// Note: This is the native hash for SB ver 2+ (bv11+). -NN::QuorumHash ScraperGetSuperblockHash(NN::Superblock& superblock) -{ - NN::QuorumHash nSuperblockContractHash = NN::QuorumHash::Hash(superblock); - - return nSuperblockContractHash; -} - - bool ScraperSynchronizeDPOR() { bool bStatus = false; @@ -4774,6 +4936,480 @@ bool ScraperSynchronizeDPOR() return bStatus; } +//! +//! \brief The superblock validation function used for bv11+ (SB version 2+) +//! +//! 
This function supports four different levels of validation, and invalid +//! and unknown states. +//! +//! Invalid, +//! Unknown, +//! CurrentCachedConvergence, +//! CachedPastConvergence, +//! ManifestLevelConvergence, +//! ProjectLevelConvergence +//! +//! This is an enum class with six possible values. +//! +//! There are enumerated in increasing order of difficulty and decreasing +//! frequency. It is expected that the vast majority of time the superblock +//! should be validated from the current cached convergence or the cached +//! past convergence. In the case where a node has just started up when the +//! superblock comes in to be validated, there may be no cache entry that +//! corresponds to the incoming superblock contract. In that case we have to +//! fallback to trying to reconstruct the staking node's convergence at either +//! the manifest or project level from the received manifest and parts +//! information and see if the superblock formed from that matches the hash. +scraperSBvalidationtype ValidateSuperblock(const NN::Superblock& NewFormatSuperblock, bool bUseCache) +{ + // Calculate the hash of the superblock to validate. + NN::QuorumHash nNewFormatSuperblockHash = NewFormatSuperblock.GetHash(); + + // convergence hash hint (reduced hash) from superblock... (used for cached lookup) + uint32_t nReducedSBContentHash = NewFormatSuperblock.m_convergence_hint; + + // underlying manifest hash hint (reduced hash from superblock... (used for uncached lookup against manifests) + uint32_t nUnderlyingManifestReducedContentHash = 0; + if (!NewFormatSuperblock.ConvergedByProject()) nUnderlyingManifestReducedContentHash = NewFormatSuperblock.m_manifest_content_hint; + + if (fDebug3) _log(logattribute::INFO, "ValidateSuperblock", "NewFormatSuperblock.m_version = " + std::to_string(NewFormatSuperblock.m_version)); + + if (bUseCache) + { + // Retrieve current convergence superblock. 
This will have the effect of updating + // the convergence and populating the cache with the latest. If the cache has + // already been updated via the housekeeping loop and is clean, this will be trivial. + + NN::Superblock CurrentNodeSuperblock; + + if (false /* NewFormatSuperblock.m_version >= 2 */) + { + CurrentNodeSuperblock = ScraperGetSuperblockContract(true, false); + } + else + { + // Don't need the string return (old contract). Instead pick up the cached SB, which if updated would be + // the return value for ScraperGetSuperblockContract post bv11. + // This is really only for testing pre-bv11. + ScraperGetNeuralContract(true, false); + + LOCK(cs_ConvergedScraperStatsCache); + + CurrentNodeSuperblock = ConvergedScraperStatsCache.NewFormatSuperblock; + } + + // If there is no superblock returned, the node is either out of sync, or has not + // received enough manifests and parts to calculate a convergence. Return unknown. + if (!CurrentNodeSuperblock.WellFormed()) return scraperSBvalidationtype::Unknown; + + LOCK(cs_ConvergedScraperStatsCache); + + // First check and see if superblock contract hash is the current one in the cache. + if (fDebug3) + { + _log(logattribute::INFO, "ValidateSuperblock", "ConvergedScraperStatsCache.NewFormatSuperblock.GetHash() = " + + ConvergedScraperStatsCache.NewFormatSuperblock.GetHash().ToString()); + _log(logattribute::INFO, "ValidateSuperblock", "nNewFormatSuperblockHash = " + + nNewFormatSuperblockHash.ToString()); + } + if (ConvergedScraperStatsCache.NewFormatSuperblock.GetHash() == nNewFormatSuperblockHash) return scraperSBvalidationtype::CurrentCachedConvergence; + + // if not validated with current cached contract, then check past ones in the cache. 
+ auto found = ConvergedScraperStatsCache.PastConvergences.find(nReducedSBContentHash); + + if (found != ConvergedScraperStatsCache.PastConvergences.end()) + { + if (found->second.first == nNewFormatSuperblockHash) return scraperSBvalidationtype::CachedPastConvergence; + } + } + + // Now for the hard stuff... the manifest level uncached case... borrowed from the ScraperConstructConvergedManifest function... + + // Call ScraperDeleteCScraperManifests(). This will return a map of manifests binned by Scraper after the culling. + mmCSManifestsBinnedByScraper mMapCSManifestsBinnedByScraper = ScraperDeleteCScraperManifests(); + + // Do a map for unique manifest times by content hash, then scraperID and manifest (not content) hash. + std::multimap> mManifestsBinnedbyContent; + + unsigned int nScraperCount = mMapCSManifestsBinnedByScraper.size(); + + // If the superblock indicates not converged by project, then reconstruct at the manifest level + // else reconstruct at the project level. + + if (fDebug3) _log(logattribute::INFO, "ValidateSuperblock", "NewFormatSuperblock.ConvergedByProject() = " + std::to_string(NewFormatSuperblock.ConvergedByProject())); + + if (!NewFormatSuperblock.ConvergedByProject()) + { + _log(logattribute::INFO, "ValidateSuperblock", "Number of Scrapers with manifests = " + std::to_string(nScraperCount)); + + for (const auto& iter : mMapCSManifestsBinnedByScraper) + { + // iter.second is the mCSManifest + for (const auto& iter_inner : iter.second) + { + // Insert into mManifestsBinnedByTime multimap. Iter_inner.first is the manifest time, + // iter_inner.second.second is the manifest CONTENT hash. + // mManifestsBinnedByTime.insert(std::make_pair(iter_inner.first, iter_inner.second.second)); + + // Even though this is a multimap on purpose because we are going to count occurances of the same key, + // We need to prevent the insertion of a second entry with the same content from the same scraper. 
This + // could otherwise happen if a scraper is shutdown and restarted, and it publishes a new manifest + // before it receives manifests from the other nodes (including its own prior manifests). + // ------------------------------------------------ manifest CONTENT hash + auto range = mManifestsBinnedbyContent.equal_range(iter_inner.second.second); + bool bAlreadyExists = false; + for (auto iter3 = range.first; iter3 != range.second; ++iter3) + { + // ---- ScraperID ------ Candidate scraperID to insert + if (iter3->second.first == iter.first) + bAlreadyExists = true; + } + + if (!bAlreadyExists) + { + // Insert into mManifestsBinnedbyContent ------------- content hash --------------------- ScraperID ------ manifest hash. + mManifestsBinnedbyContent.insert(std::make_pair(iter_inner.second.second, std::make_pair(iter.first, iter_inner.second.first))); + } + } + } + + // Find matching manifests (full hash) using the hint. We use a map here to prevent duplicates on purpose. Note that + // we need this, because there is a small possibility that the hint could match MORE THAN ONE content hash, because + // the reduced hash is only 32 bits. + // content hash - manifest hash + std::map mMatchingManifestContentHashes; + + if (fDebug3) _log(logattribute::INFO, "ValidateSuperblock", "nUnderlyingManifestReducedContentHash = " + std::to_string(nUnderlyingManifestReducedContentHash)); + + for (const auto& iter : mManifestsBinnedbyContent) + { + uint32_t nReducedManifestContentHash = iter.first.Get64() >> 32; + + if (fDebug3) _log(logattribute::INFO, "ValidateSuperblock", "nReducedManifestContentHash = " + std::to_string(nReducedManifestContentHash)); + + // This has the effect of only storing the first one of the series of matching manifests that match the hint, + // because of the insert. Below we will count the others matching to check for a supermajority. 
+ if (nReducedManifestContentHash == nUnderlyingManifestReducedContentHash) mMatchingManifestContentHashes.insert(std::make_pair(iter.first, iter.second.second)); + } + + // For each of the matching full content hashes, count the number of manifests by full content hash. + // We continue until a group meets the supermajority rule--then there was an uncached past convergence and it is validated. + // This scenario is typically due to a node that was started late in the scraper manifest generation cycle, so it + // has all of the underlying manifests from the scrapers, but has not calculated and cached all of the convergences + // that would be calculated and cached on the node from running a long time with the nScraperSleep interval NN loop. + for (const auto& iter : mMatchingManifestContentHashes) + { + unsigned int nIdenticalContentManifestCount = mManifestsBinnedbyContent.count(iter.first); + + if (nIdenticalContentManifestCount >= NumScrapersForSupermajority(nScraperCount)) + { + + CScraperManifest CandidateManifest; + + { + LOCK(CScraperManifest::cs_mapManifest); + + auto found = CScraperManifest::mapManifest.find(iter.second); + + if (found != CScraperManifest::mapManifest.end()) + { + // This is a copy on purpose to minimize lock time. + CandidateManifest = *found->second; + } + else + { + continue; + } + } + + ScraperStats mScraperstats = GetScraperStatsFromSingleManifest(CandidateManifest); + + NN::Superblock superblock = NN::Superblock::FromStats(mScraperstats); + + if (fDebug3) _log(logattribute::INFO, "ValidateSuperblock", "superblock.m_version = " + std::to_string(superblock.m_version)); + + // This should really be done in the superblock class as an overload on NN::Superblock::FromConvergence. 
+ superblock.m_convergence_hint = CandidateManifest.nContentHash.Get64() >> 32; + + NN::QuorumHash nCandidateSuperblockHash = superblock.GetHash(); + + if (fDebug3) _log(logattribute::INFO, "ValidateSuperblock", "Cached past convergence - nCandidateSuperblockHash = " + nCandidateSuperblockHash.ToString()); + if (fDebug3) _log(logattribute::INFO, "ValidateSuperblock", "Cached past convergence - nNewFormatSuperblockHash = " + nNewFormatSuperblockHash.ToString()); + + if (nCandidateSuperblockHash == nNewFormatSuperblockHash) return scraperSBvalidationtype::ManifestLevelConvergence; + } + } + } + else + { + // We are in converged by project validation mode... the harder stuff... + // borrowed from the ScraperConstructConvergedManifestByProject function... + + // Get the whitelist. + const NN::WhitelistSnapshot projectWhitelist = NN::GetWhitelist().Snapshot(); + + // Create a dummy converged manifest to use. + ConvergedManifest StructDummyConvergedManifest; + + CDataStream ss(SER_NETWORK,1); + uint256 nConvergedConsensusBlock = 0; + int64_t nConvergedConsensusTime = 0; + uint256 nManifestHashForConvergedBeaconList = 0; + + // We are going to do this for each project in the whitelist. + unsigned int iCountMatchedProjects = 0; + unsigned int iCountSuccessfulConvergedProjects = 0; + + _log(logattribute::INFO, "ValidateSuperblock", "Number of Scrapers with manifests = " + std::to_string(nScraperCount)); + + // Content (Part) Hash - Project + std::map mMatchingProjectPartHashes; + + + // TODO: Should this be based on the list of projects in the SB? (Equivalent to a cached whitelist state.) + for (const auto& iWhitelistProject : projectWhitelist) + { + // Do a map for unique ProjectObject times ordered by descending time then content hash. Note that for Project Objects (Parts), + // the content hash is the object hash. 
We also need the consensus block here, because we are "composing" the manifest by + // parts, so we will need to choose the latest consensus block by manifest time. This will occur naturally below if tracked in + // this manner. We will also want the BeaconList from the associated manifest. + // ------ manifest time --- object hash - consensus block hash - manifest hash. + std::multimap, greater> mProjectObjectsBinnedByTime; + // and also by project object (content) hash, then scraperID and project. + std::multimap> mProjectObjectsBinnedbyContent; + //std::multimap>::iterator ProjectConvergence; + + { + LOCK(CScraperManifest::cs_mapManifest); + if (fDebug3) _log(logattribute::INFO, "LOCK", "CScraperManifest::cs_mapManifest"); + + // For the selected project in the whitelist, walk each scraper. + for (const auto& iter : mMapCSManifestsBinnedByScraper) + { + // iter.second is the mCSManifest. Walk each manifest in each scraper. + for (const auto& iter_inner : iter.second) + { + // This is the referenced CScraperManifest hash + uint256 nCSManifestHash = iter_inner.second.first; + + // Select manifest based on provided hash. + auto pair = CScraperManifest::mapManifest.find(nCSManifestHash); + CScraperManifest& manifest = *pair->second; + + // Find the part number in the manifest that corresponds to the whitelisted project. + // Once we find a part that corresponds to the selected project in the given manifest, then break, + // because there can only be one part in a manifest corresponding to a given project. + int nPart = -1; + int64_t nProjectObjectTime = 0; + uint256 nProjectObjectHash = 0; + for (const auto& vectoriter : manifest.projects) + { + if (vectoriter.project == iWhitelistProject.m_name) + { + nPart = vectoriter.part1; + nProjectObjectTime = vectoriter.LastModified; + break; + } + } + + // Part -1 means not found, Part 0 is the beacon list, so needs to be greater than zero. + if (nPart > 0) + { + // Get the hash of the part referenced in the manifest. 
+ nProjectObjectHash = manifest.vParts[nPart]->hash; + + // Insert into mManifestsBinnedByTime multimap. + mProjectObjectsBinnedByTime.insert(std::make_pair(nProjectObjectTime, std::make_tuple(nProjectObjectHash, manifest.ConsensusBlock, *manifest.phash))); + + // Even though this is a multimap on purpose because we are going to count occurances of the same key, + // We need to prevent the insertion of a second entry with the same content from the same scraper. This is + // even more true here at the part level than at the manifest level, because if both SCRAPER_CMANIFEST_RETAIN_NONCURRENT + // and SCRAPER_CMANIFEST_INCLUDE_NONCURRENT_PROJ_FILES are true, then there can be many references + // to the same part by different manifests of the same scraper in addition to across scrapers. + auto range = mProjectObjectsBinnedbyContent.equal_range(nProjectObjectHash); + bool bAlreadyExists = false; + for (auto iter3 = range.first; iter3 != range.second; ++iter3) + { + // ---- ScraperID ------ Candidate scraperID to insert + if (iter3->second.first == iter.first) + bAlreadyExists = true; + } + + if (!bAlreadyExists) + { + // Insert into mProjectObjectsBinnedbyContent -------- content hash ------------------- ScraperID -------- Project. + mProjectObjectsBinnedbyContent.insert(std::make_pair(nProjectObjectHash, std::make_pair(iter.first, iWhitelistProject.m_name))); + } + } + } + } + + bool bMatched = false; + + // Get an iterator pointing to the project in the project index map and match to the hint. + if (const auto SBProjectIter = NewFormatSuperblock.m_projects.Try(iWhitelistProject.m_name)) + { + // Pull the convergence hint (reduced content hash) for the project object. 
+ uint32_t nReducedSBProjectObjectContentHash = SBProjectIter->m_convergence_hint; + + for (const auto& iter : mProjectObjectsBinnedbyContent) + { + uint32_t nReducedProjectObjectContentHash = iter.first.Get64() >> 32; + + // This has the effect of only storing the first one of the series of matching project parts (from different scrapers) with the same + // full hash that match the hint, because of the insert. Below we will count the others matching to check for a supermajority. + // Note that two different full hashes that match the hint for the same project would be included. + if (nReducedProjectObjectContentHash == nReducedSBProjectObjectContentHash) + { + // ------------------------------------------ full content hash ---- project name + mMatchingProjectPartHashes.insert(std::make_pair(iter.first, iWhitelistProject.m_name)); + bMatched = true; + } + } + } + + // Keep track of the number of matched projects. + if (bMatched) ++iCountMatchedProjects; + + if (fDebug3) _log(logattribute::INFO, "ENDLOCK", "CScraperManifest::cs_mapManifest"); + } + + uint256 nLatestProjectPartMatchingHash; + int64_t nLatestProjectPartMatchingTime = 0; + uint256 nLatestProjectPartMatchingConsensusBlock; + uint256 nLatestProjectPartMatchingManifestForBeaconList; + + for (const auto& iter : mMatchingProjectPartHashes) + { + // We want the most recent (latest) part, constrained to the above map. This is slightly different from. + // but similar to the construction of the convergence without constraint. + for (const auto& iter_inner : mProjectObjectsBinnedByTime) + { + // Only go until the first match (latest), then break. 
+ if (std::get<0>(iter_inner.second) == iter.first) + { + nLatestProjectPartMatchingHash = iter.first; + nLatestProjectPartMatchingTime = iter_inner.first; + nLatestProjectPartMatchingConsensusBlock = get<1>(iter_inner.second); + nLatestProjectPartMatchingManifestForBeaconList = get<2>(iter_inner.second); + + break; + } + } + } + + unsigned int nIdenticalContentManifestCount = mProjectObjectsBinnedbyContent.count(nLatestProjectPartMatchingHash); + + if (nIdenticalContentManifestCount >= NumScrapersForSupermajority(nScraperCount)) + { + LOCK(CScraperManifest::cs_mapParts); + + // Get the actual part ----------------- by object hash. + auto iPart = CSplitBlob::mapParts.find(nLatestProjectPartMatchingHash); + + uint256 nContentHashCheck = Hash(iPart->second.data.begin(), iPart->second.data.end()); + + if (nContentHashCheck != iPart->first) + { + _log(logattribute::ERR, "ValidateSuperblock", "Selected Converged Project Object content hash check failed! nContentHashCheck = " + + nContentHashCheck.GetHex() + " and nContentHash = " + iPart->first.GetHex()); + + // Bail. + return scraperSBvalidationtype::Invalid; + } + + // Put Project Object (Part) in Dummy StructConvergedManifest keyed by project. + StructDummyConvergedManifest.ConvergedManifestPartsMap.insert(std::make_pair(iWhitelistProject.m_name, iPart->second.data)); + + // If the indirectly referenced manifest has a consensus time that is greater than already recorded, replace with that time, and also + // change the consensus block to the referred to consensus block. (Note that this is scoped at even above the individual project level, so + // the result after iterating through all projects will be the latest manifest time and consensus block that corresponds to any of the + // parts that meet convergence.) We will also get the manifest hash too, so we can retrieve the associated BeaconList that was used. 
+ // This version is subject to the constraints of the hint by the above code, but operates similar to a normal convergence construction. + if (nLatestProjectPartMatchingTime > nConvergedConsensusTime) + { + nConvergedConsensusTime = nLatestProjectPartMatchingTime; + nConvergedConsensusBlock = nLatestProjectPartMatchingConsensusBlock; + nManifestHashForConvergedBeaconList = nLatestProjectPartMatchingManifestForBeaconList; + } + + // Keep track of the number of matched projects that go in the convergence. + ++iCountSuccessfulConvergedProjects; + } + } + + // If we have a three way match, then proceed with the construction of local candidate SB + // to validate, otherwise there is no validation. + if (iCountSuccessfulConvergedProjects == iCountMatchedProjects && iCountMatchedProjects == NewFormatSuperblock.m_projects.size()) + { + // Fill out the rest of the ConvergedManifest structure. Note this assumes one-to-one part to project statistics BLOB. Needs to + // be fixed for more than one part per BLOB. This is easy in this case, because it is all from/referring to one manifest. + + // Let's use the BeaconList from the manifest referred to by nManifestHashForConvergedBeaconList. Technically there is no exact answer to + // the BeaconList that should be used in the convergence when putting it together at the individual part level, because each project part + // could have used a different BeaconList (subject to the consensus ladder). It makes sense to use the "newest" one that is associated + // with a manifest that has the newest part associated with a successful part (project) level convergence. + + LOCK(CScraperManifest::cs_mapManifest); + if (fDebug3) _log(logattribute::INFO, "LOCK", "CScraperManifest::cs_mapManifest"); + + // Select manifest based on provided hash. + auto pair = CScraperManifest::mapManifest.find(nManifestHashForConvergedBeaconList); + CScraperManifest& manifest = *pair->second; + + // The vParts[0] is always the BeaconList. 
+ StructDummyConvergedManifest.ConvergedManifestPartsMap.insert(std::make_pair("BeaconList", manifest.vParts[0]->data)); + + StructDummyConvergedManifest.ConsensusBlock = nConvergedConsensusBlock; + + // The ConvergedManifest content hash is in the order of the map key and on the data. + for (const auto& iter : StructDummyConvergedManifest.ConvergedManifestPartsMap) + ss << iter.second; + + StructDummyConvergedManifest.nContentHash = Hash(ss.begin(), ss.end()); + StructDummyConvergedManifest.timestamp = GetAdjustedTime(); + StructDummyConvergedManifest.bByParts = true; + + _log(logattribute::INFO, "ValidateSuperblock", "Successful convergence by project: " + + std::to_string(iCountSuccessfulConvergedProjects) + " out of " + std::to_string(projectWhitelist.size()) + + " projects at " + + DateTimeStrFormat("%x %H:%M:%S", StructDummyConvergedManifest.timestamp)); + + + if (fDebug3) _log(logattribute::INFO, "ENDLOCK", "CScraperManifest::cs_mapManifest"); + + ScraperStats mScraperstats = GetScraperStatsByConvergedManifest(StructDummyConvergedManifest); + + NN::Superblock superblock = NN::Superblock::FromStats(mScraperstats); + + // This should really be done in the superblock class as an overload on NN::Superblock::FromConvergence. 
+ superblock.m_convergence_hint = StructDummyConvergedManifest.nContentHash.Get64() >> 32; + + if (StructDummyConvergedManifest.bByParts) + { + NN::Superblock::ProjectIndex& projects = superblock.m_projects; + + // Add hints created from the hashes of converged manifest parts to each + // superblock project section to assist receiving nodes with validation: + // + for (const auto& part_pair : StructDummyConvergedManifest.ConvergedManifestPartsMap) + { + const std::string& project_name = part_pair.first; + const CSerializeData& part_data = part_pair.second; + + projects.SetHint(project_name, part_data); + } + } + + NN::QuorumHash nCandidateSuperblockHash = superblock.GetHash(); + + if (nCandidateSuperblockHash == nNewFormatSuperblockHash) return scraperSBvalidationtype::ProjectLevelConvergence; + } // If you fall out of this if statement... no validation. + } + + // If we make it here, there is no validation. + return scraperSBvalidationtype::Invalid; +} /*********************** * RPC Functions * @@ -4828,7 +5464,7 @@ UniValue deletecscrapermanifest(const UniValue& params, bool fHelp) LOCK(CScraperManifest::cs_mapManifest); if (fDebug3) _log(logattribute::INFO, "LOCK", "CScraperManifest::cs_mapManifest"); - bool ret = ScraperDeleteCScraperManifest(uint256(params[0].get_str())); + bool ret = CScraperManifest::DeleteManifest(uint256(params[0].get_str()), true); if (fDebug3) _log(logattribute::INFO, "ENDLOCK", "CScraperManifest::cs_mapManifest"); @@ -4863,19 +5499,30 @@ UniValue testnewsb(const UniValue& params, bool fHelp) "Test the new Superblock class.\n" ); - LOCK(cs_ConvergedScraperStatsCache); + { + LOCK(cs_ConvergedScraperStatsCache); - if (ConvergedScraperStatsCache.sContract.empty()) - throw std::runtime_error( - "Wait until a convergence is formed.\n" - ); + if (ConvergedScraperStatsCache.sContract.empty()) + throw std::runtime_error( + "Wait until a convergence is formed.\n" + ); + } UniValue res(UniValue::VOBJ); + _log(logattribute::INFO, "testnewsb", "Size 
of the PastConvergences map = " + std::to_string(ConvergedScraperStatsCache.PastConvergences.size())); + res.pushKV("Size of the PastConvergences map", std::to_string(ConvergedScraperStatsCache.PastConvergences.size())); + // Contract binary pack/unpack check... _log(logattribute::INFO, "testnewsb", "Checking compatibility with binary SB pack/unpack by packing then unpacking, then comparing to the original"); - std::string& sSBCoreData = ConvergedScraperStatsCache.sContract; + std::string sSBCoreData; + + { + LOCK(cs_ConvergedScraperStatsCache); + + sSBCoreData = ConvergedScraperStatsCache.sContract; + } std::string sPackedSBCoreData = PackBinarySuperblock(sSBCoreData); std::string sSBCoreData_out = UnpackBinarySuperblock(sPackedSBCoreData); @@ -4903,11 +5550,20 @@ UniValue testnewsb(const UniValue& params, bool fHelp) CDataStream ss(SER_NETWORK, 1); uint64_t nNewFormatSuperblockSerSize; uint64_t nNewFormatSuperblock_outSerSize; - uint256 nNewFormatSuperblockHash; - uint256 nNewFormatSuperblock_outHash; + NN::QuorumHash nNewFormatSuperblockHash; + NN::QuorumHash nNewFormatSuperblock_outHash; + uint32_t nNewFormatSuperblockReducedContentHashFromConvergenceHint; + uint32_t nNewFormatSuperblockReducedContentHashFromUnderlyingManifestHint; + + { + LOCK(cs_ConvergedScraperStatsCache); - NewFormatSuperblock = NN::Superblock::FromStats(ConvergedScraperStatsCache.mScraperConvergedStats); - NewFormatSuperblock.m_timestamp = ConvergedScraperStatsCache.nTime; + NewFormatSuperblock = NN::Superblock::FromConvergence(ConvergedScraperStatsCache); + // NewFormatSuperblock = NN::Superblock::FromStats(ConvergedScraperStatsCache.mScraperConvergedStats); + NewFormatSuperblock.m_timestamp = ConvergedScraperStatsCache.nTime; + + _log(logattribute::INFO, "testnewsb", "ConvergedScraperStatsCache.Convergence.bByParts = " + std::to_string(ConvergedScraperStatsCache.Convergence.bByParts)); + } _log(logattribute::INFO, "testnewsb", "m_projects size = " + 
std::to_string(NewFormatSuperblock.m_projects.size())); res.pushKV("m_projects size", (uint64_t) NewFormatSuperblock.m_projects.size()); @@ -4917,7 +5573,13 @@ UniValue testnewsb(const UniValue& params, bool fHelp) res.pushKV("zero-mag count", (uint64_t) NewFormatSuperblock.m_cpids.Zeros()); nNewFormatSuperblockSerSize = NewFormatSuperblock.GetSerializeSize(SER_NETWORK, 1); - nNewFormatSuperblockHash = SerializeHash(NewFormatSuperblock); + nNewFormatSuperblockHash = NewFormatSuperblock.GetHash(); + + _log(logattribute::INFO, "testnewsb", "NewFormatSuperblock.m_version = " + std::to_string(NewFormatSuperblock.m_version)); + res.pushKV("NewFormatSuperblock.m_version", (uint64_t) NewFormatSuperblock.m_version); + + nNewFormatSuperblockReducedContentHashFromConvergenceHint = NewFormatSuperblock.m_convergence_hint; + nNewFormatSuperblockReducedContentHashFromUnderlyingManifestHint = NewFormatSuperblock.m_manifest_content_hint; _log(logattribute::INFO, "testnewsb", "nNewFormatSuperblockSerSize = " + std::to_string(nNewFormatSuperblockSerSize)); res.pushKV("nNewFormatSuperblockSerSize", nNewFormatSuperblockSerSize); @@ -4926,12 +5588,12 @@ UniValue testnewsb(const UniValue& params, bool fHelp) ss >> NewFormatSuperblock_out; nNewFormatSuperblock_outSerSize = NewFormatSuperblock_out.GetSerializeSize(SER_NETWORK, 1); - nNewFormatSuperblock_outHash = SerializeHash(NewFormatSuperblock_out); + nNewFormatSuperblock_outHash = NewFormatSuperblock_out.GetHash(); _log(logattribute::INFO, "testnewsb", "nNewFormatSuperblock_outSerSize = " + std::to_string(nNewFormatSuperblock_outSerSize)); res.pushKV("nNewFormatSuperblock_outSerSize", nNewFormatSuperblock_outSerSize); - if (nNewFormatSuperblockHash == nNewFormatSuperblock_outHash) + if (NewFormatSuperblock.GetHash() == nNewFormatSuperblock_outHash) { _log(logattribute::INFO, "testnewsb", "NewFormatSuperblock serialization passed."); res.pushKV("NewFormatSuperblock serialization", "passed"); @@ -4942,12 +5604,20 @@ UniValue 
testnewsb(const UniValue& params, bool fHelp) res.pushKV("NewFormatSuperblock serialization", "FAILED"); } - NewFormatSuperblock = NN::Superblock::UnpackLegacy(sPackedSBCoreData); - NN::QuorumHash new_legacy_hash = NN::QuorumHash::Hash(NewFormatSuperblock); + NN::Superblock NewFormatSuperblockFromLegacy = NN::Superblock::UnpackLegacy(sPackedSBCoreData); + NN::QuorumHash new_legacy_hash = NN::QuorumHash::Hash(NewFormatSuperblockFromLegacy); std::string old_legacy_hash = GetQuorumHash(sSBCoreData_out); + res.pushKV("NewFormatSuperblockHash", nNewFormatSuperblockHash.ToString()); + _log(logattribute::INFO, "testnewsb", "NewFormatSuperblockHash = " + nNewFormatSuperblockHash.ToString()); res.pushKV("new_legacy_hash", new_legacy_hash.ToString()); + _log(logattribute::INFO, "testnewsb", "new_legacy_hash = " + new_legacy_hash.ToString()); res.pushKV("old_legacy_hash", old_legacy_hash); + _log(logattribute::INFO, "testnewsb", "old_legacy_hash = " + old_legacy_hash); + res.pushKV("nNewFormatSuperblockReducedContentHashFromConvergenceHint", (uint64_t) nNewFormatSuperblockReducedContentHashFromConvergenceHint); + _log(logattribute::INFO, "testnewsb", "nNewFormatSuperblockReducedContentHashFromConvergenceHint = " + std::to_string(nNewFormatSuperblockReducedContentHashFromConvergenceHint)); + res.pushKV("nNewFormatSuperblockReducedContentHashFromUnderlyingManifestHint", (uint64_t) nNewFormatSuperblockReducedContentHashFromUnderlyingManifestHint); + _log(logattribute::INFO, "testnewsb", "nNewFormatSuperblockReducedContentHashFromUnderlyingManifestHint = " + std::to_string(nNewFormatSuperblockReducedContentHashFromUnderlyingManifestHint)); if (new_legacy_hash == old_legacy_hash) { _log(logattribute::INFO, "testnewsb", "NewFormatSuperblock legacy hash passed."); @@ -4958,6 +5628,170 @@ UniValue testnewsb(const UniValue& params, bool fHelp) } _log(logattribute::INFO, "testnewsb", "NewFormatSuperblock legacy unpack number of zero mags = " + 
std::to_string(NewFormatSuperblock.m_cpids.Zeros())); + res.pushKV("NewFormatSuperblock legacy unpack number of zero mags", std::to_string(NewFormatSuperblock.m_cpids.Zeros())); + + // + // ValidateSuperblock() reference function tests (current convergence) + // + + scraperSBvalidationtype validity = ::ValidateSuperblock(NewFormatSuperblock, true); + + if (validity != scraperSBvalidationtype::Invalid && validity != scraperSBvalidationtype::Unknown) + { + _log(logattribute::INFO, "testnewsb", "NewFormatSuperblock validation against current (using cache) passed - " + GetTextForscraperSBvalidationtype(validity)); + res.pushKV("NewFormatSuperblock validation against current (using cache)", "passed - " + GetTextForscraperSBvalidationtype(validity)); + } + else + { + _log(logattribute::INFO, "testnewsb", "NewFormatSuperblock validation against current (using cache) failed - " + GetTextForscraperSBvalidationtype(validity)); + res.pushKV("NewFormatSuperblock validation against current (using cache)", "failed - " + GetTextForscraperSBvalidationtype(validity)); + } + + scraperSBvalidationtype validity2 = ::ValidateSuperblock(NewFormatSuperblock, false); + + if (validity2 != scraperSBvalidationtype::Invalid && validity2 != scraperSBvalidationtype::Unknown) + { + _log(logattribute::INFO, "testnewsb", "NewFormatSuperblock validation against current (without using cache) passed - " + GetTextForscraperSBvalidationtype(validity2)); + res.pushKV("NewFormatSuperblock validation against current (without using cache)", "passed - " + GetTextForscraperSBvalidationtype(validity2)); + } + else + { + _log(logattribute::INFO, "testnewsb", "NewFormatSuperblock validation against current (without using cache) failed - " + GetTextForscraperSBvalidationtype(validity2)); + res.pushKV("NewFormatSuperblock validation against current (without using cache)", "failed - " + GetTextForscraperSBvalidationtype(validity2)); + } + + // + // SuperblockValidator class tests (current convergence) + // + + if 
(NN::ValidateSuperblock(NewFormatSuperblock)) + { + _log(logattribute::INFO, "testnewsb", "NN::ValidateSuperblock validation against current (using cache) passed"); + res.pushKV("NN::ValidateSuperblock validation against current (using cache)", "passed"); + } + else + { + _log(logattribute::INFO, "testnewsb", "NN::ValidateSuperblock validation against current (using cache) failed"); + res.pushKV("NN::ValidateSuperblock validation against current (using cache)", "failed"); + } + + if (NN::ValidateSuperblock(NewFormatSuperblock, false)) + { + _log(logattribute::INFO, "testnewsb", "NN::ValidateSuperblock validation against current (without using cache) passed"); + res.pushKV("NN::ValidateSuperblock validation against current (without using cache)", "passed"); + } + else + { + _log(logattribute::INFO, "testnewsb", "NN::ValidateSuperblock validation against current (without using cache) failed"); + res.pushKV("NN::ValidateSuperblock validation against current (without using cache)", "failed"); + } + + ConvergedManifest RandomPastConvergedManifest; + bool bPastConvergencesEmpty = true; + + { + LOCK(cs_ConvergedScraperStatsCache); + + auto iPastSB = ConvergedScraperStatsCache.PastConvergences.begin(); + + unsigned int PastConvergencesSize = ConvergedScraperStatsCache.PastConvergences.size(); + + if (PastConvergencesSize > 1) + { + std::default_random_engine generator; + std::uniform_int_distribution distribution(0, PastConvergencesSize - 1); + + std::advance(iPastSB, distribution(generator)); + + RandomPastConvergedManifest = iPastSB->second.second; + + bPastConvergencesEmpty = false; + } + + } + + if (!bPastConvergencesEmpty) + { + ScraperStats RandomPastSBStats = GetScraperStatsByConvergedManifest(RandomPastConvergedManifest); + + NN::Superblock RandomPastSB = NN::Superblock::FromStats(RandomPastSBStats); + + // This should really be done in the superblock class as an overload on NN::Superblock::FromConvergence. 
+ RandomPastSB.m_convergence_hint = RandomPastConvergedManifest.nContentHash.Get64() >> 32; + RandomPastSB.m_manifest_content_hint = RandomPastConvergedManifest.nUnderlyingManifestContentHash.Get64() >> 32; + + if (RandomPastConvergedManifest.bByParts) + { + NN::Superblock::ProjectIndex& projects = RandomPastSB.m_projects; + + // Add hints created from the hashes of converged manifest parts to each + // superblock project section to assist receiving nodes with validation: + // + for (const auto& part_pair : RandomPastConvergedManifest.ConvergedManifestPartsMap) + { + const std::string& project_name = part_pair.first; + const CSerializeData& part_data = part_pair.second; + + projects.SetHint(project_name, part_data); + } + } + + // + // ValidateSuperblock() reference function tests (past convergence) + // + + scraperSBvalidationtype validity3 = ::ValidateSuperblock(RandomPastSB, true); + + if (validity3 != scraperSBvalidationtype::Invalid && validity3 != scraperSBvalidationtype::Unknown) + { + _log(logattribute::INFO, "testnewsb", "NewFormatSuperblock validation against random past (using cache) passed - " + GetTextForscraperSBvalidationtype(validity3)); + res.pushKV("NewFormatSuperblock validation against random past (using cache)", "passed - " + GetTextForscraperSBvalidationtype(validity3)); + } + else + { + _log(logattribute::INFO, "testnewsb", "NewFormatSuperblock validation against random past (using cache) failed - " + GetTextForscraperSBvalidationtype(validity3)); + res.pushKV("NewFormatSuperblock validation against random past (using cache)", "failed - " + GetTextForscraperSBvalidationtype(validity3)); + } + + scraperSBvalidationtype validity4 = ::ValidateSuperblock(RandomPastSB, false); + + if (validity4 != scraperSBvalidationtype::Invalid && validity4 != scraperSBvalidationtype::Unknown) + { + _log(logattribute::INFO, "testnewsb", "NewFormatSuperblock validation against random past (without using cache) passed - " + 
GetTextForscraperSBvalidationtype(validity4)); + res.pushKV("NewFormatSuperblock validation against random past (without using cache)", "passed - " + GetTextForscraperSBvalidationtype(validity4)); + } + else + { + _log(logattribute::INFO, "testnewsb", "NewFormatSuperblock validation against random past (without using cache) failed - " + GetTextForscraperSBvalidationtype(validity4)); + res.pushKV("NewFormatSuperblock validation against random past (without using cache)", "failed - " + GetTextForscraperSBvalidationtype(validity4)); + } + + // + // SuperblockValidator class tests (past convergence) + // + + if (NN::ValidateSuperblock(RandomPastSB)) + { + _log(logattribute::INFO, "testnewsb", "NN::ValidateSuperblock validation against random past (using cache) passed"); + res.pushKV("NN::ValidateSuperblock validation against random past (using cache)", "passed"); + } + else + { + _log(logattribute::INFO, "testnewsb", "NN::ValidateSuperblock validation against random past (using cache) failed"); + res.pushKV("NN::ValidateSuperblock validation against random past (using cache)", "failed"); + } + + if (NN::ValidateSuperblock(RandomPastSB, false)) + { + _log(logattribute::INFO, "testnewsb", "NN::ValidateSuperblock validation against random past (without using cache) passed"); + res.pushKV("NN::ValidateSuperblock validation against random past (without using cache)", "passed"); + } + else + { + _log(logattribute::INFO, "testnewsb", "NN::ValidateSuperblock validation against random past (without using cache) failed"); + res.pushKV("NN::ValidateSuperblock validation against random past (without using cache)", "failed"); + } + } return res; } diff --git a/src/scraper/scraper.h b/src/scraper/scraper.h index af42e01274..c4437804ff 100644 --- a/src/scraper/scraper.h +++ b/src/scraper/scraper.h @@ -45,9 +45,11 @@ namespace boostio = boost::iostreams; // These can get overridden by the GetArgs in init.cpp or ScraperApplyAppCacheEntries. // The appcache entries will take precedence. 
-// The amount of time to wait between scraper loop runs. +// The amount of time to wait between scraper loop runs. This is in +// milliseconds. unsigned int nScraperSleep = 300000; -// The amount of time before SB is due to start scraping. +// The amount of time before SB is due to start scraping. This is in +// seconds. unsigned int nActiveBeforeSB = 14400; // Explorer mode flag. Only effective if scraper is active. @@ -120,16 +122,32 @@ bool IsScraperAuthorizedToBroadcastManifests(CBitcoinAddress& AddressOut, CKey& std::string ScraperGetNeuralContract(bool bStoreConvergedStats = false, bool bContractDirectFromStatsUpdate = false); NN::Superblock ScraperGetSuperblockContract(bool bStoreConvergedStats = false, bool bContractDirectFromStatsUpdate = false); std::string ScraperGetNeuralHash(); -NN::QuorumHash ScraperGetSuperblockHash(); bool ScraperSynchronizeDPOR(); +scraperSBvalidationtype ValidateSuperblock(const NN::Superblock& NewFormatSuperblock, bool bUseCache = true); static std::vector vstatsobjecttypestrings = { "NetWorkWide", "byCPID", "byProject", "byCPIDbyProject" }; +static std::vector scraperSBvalidationtypestrings = { + "Invalid", + "Unknown", + "CurrentCachedConvergence", + "CachedPastConvergence", + "ManifestLevelConvergence", + "ProjectLevelConvergence" +}; + + const std::string GetTextForstatsobjecttype(statsobjecttype StatsObjType) { return vstatsobjecttypestrings[static_cast(StatsObjType)]; } +const std::string GetTextForscraperSBvalidationtype(scraperSBvalidationtype ScraperSBValidationType) +{ + return scraperSBvalidationtypestrings[static_cast(ScraperSBValidationType)]; +} + + double MagRound(double dMag) { return round(dMag / MAG_ROUND) * MAG_ROUND; diff --git a/src/scraper_net.cpp b/src/scraper_net.cpp index 528ab8e977..25612a6630 100644 --- a/src/scraper_net.cpp +++ b/src/scraper_net.cpp @@ -18,23 +18,28 @@ //Globals std::map CSplitBlob::mapParts; +CCriticalSection CSplitBlob::cs_mapParts; std::map< uint256, std::unique_ptr > 
CScraperManifest::mapManifest; +std::map>> CScraperManifest::mapPendingDeletedManifest; CCriticalSection CScraperManifest::cs_mapManifest; extern unsigned int SCRAPER_MISBEHAVING_NODE_BANSCORE; extern int64_t SCRAPER_DEAUTHORIZED_BANSCORE_GRACE_PERIOD; +extern int64_t SCRAPER_CMANIFEST_RETENTION_TIME; +extern unsigned int nScraperSleep; extern AppCacheSectionExt mScrapersExt; extern std::atomic nSyncTime; extern ConvergedScraperStats ConvergedScraperStatsCache; extern CCriticalSection cs_mScrapersExt; extern CCriticalSection cs_ConvergedScraperStatsCache; +// A lock needs to be taken on cs_mapParts before calling this function. bool CSplitBlob::RecvPart(CNode* pfrom, CDataStream& vRecv) { - /* Part of larger hashed blob. Currently only used for scraper data sharing. + /* Part of larger hashed blob. Currently only used for scraper data sharing. * retrive parent object from mapBlobParts * notify object or ignore if no object found * erase from mapAlreadyAskedFor - */ + */ auto& ss= vRecv; uint256 hash(Hash(ss.begin(), ss.end())); mapAlreadyAskedFor.erase(CInv(MSG_PART,hash)); @@ -166,20 +171,19 @@ bool CScraperManifest::AlreadyHave(CNode* pfrom, const CInv& inv) } if( MSG_SCRAPERINDEX !=inv.type ) { - /* For any other objects, just say that we do not need it: */ + // For any other objects, just say that we do not need it: return true; } - /* Inv-entory notification about scraper data index - * see if we already have it - * if yes, relay pfrom to Parts system as a fetch source and return true - * else return false - */ - + // Inventory notification about scraper data index--see if we already have it. + // If yes, relay pfrom to Parts system as a fetch source and return true + // else return false. auto found = mapManifest.find(inv.hash); if( found!=mapManifest.end() ) { - found->second->UseAsSource(pfrom); + // Only record UseAsSource if manifest is current to avoid spurious parts. 
+ if (found->second->IsManifestCurrent()) found->second->UseAsSource(pfrom); + return true; } else @@ -412,11 +416,25 @@ void CScraperManifest::UnserializeCheck(CReaderStream& ss, unsigned int& banscor addPart(ph); } +bool CScraperManifest::IsManifestCurrent() const +{ + // This checks to see if the manifest is current, i.e. not about to be deleted. + return (nTime >= GetAdjustedTime() - SCRAPER_CMANIFEST_RETENTION_TIME + (int64_t) nScraperSleep / 1000); +} + + // A lock must be taken on cs_mapManifest before calling this function. -bool CScraperManifest::DeleteManifest(const uint256& nHash) +bool CScraperManifest::DeleteManifest(const uint256& nHash, const bool& fImmediate) { - if (mapManifest.erase(nHash)) + bool fDeleted = false; + + auto iter = mapManifest.find(nHash); + + if(iter != mapManifest.end()) { + if (!fImmediate) mapPendingDeletedManifest[nHash] = std::make_pair(GetAdjustedTime(), std::move(iter->second)); + mapManifest.erase(nHash); + // lock cs_ConvergedScraperStatsCache and mark ConvergedScraperStatsCache dirty because a manifest has been deleted // that could have been used in the cached convergence, so the convergence may change. { @@ -425,18 +443,52 @@ bool CScraperManifest::DeleteManifest(const uint256& nHash) ConvergedScraperStatsCache.bClean = false; } - return true; + fDeleted = true; } - else + + return fDeleted; +} + +// A lock must be taken on cs_mapManifest before calling this function. +std::map>::iterator CScraperManifest::DeleteManifest(std::map>::iterator& iter, + const bool& fImmediate) +{ + if (!fImmediate) mapPendingDeletedManifest[iter->first] = std::make_pair(GetAdjustedTime(), std::move(iter->second)); + iter = mapManifest.erase(iter); + + // lock cs_ConvergedScraperStatsCache and mark ConvergedScraperStatsCache dirty because a manifest has been deleted + // that could have been used in the cached convergence, so the convergence may change. This is not conditional, because the + // iterator must be valid. 
{ - return false; + LOCK(cs_ConvergedScraperStatsCache); + + ConvergedScraperStatsCache.bClean = false; } + return iter; } // A lock must be taken on cs_mapManifest before calling this function. -std::map>::iterator CScraperManifest::DeleteManifest(std::map>::iterator& iter) +unsigned int CScraperManifest::DeletePendingDeletedManifests() { - return mapManifest.erase(iter); + unsigned int nDeleted = 0; + int64_t nDeleteThresholdTime = GetAdjustedTime() - nScraperSleep / 1000; + + std::map>>::iterator iter; + for (iter = mapPendingDeletedManifest.begin(); iter != mapPendingDeletedManifest.end();) + { + // Delete any entry more than nScraperSleep old. + if (iter->second.first < nDeleteThresholdTime) + { + iter = mapPendingDeletedManifest.erase(iter); + ++nDeleted; + } + else + { + ++iter; + } + } + + return nDeleted; } // A lock must be taken on cs_mapManifest before calling this function. @@ -504,7 +556,10 @@ bool CScraperManifest::RecvManifest(CNode* pfrom, CDataStream& vRecv) manifest.Complete(); } else { /* else request missing parts from the sender */ - manifest.UseAsSource(pfrom); + // Note: As an additional buffer to prevent spurious part receipts, if the manifest timestamp is within nScraperSleep of expiration (i.e. + // about to go on the pending delete list, then do not request missing parts, as it is possible that the manifest will be deleted + // by the housekeeping loop in between the receipt of the manifest, request for parts, and receipt of parts otherwise. + if (manifest.IsManifestCurrent()) manifest.UseAsSource(pfrom); } return true; } diff --git a/src/scraper_net.h b/src/scraper_net.h index 8d068a577f..838ba7f5d6 100755 --- a/src/scraper_net.h +++ b/src/scraper_net.h @@ -1,13 +1,19 @@ +#pragma once + /* scraper_net.h */ -/* Maybe the parts system will be usefull for other things so let's abstract - * that to parent class. Sice it will be all in one file there will not be any - * polymorfism. 
+/* Maybe the parts system will be useful for other things so let's abstract + * that to parent class. Since it will be all in one file there will not be any + * polymorphism. */ -#include +#include "net.h" #include "sync.h" +#include +#include + + /** Abstract class for blobs that are split into parts. */ class CSplitBlob { @@ -60,6 +66,8 @@ class CSplitBlob static std::map mapParts; size_t cntPartsRcvd =0; + static CCriticalSection cs_mapParts; + }; /** A objects holding info about the scraper data file we have or are downloading. */ @@ -69,8 +77,12 @@ class CScraperManifest public: /* static methods */ /** map from index hash to scraper Index, so we can process Inv messages */ - static std::map< uint256, std::unique_ptr > mapManifest; + static std::map> mapManifest; + + // ------------ hash -------------- nTime ------- pointer to CScraperManifest + static std::map>> mapPendingDeletedManifest; + // Protects both mapManifest and MapPendingDeletedManifest static CCriticalSection cs_mapManifest; /** Process a message containing Index of Scraper Data. 
@@ -101,11 +113,14 @@ class CScraperManifest static bool IsManifestAuthorized(CPubKey& PubKey, unsigned int& banscore_out); /** Delete Manifest (key version) **/ - static bool DeleteManifest(const uint256& nHash); + static bool DeleteManifest(const uint256& nHash, const bool& fImmediate = false); /** Delete Manifest (iterator version) **/ static std::map>::iterator - DeleteManifest(std::map>::iterator& iter); + DeleteManifest(std::map>::iterator& iter, const bool& fImmediate = false); + + /** Delete PendingDeletedManifests **/ + static unsigned int DeletePendingDeletedManifests(); public: /*==== fields ====*/ @@ -155,6 +170,9 @@ class CScraperManifest void SerializeWithoutSignature(CDataStream& s, int nType, int nVersion) const; void SerializeForManifestCompare(CDataStream& ss, int nType, int nVersion) const; void UnserializeCheck(CReaderStream& s, unsigned int& banscore_out); + + bool IsManifestCurrent() const; + UniValue ToJson() const; }; diff --git a/src/serialize.h b/src/serialize.h index 82409b736c..d3198d7ddc 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -36,6 +36,23 @@ inline T& REF(const T& val) return const_cast(val); } +/** + * Used to acquire a non-const pointer "this" to generate bodies + * of const serialization operations from a template + */ +template +inline T* NCONST_PTR(const T* val) +{ + return const_cast(val); +} + +//! Safely convert odd char pointer types to standard ones. +inline char* CharCast(char* c) { return c; } +inline char* CharCast(unsigned char* c) { return (char*)c; } +inline const char* CharCast(const char* c) { return c; } +inline const char* CharCast(const unsigned char* c) { return (const char*)c; } + + ///////////////////////////////////////////////////////////////// // // Templates for serializing to anything that looks like a stream, @@ -52,6 +69,8 @@ enum // modifiers SER_SKIPSIG = (1 << 16), SER_BLOCKHEADERONLY = (1 << 17), + + // Bits 24-31 are reserved for implementation-specific modifiers. 
}; #define IMPLEMENT_SERIALIZE(statements) \ @@ -95,6 +114,28 @@ enum #define READWRITE(obj) (nSerSize += ::SerReadWrite(s, (obj), nType, nVersion, ser_action)) +//! Convert the reference base type to X, without changing constness or reference type. +template X& ReadWriteAsHelper(X& x) { return x; } +template const X& ReadWriteAsHelper(const X& x) { return x; } + +#define READWRITEVARIADIC(...) (::SerReadWriteMany(s, ser_action, __VA_ARGS__)) +#define READWRITEAS(type, obj) (::SerReadWriteMany(s, ser_action, ReadWriteAsHelper(obj))) + +/** + * Implement three methods for serializable objects. These are actually wrappers over + * "SerializationOp" template, which implements the body of each class' serialization + * code. Adding "ADD_SERIALIZE_METHODS" in the body of the class causes these wrappers to be + * added as members. + */ +#define ADD_SERIALIZE_METHODS \ + template \ + void Serialize(Stream& s, int, int) const { \ + NCONST_PTR(this)->SerializationOp(s, CSerActionSerialize()); \ + } \ + template \ + void Unserialize(Stream& s, int, int) { \ + SerializationOp(s, CSerActionUnserialize()); \ + } @@ -132,6 +173,8 @@ template inline void Serialize(Stream& s, signed long long a, template inline void Serialize(Stream& s, unsigned long long a, int, int=0) { WRITEDATA(s, a); } template inline void Serialize(Stream& s, float a, int, int=0) { WRITEDATA(s, a); } template inline void Serialize(Stream& s, double a, int, int=0) { WRITEDATA(s, a); } +template inline void Serialize(Stream& s, const char (&a)[N], int , int=0) { s.write(a, N); } +template inline void Serialize(Stream& s, const unsigned char (&a)[N], int , int=0) { s.write(CharCast(a), N); } template inline void Unserialize(Stream& s, char& a, int, int=0) { READDATA(s, a); } template inline void Unserialize(Stream& s, signed char& a, int, int=0) { READDATA(s, a); } @@ -146,6 +189,8 @@ template inline void Unserialize(Stream& s, signed long long& a template inline void Unserialize(Stream& s, unsigned long long& 
a, int, int=0) { READDATA(s, a); } template inline void Unserialize(Stream& s, float& a, int, int=0) { READDATA(s, a); } template inline void Unserialize(Stream& s, double& a, int, int=0) { READDATA(s, a); } +template inline void Unserialize(Stream& s, char (&a)[N], int, int=0) { s.read(a, N); } +template inline void Unserialize(Stream& s, unsigned char (&a)[N], int , int=0) { s.read(CharCast(a), N); } inline unsigned int GetSerializeSize(bool a, int, int=0) { return sizeof(char); } template inline void Serialize(Stream& s, bool a, int, int=0) { char f=a; WRITEDATA(s, f); } @@ -155,7 +200,6 @@ template inline void Unserialize(Stream& s, bool& a, int, int=0 - // // Compact size // size < 253 -- 1 byte @@ -868,6 +912,41 @@ struct ser_streamplaceholder }; +template +void SerializeMany(Stream& s) +{ +} + +template +void SerializeMany(Stream& s, const Arg& arg, const Args&... args) +{ + ::Serialize(s, arg, 0 /* type, unused */, 0 /* version, unused */); + ::SerializeMany(s, args...); +} + +template +inline void UnserializeMany(Stream& s) +{ +} + +template +inline void UnserializeMany(Stream& s, Arg&& arg, Args&&... args) +{ + ::Unserialize(s, arg, 0 /* type, unused */, 0 /* version, unused */); + ::UnserializeMany(s, args...); +} + +template +inline void SerReadWriteMany(Stream& s, CSerActionSerialize ser_action, const Args&... args) +{ + ::SerializeMany(s, args...); +} + +template +inline void SerReadWriteMany(Stream& s, CSerActionUnserialize ser_action, Args&&... 
args) +{ + ::UnserializeMany(s, args...); +} @@ -1304,7 +1383,7 @@ class CAutoFile operator FILE*() { return file; } FILE* operator->() { return file; } FILE& operator*() { return *file; } - FILE** operator&() { return &file; } + // FILE** operator&() { return &file; } FILE* operator=(FILE* pnew) { return file = pnew; } bool operator!() { return (file == NULL); } @@ -1332,6 +1411,16 @@ class CAutoFile void ReadVersion() { *this >> nVersion; } void WriteVersion() { *this << nVersion; } + /** Get wrapped FILE* without transfer of ownership. + * @note Ownership of the FILE* will remain with this class. Use this only if the scope of the + * CAutoFile outlives use of the passed pointer. + */ + FILE* Get() const { return file; } + + /** Return true if the wrapped FILE* is nullptr, false otherwise. + */ + bool IsNull() const { return (file == nullptr); } + CAutoFile& read(char* pch, size_t nSize) { if (!file) diff --git a/src/test/DoS_tests.cpp b/src/test/DoS_tests.cpp index 3b3a2d60ad..655809f5dd 100755 --- a/src/test/DoS_tests.cpp +++ b/src/test/DoS_tests.cpp @@ -10,6 +10,7 @@ #include "wallet.h" #include "net.h" #include "util.h" +#include "banman.h" #include @@ -30,40 +31,40 @@ BOOST_AUTO_TEST_SUITE(DoS_tests) BOOST_AUTO_TEST_CASE(DoS_banning) { - CNode::ClearBanned(); + g_banman->ClearBanned(); CAddress addr1(ip(0xa0b0c001)); CNode dummyNode1(INVALID_SOCKET, addr1, "", true); dummyNode1.Misbehaving(100); // Should get banned - BOOST_CHECK(CNode::IsBanned(addr1)); - BOOST_CHECK(!CNode::IsBanned(ip(0xa0b0c001|0x0000ff00))); // Different IP, not banned + BOOST_CHECK(g_banman->IsBanned(addr1)); + BOOST_CHECK(!g_banman->IsBanned(ip(0xa0b0c001|0x0000ff00))); // Different IP, not banned CAddress addr2(ip(0xa0b0c002)); CNode dummyNode2(INVALID_SOCKET, addr2, "", true); dummyNode2.Misbehaving(50); - BOOST_CHECK(!CNode::IsBanned(addr2)); // 2 not banned yet... - BOOST_CHECK(CNode::IsBanned(addr1)); // ... 
but 1 still should be + BOOST_CHECK(!g_banman->IsBanned(addr2)); // 2 not banned yet... + BOOST_CHECK(g_banman->IsBanned(addr1)); // ... but 1 still should be dummyNode2.Misbehaving(50); - BOOST_CHECK(CNode::IsBanned(addr2)); + BOOST_CHECK(g_banman->IsBanned(addr2)); } BOOST_AUTO_TEST_CASE(DoS_banscore) { - CNode::ClearBanned(); + g_banman->ClearBanned(); mapArgs["-banscore"] = "111"; // because 11 is my favorite number CAddress addr1(ip(0xa0b0c001)); CNode dummyNode1(INVALID_SOCKET, addr1, "", true); dummyNode1.Misbehaving(100); - BOOST_CHECK(!CNode::IsBanned(addr1)); + BOOST_CHECK(!g_banman->IsBanned(addr1)); dummyNode1.Misbehaving(10); - BOOST_CHECK(!CNode::IsBanned(addr1)); + BOOST_CHECK(!g_banman->IsBanned(addr1)); dummyNode1.Misbehaving(1); - BOOST_CHECK(CNode::IsBanned(addr1)); + BOOST_CHECK(g_banman->IsBanned(addr1)); mapArgs.erase("-banscore"); } BOOST_AUTO_TEST_CASE(DoS_bantime) { - CNode::ClearBanned(); + g_banman->ClearBanned(); int64_t nStartTime = GetTime(); SetMockTime(nStartTime); // Overrides future calls to GetTime() @@ -71,13 +72,13 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) CNode dummyNode(INVALID_SOCKET, addr, "", true); dummyNode.Misbehaving(100); - BOOST_CHECK(CNode::IsBanned(addr)); + BOOST_CHECK(g_banman->IsBanned(addr)); SetMockTime(nStartTime+60*60); - BOOST_CHECK(CNode::IsBanned(addr)); + BOOST_CHECK(g_banman->IsBanned(addr)); SetMockTime(nStartTime+60*60*24+1); - BOOST_CHECK(!CNode::IsBanned(addr)); + BOOST_CHECK(!g_banman->IsBanned(addr)); } CTransaction RandomOrphan() diff --git a/src/test/neuralnet/superblock_tests.cpp b/src/test/neuralnet/superblock_tests.cpp index 4d29d1c0a7..29d55dc3b9 100644 --- a/src/test/neuralnet/superblock_tests.cpp +++ b/src/test/neuralnet/superblock_tests.cpp @@ -1,6 +1,7 @@ #include "compat/endian.h" #include "neuralnet/superblock.h" +#include #include #include #include @@ -250,6 +251,27 @@ ScraperStats GetTestScraperStats() return stats; } + +ConvergedScraperStats GetTestConvergence(const bool by_parts = false) 
+{ + ConvergedScraperStats convergence; + + convergence.mScraperConvergedStats = GetTestScraperStats(); + + convergence.Convergence.bByParts = by_parts; + convergence.Convergence.nContentHash + = uint256("1111111111111111111111111111111111111111111111111111111111111111"); + convergence.Convergence.nUnderlyingManifestContentHash + = uint256("2222222222222222222222222222222222222222222222222222222222222222"); + + // Add some project parts with the same names as the projects in the stats. + // The part data doesn't matter, so we just add empty containers. + // + convergence.Convergence.ConvergedManifestPartsMap.emplace("project_1", CSerializeData()); + convergence.Convergence.ConvergedManifestPartsMap.emplace("project_2", CSerializeData()); + + return convergence; +} } // anonymous namespace // ----------------------------------------------------------------------------- @@ -353,6 +375,8 @@ BOOST_AUTO_TEST_CASE(it_initializes_to_an_empty_superblock) NN::Superblock superblock; BOOST_CHECK(superblock.m_version == NN::Superblock::CURRENT_VERSION); + BOOST_CHECK(superblock.m_convergence_hint == 0); + BOOST_CHECK(superblock.m_manifest_content_hint == 0); BOOST_CHECK(superblock.m_cpids.empty() == true); BOOST_CHECK(superblock.m_cpids.TotalMagnitude() == 0); @@ -371,6 +395,8 @@ BOOST_AUTO_TEST_CASE(it_initializes_to_the_specified_version) NN::Superblock superblock(1); BOOST_CHECK(superblock.m_version == 1); + BOOST_CHECK(superblock.m_convergence_hint == 0); + BOOST_CHECK(superblock.m_manifest_content_hint == 0); BOOST_CHECK(superblock.m_cpids.empty() == true); BOOST_CHECK(superblock.m_cpids.TotalMagnitude() == 0); @@ -388,6 +414,54 @@ BOOST_AUTO_TEST_CASE(it_initializes_from_a_provided_set_of_scraper_statistics) { NN::Superblock superblock = NN::Superblock::FromStats(GetTestScraperStats()); + BOOST_CHECK(superblock.m_version == NN::Superblock::CURRENT_VERSION); + BOOST_CHECK(superblock.m_convergence_hint == 0); + BOOST_CHECK(superblock.m_manifest_content_hint == 0); + + 
auto& cpids = superblock.m_cpids; + BOOST_CHECK(cpids.size() == 2); + BOOST_CHECK(cpids.TotalMagnitude() == 10016); + BOOST_CHECK(cpids.AverageMagnitude() == 5008); + BOOST_CHECK(cpids.At(0)->first.ToString() == "00010203040506070809101112131415"); + BOOST_CHECK(cpids.At(0)->second == 4008); + BOOST_CHECK(cpids.At(1)->first.ToString() == "15141312111009080706050403020100"); + BOOST_CHECK(cpids.At(1)->second == 6008); + + auto& projects = superblock.m_projects; + BOOST_CHECK(projects.size() == 2); + BOOST_CHECK(projects.TotalRac() == 410); + BOOST_CHECK(projects.AverageRac() == 205.0); + + if (const auto project_1 = projects.Try("project_1")) { + BOOST_CHECK(project_1->m_total_credit == 3000); + BOOST_CHECK(project_1->m_average_rac == 102); + BOOST_CHECK(project_1->m_rac == 203); + BOOST_CHECK(project_1->m_convergence_hint == 0); + } else { + BOOST_FAIL("Project 1 not found in superblock."); + } + + if (const auto project_2 = projects.Try("project_2")) { + BOOST_CHECK(project_2->m_total_credit == 7000); + BOOST_CHECK(project_2->m_average_rac == 104); + BOOST_CHECK(project_2->m_rac == 207); + BOOST_CHECK(project_2->m_convergence_hint == 0); + } else { + BOOST_FAIL("Project 2 not found in superblock."); + } +} + +BOOST_AUTO_TEST_CASE(it_initializes_from_a_provided_scraper_convergnce) +{ + NN::Superblock superblock = NN::Superblock::FromConvergence(GetTestConvergence()); + + BOOST_CHECK(superblock.m_version == NN::Superblock::CURRENT_VERSION); + + // This initialization mode must set the convergence hint derived from + // the content hash of the convergence: + BOOST_CHECK(superblock.m_convergence_hint == 0x11111111); + BOOST_CHECK(superblock.m_manifest_content_hint == 0x22222222); + auto& cpids = superblock.m_cpids; BOOST_CHECK(cpids.size() == 2); BOOST_CHECK(cpids.TotalMagnitude() == 10016); @@ -398,6 +472,7 @@ BOOST_AUTO_TEST_CASE(it_initializes_from_a_provided_set_of_scraper_statistics) BOOST_CHECK(cpids.At(1)->second == 6008); auto& projects = 
superblock.m_projects; + BOOST_CHECK(projects.m_converged_by_project == false); BOOST_CHECK(projects.size() == 2); BOOST_CHECK(projects.TotalRac() == 410); BOOST_CHECK(projects.AverageRac() == 205.0); @@ -406,6 +481,7 @@ BOOST_AUTO_TEST_CASE(it_initializes_from_a_provided_set_of_scraper_statistics) BOOST_CHECK(project_1->m_total_credit == 3000); BOOST_CHECK(project_1->m_average_rac == 102); BOOST_CHECK(project_1->m_rac == 203); + BOOST_CHECK(project_1->m_convergence_hint == 0); } else { BOOST_FAIL("Project 1 not found in superblock."); } @@ -414,6 +490,59 @@ BOOST_AUTO_TEST_CASE(it_initializes_from_a_provided_set_of_scraper_statistics) BOOST_CHECK(project_2->m_total_credit == 7000); BOOST_CHECK(project_2->m_average_rac == 104); BOOST_CHECK(project_2->m_rac == 207); + BOOST_CHECK(project_2->m_convergence_hint == 0); + } else { + BOOST_FAIL("Project 2 not found in superblock."); + } +} + +BOOST_AUTO_TEST_CASE(it_initializes_from_a_fallback_by_project_scraper_convergnce) +{ + NN::Superblock superblock = NN::Superblock::FromConvergence( + GetTestConvergence(true)); // Set fallback by project flag + + BOOST_CHECK(superblock.m_version == NN::Superblock::CURRENT_VERSION); + BOOST_CHECK(superblock.m_convergence_hint == 0x11111111); + // Manifest content hint not set for fallback convergence: + BOOST_CHECK(superblock.m_manifest_content_hint == 0x00000000); + + auto& cpids = superblock.m_cpids; + BOOST_CHECK(cpids.size() == 2); + BOOST_CHECK(cpids.TotalMagnitude() == 10016); + BOOST_CHECK(cpids.AverageMagnitude() == 5008); + BOOST_CHECK(cpids.At(0)->first.ToString() == "00010203040506070809101112131415"); + BOOST_CHECK(cpids.At(0)->second == 4008); + BOOST_CHECK(cpids.At(1)->first.ToString() == "15141312111009080706050403020100"); + BOOST_CHECK(cpids.At(1)->second == 6008); + + auto& projects = superblock.m_projects; + + // By project flag must be true in a fallback-to-project convergence: + BOOST_CHECK(projects.m_converged_by_project == true); + BOOST_CHECK(projects.size() 
== 2); + BOOST_CHECK(projects.TotalRac() == 410); + BOOST_CHECK(projects.AverageRac() == 205.0); + + if (const auto project_1 = projects.Try("project_1")) { + BOOST_CHECK(project_1->m_total_credit == 3000); + BOOST_CHECK(project_1->m_average_rac == 102); + BOOST_CHECK(project_1->m_rac == 203); + + // The convergence hint must be set in fallback-to-project convergence. + // This is derived from the hash of an empty part data: + BOOST_CHECK(project_1->m_convergence_hint == 0xd3591376); + } else { + BOOST_FAIL("Project 1 not found in superblock."); + } + + if (const auto project_2 = projects.Try("project_2")) { + BOOST_CHECK(project_2->m_total_credit == 7000); + BOOST_CHECK(project_2->m_average_rac == 104); + BOOST_CHECK(project_2->m_rac == 207); + + // The convergence hint must be set in fallback-to-project convergence. + // This is derived from the hash of an empty part data: + BOOST_CHECK(project_2->m_convergence_hint == 0xd3591376); } else { BOOST_FAIL("Project 2 not found in superblock."); } @@ -449,6 +578,8 @@ BOOST_AUTO_TEST_CASE(it_initializes_by_unpacking_a_legacy_binary_contract) // Legacy string-packed superblocks unpack to version 1: BOOST_CHECK(superblock.m_version == 1); + BOOST_CHECK(superblock.m_convergence_hint == 0); + BOOST_CHECK(superblock.m_manifest_content_hint == 0); BOOST_CHECK(superblock.m_cpids.size() == 3); BOOST_CHECK(superblock.m_cpids.Zeros() == 5); @@ -459,6 +590,7 @@ BOOST_AUTO_TEST_CASE(it_initializes_by_unpacking_a_legacy_binary_contract) BOOST_CHECK(superblock.m_cpids.At(2)->first.ToString() == cpid3); BOOST_CHECK(superblock.m_cpids.At(2)->second == 200); + BOOST_CHECK(superblock.m_projects.m_converged_by_project == false); BOOST_CHECK(superblock.m_projects.size() == 2); BOOST_CHECK(superblock.m_projects.TotalRac() == 1023); BOOST_CHECK(superblock.m_projects.AverageRac() == 511.5); @@ -467,6 +599,7 @@ BOOST_AUTO_TEST_CASE(it_initializes_by_unpacking_a_legacy_binary_contract) BOOST_CHECK(project_1->m_total_credit == 0); 
BOOST_CHECK(project_1->m_average_rac == 123); BOOST_CHECK(project_1->m_rac == 456); + BOOST_CHECK(project_1->m_convergence_hint == 0); } else { BOOST_FAIL("Project 1 not found in superblock."); } @@ -475,6 +608,7 @@ BOOST_AUTO_TEST_CASE(it_initializes_by_unpacking_a_legacy_binary_contract) BOOST_CHECK(project_2->m_total_credit == 0); BOOST_CHECK(project_2->m_average_rac == 234); BOOST_CHECK(project_2->m_rac == 567); + BOOST_CHECK(project_2->m_convergence_hint == 0); } else { BOOST_FAIL("Project 2 not found in superblock."); } @@ -508,6 +642,8 @@ BOOST_AUTO_TEST_CASE(it_initializes_by_unpacking_a_legacy_text_contract) // Legacy string-packed superblocks unpack to version 1: BOOST_CHECK(superblock.m_version == 1); + BOOST_CHECK(superblock.m_convergence_hint == 0); + BOOST_CHECK(superblock.m_manifest_content_hint == 0); BOOST_CHECK(superblock.m_cpids.size() == 3); BOOST_CHECK(superblock.m_cpids.Zeros() == 0); @@ -518,6 +654,7 @@ BOOST_AUTO_TEST_CASE(it_initializes_by_unpacking_a_legacy_text_contract) BOOST_CHECK(superblock.m_cpids.At(2)->first.ToString() == cpid3); BOOST_CHECK(superblock.m_cpids.At(2)->second == 200); + BOOST_CHECK(superblock.m_projects.m_converged_by_project == false); BOOST_CHECK(superblock.m_projects.size() == 2); BOOST_CHECK(superblock.m_projects.TotalRac() == 1023); BOOST_CHECK(superblock.m_projects.AverageRac() == 511.5); @@ -526,6 +663,7 @@ BOOST_AUTO_TEST_CASE(it_initializes_by_unpacking_a_legacy_text_contract) BOOST_CHECK(project_1->m_total_credit == 0); BOOST_CHECK(project_1->m_average_rac == 123); BOOST_CHECK(project_1->m_rac == 456); + BOOST_CHECK(project_1->m_convergence_hint == 0); } else { BOOST_FAIL("Project 1 not found in superblock."); } @@ -534,11 +672,21 @@ BOOST_AUTO_TEST_CASE(it_initializes_by_unpacking_a_legacy_text_contract) BOOST_CHECK(project_2->m_total_credit == 0); BOOST_CHECK(project_2->m_average_rac == 234); BOOST_CHECK(project_2->m_rac == 567); + BOOST_CHECK(project_2->m_convergence_hint == 0); } else { 
BOOST_FAIL("Project 2 not found in superblock."); } } +BOOST_AUTO_TEST_CASE(it_initializes_to_an_empty_superblock_for_empty_strings) +{ + NN::Superblock superblock = NN::Superblock::UnpackLegacy(""); + + BOOST_CHECK(superblock.m_version == 1); + BOOST_CHECK(superblock.m_cpids.empty()); + BOOST_CHECK(superblock.m_projects.empty()); +} + BOOST_AUTO_TEST_CASE(it_provides_backward_compatibility_for_legacy_contracts) { const std::string legacy_contract( @@ -578,6 +726,46 @@ BOOST_AUTO_TEST_CASE(it_provides_backward_compatibility_for_legacy_contracts) BOOST_CHECK(Legacy::GetQuorumHash(unpacked) == expected_hash); } +BOOST_AUTO_TEST_CASE(it_determines_whether_it_represents_a_complete_superblock) +{ + NN::Superblock valid; + + valid.m_cpids.Add(NN::Cpid(), 123); + valid.m_projects.Add("name", NN::Superblock::ProjectStats()); + + BOOST_CHECK(valid.WellFormed() == true); + + NN::Superblock invalid = valid; + + invalid.m_version = 0; + BOOST_CHECK(invalid.WellFormed() == false); + + invalid.m_version = std::numeric_limits::max(); + BOOST_CHECK(invalid.WellFormed() == false); + + invalid = valid; + + invalid.m_cpids = NN::Superblock::CpidIndex(); + BOOST_CHECK(invalid.WellFormed() == false); + + invalid = valid; + + invalid.m_projects = NN::Superblock::ProjectIndex(); + BOOST_CHECK(invalid.WellFormed() == false); +} + +BOOST_AUTO_TEST_CASE(it_checks_whether_it_was_created_from_fallback_convergence) +{ + NN::Superblock superblock; + + BOOST_CHECK(superblock.ConvergedByProject() == false); + + superblock.m_projects.Add("project_name", NN::Superblock::ProjectStats()); + superblock.m_projects.SetHint("project_name", CSerializeData()); + + BOOST_CHECK(superblock.ConvergedByProject() == true); +} + BOOST_AUTO_TEST_CASE(it_calculates_its_age) { NN::Superblock superblock; @@ -588,10 +776,47 @@ BOOST_AUTO_TEST_CASE(it_calculates_its_age) BOOST_CHECK(superblock.Age() < GetAdjustedTime()); } +BOOST_AUTO_TEST_CASE(it_generates_its_quorum_hash) +{ + NN::Superblock superblock; + + 
BOOST_CHECK(superblock.GetHash() == NN::QuorumHash::Hash(superblock)); +} + +BOOST_AUTO_TEST_CASE(it_caches_its_quorum_hash) +{ + NN::Superblock superblock; + + // Cache the hash: + NN::QuorumHash original_hash = superblock.GetHash(); + + // Change the resulting hash: + superblock.m_cpids.Add(NN::Cpid(), 123); + + // The cached hash should not change: + BOOST_CHECK(superblock.GetHash() == original_hash); +} + +BOOST_AUTO_TEST_CASE(it_regenerates_its_cached_quorum_hash) +{ + NN::Superblock superblock; + + // Cache the hash: + superblock.GetHash(); + + // Change the resulting hash: + superblock.m_cpids.Add(NN::Cpid(), 123); + + // Regenerate the hash: + BOOST_CHECK(superblock.GetHash(true) == NN::QuorumHash::Hash(superblock)); +} + BOOST_AUTO_TEST_CASE(it_serializes_to_a_stream) { std::vector expected { 0x02, 0x00, 0x00, 0x00, // Version + 0x11, 0x11, 0x11, 0x11, // Convergence hint + 0x22, 0x22, 0x22, 0x22, // Manifest content hint 0x02, // CPIDs size 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, // CPID 1 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, // ... @@ -600,6 +825,7 @@ BOOST_AUTO_TEST_CASE(it_serializes_to_a_stream) 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, // ... 0xfd, 0x78, 0x17, // Magnitude 0x00, // Zero count (VARINT) + 0x00, // By-project flag 0x02, // Projects size 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, // "project_1" key 0x5f, 0x31, // ... 
@@ -613,7 +839,7 @@ BOOST_AUTO_TEST_CASE(it_serializes_to_a_stream) 0x80, 0x4f, // Total RAC (VARINT) }; - NN::Superblock superblock = NN::Superblock::FromStats(GetTestScraperStats()); + NN::Superblock superblock = NN::Superblock::FromConvergence(GetTestConvergence()); BOOST_CHECK(superblock.GetSerializeSize(SER_NETWORK, 1) == expected.size()); @@ -628,6 +854,128 @@ BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream) { std::vector bytes { 0x02, 0x00, 0x00, 0x00, // Version + 0x11, 0x11, 0x11, 0x11, // Convergence hint + 0x22, 0x22, 0x22, 0x22, // Manifest content hint + 0x02, // CPIDs size + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, // CPID 1 + 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, // ... + 0xfd, 0xa8, 0x0f, // Magnitude + 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x09, 0x08, // CPID 2 + 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, // ... + 0xfd, 0x78, 0x17, // Magnitude + 0x00, // Zero count (VARINT) + 0x00, // By-project flag + 0x02, // Projects size + 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, // "project_1" key + 0x5f, 0x31, // ... + 0x96, 0x38, // Total credit (VARINT) + 0x66, // Average RAC (VARINT) + 0x80, 0x4b, // Total RAC (VARINT) + 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, // "project_2" key + 0x5f, 0x32, // ... 
+ 0xb5, 0x58, // Total credit (VARINT) + 0x68, // Average RAC (VARINT) + 0x80, 0x4f, // Total RAC (VARINT) + }; + + NN::Superblock superblock; + + CDataStream stream(bytes, SER_NETWORK, 1); + stream >> superblock; + + BOOST_CHECK(superblock.m_version == 2); + BOOST_CHECK(superblock.m_convergence_hint == 0x11111111); + BOOST_CHECK(superblock.m_manifest_content_hint == 0x22222222); + + const auto& cpids = superblock.m_cpids; + BOOST_CHECK(cpids.size() == 2); + BOOST_CHECK(cpids.Zeros() == 0); + BOOST_CHECK(cpids.TotalMagnitude() == 10016); + BOOST_CHECK(cpids.AverageMagnitude() == 5008.0); + BOOST_CHECK(cpids.At(0)->first.ToString() == "00010203040506070809101112131415"); + BOOST_CHECK(cpids.At(0)->second == 4008); + BOOST_CHECK(cpids.At(1)->first.ToString() == "15141312111009080706050403020100"); + BOOST_CHECK(cpids.At(1)->second == 6008); + + const auto& projects = superblock.m_projects; + BOOST_CHECK(projects.m_converged_by_project == false); + BOOST_CHECK(projects.size() == 2); + BOOST_CHECK(projects.TotalRac() == 410); + BOOST_CHECK(projects.AverageRac() == 205.0); + + if (const auto project1 = projects.Try("project_1")) { + BOOST_CHECK(project1->m_total_credit == 3000); + BOOST_CHECK(project1->m_average_rac == 102); + BOOST_CHECK(project1->m_rac == 203); + BOOST_CHECK(project1->m_convergence_hint == 0); + } else { + BOOST_FAIL("Project 1 not found in index."); + } + + if (const auto project2 = projects.Try("project_2")) { + BOOST_CHECK(project2->m_total_credit == 7000); + BOOST_CHECK(project2->m_average_rac == 104); + BOOST_CHECK(project2->m_rac == 207); + BOOST_CHECK(project2->m_convergence_hint == 0); + } else { + BOOST_FAIL("Project 2 not found in index."); + } +} + +BOOST_AUTO_TEST_CASE(it_serializes_to_a_stream_for_fallback_convergences) +{ + // Superblocks generated from fallback-by-project convergences include + // convergence hints with a by-project flag set to 1: + // + std::vector expected { + 0x02, 0x00, 0x00, 0x00, // Version + 0x11, 0x11, 0x11, 
0x11, // Convergence hint + 0x00, 0x00, 0x00, 0x00, // Manifest content hint + 0x02, // CPIDs size + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, // CPID 1 + 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, // ... + 0xfd, 0xa8, 0x0f, // Magnitude + 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x09, 0x08, // CPID 2 + 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, // ... + 0xfd, 0x78, 0x17, // Magnitude + 0x00, // Zero count (VARINT) + 0x01, // By-project flag + 0x02, // Projects size + 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, // "project_1" key + 0x5f, 0x31, // ... + 0x96, 0x38, // Total credit (VARINT) + 0x66, // Average RAC (VARINT) + 0x80, 0x4b, // Total RAC (VARINT) + 0x76, 0x13, 0x59, 0xd3, // Convergence hint + 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, // "project_2" key + 0x5f, 0x32, // ... + 0xb5, 0x58, // Total credit (VARINT) + 0x68, // Average RAC (VARINT) + 0x80, 0x4f, // Total RAC (VARINT) + 0x76, 0x13, 0x59, 0xd3, // Convergence hint + }; + + NN::Superblock superblock = NN::Superblock::FromConvergence( + GetTestConvergence(true)); // Set fallback by project flag + + BOOST_CHECK(superblock.GetSerializeSize(SER_NETWORK, 1) == expected.size()); + + CDataStream stream(SER_NETWORK, 1); + stream << superblock; + std::vector output(stream.begin(), stream.end()); + + BOOST_CHECK(output == expected); +} + +BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream_for_fallback_convergence) +{ + // Superblocks generated from fallback-by-project convergences include + // convergence hints with a by-project flag set to 1: + // + std::vector bytes { + 0x02, 0x00, 0x00, 0x00, // Version + 0x11, 0x11, 0x11, 0x11, // Convergence hint + 0x22, 0x22, 0x22, 0x22, // Manifest content hint 0x02, // CPIDs size 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, // CPID 1 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, // ... @@ -636,17 +984,20 @@ BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream) 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, // ... 
0xfd, 0x78, 0x17, // Magnitude 0x00, // Zero count (VARINT) + 0x01, // By-project flag 0x02, // Projects size 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, // "project_1" key 0x5f, 0x31, // ... 0x96, 0x38, // Total credit (VARINT) 0x66, // Average RAC (VARINT) 0x80, 0x4b, // Total RAC (VARINT) + 0x76, 0x13, 0x59, 0xd3, // Convergence hint 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, // "project_2" key 0x5f, 0x32, // ... 0xb5, 0x58, // Total credit (VARINT) 0x68, // Average RAC (VARINT) 0x80, 0x4f, // Total RAC (VARINT) + 0x76, 0x13, 0x59, 0xd3, // Convergence hint }; NN::Superblock superblock; @@ -655,6 +1006,8 @@ BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream) stream >> superblock; BOOST_CHECK(superblock.m_version == 2); + BOOST_CHECK(superblock.m_convergence_hint == 0x11111111); + BOOST_CHECK(superblock.m_manifest_content_hint == 0x22222222); const auto& cpids = superblock.m_cpids; BOOST_CHECK(cpids.size() == 2); @@ -667,6 +1020,7 @@ BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream) BOOST_CHECK(cpids.At(1)->second == 6008); const auto& projects = superblock.m_projects; + BOOST_CHECK(projects.m_converged_by_project == true); BOOST_CHECK(projects.size() == 2); BOOST_CHECK(projects.TotalRac() == 410); BOOST_CHECK(projects.AverageRac() == 205.0); @@ -675,6 +1029,7 @@ BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream) BOOST_CHECK(project1->m_total_credit == 3000); BOOST_CHECK(project1->m_average_rac == 102); BOOST_CHECK(project1->m_rac == 203); + BOOST_CHECK(project1->m_convergence_hint == 0xd3591376); } else { BOOST_FAIL("Project 1 not found in index."); } @@ -683,6 +1038,7 @@ BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream) BOOST_CHECK(project2->m_total_credit == 7000); BOOST_CHECK(project2->m_average_rac == 104); BOOST_CHECK(project2->m_rac == 207); + BOOST_CHECK(project2->m_convergence_hint == 0xd3591376); } else { BOOST_FAIL("Project 2 not found in index."); } @@ -977,6 +1333,7 @@ BOOST_AUTO_TEST_CASE(it_initializes_to_a_zero_statistics_object) 
BOOST_CHECK(stats.m_total_credit == 0); BOOST_CHECK(stats.m_average_rac == 0); BOOST_CHECK(stats.m_rac == 0); + BOOST_CHECK(stats.m_convergence_hint == 0); } BOOST_AUTO_TEST_CASE(it_initializes_to_the_supplied_statistics) @@ -986,6 +1343,7 @@ BOOST_AUTO_TEST_CASE(it_initializes_to_the_supplied_statistics) BOOST_CHECK(stats.m_total_credit == 123); BOOST_CHECK(stats.m_average_rac == 456); BOOST_CHECK(stats.m_rac == 789); + BOOST_CHECK(stats.m_convergence_hint == 0); } BOOST_AUTO_TEST_CASE(it_initializes_to_supplied_legacy_superblock_statistics) @@ -995,6 +1353,7 @@ BOOST_AUTO_TEST_CASE(it_initializes_to_supplied_legacy_superblock_statistics) BOOST_CHECK(stats.m_total_credit == 0); BOOST_CHECK(stats.m_average_rac == 123); BOOST_CHECK(stats.m_rac == 456); + BOOST_CHECK(stats.m_convergence_hint == 0); } BOOST_AUTO_TEST_CASE(it_serializes_to_a_stream) @@ -1032,6 +1391,7 @@ BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream) BOOST_CHECK(project.m_total_credit == 1); BOOST_CHECK(project.m_average_rac == 2); BOOST_CHECK(project.m_rac == 3); + BOOST_CHECK(project.m_convergence_hint == 0); } BOOST_AUTO_TEST_SUITE_END() @@ -1046,6 +1406,7 @@ BOOST_AUTO_TEST_CASE(it_initializes_to_an_empty_index) { NN::Superblock::ProjectIndex projects; + BOOST_CHECK(projects.m_converged_by_project == false); BOOST_CHECK(projects.size() == 0); } @@ -1089,6 +1450,24 @@ BOOST_AUTO_TEST_CASE(it_fetches_the_statistics_of_a_specific_project) BOOST_CHECK(project->m_total_credit == 123); BOOST_CHECK(project->m_average_rac == 456); BOOST_CHECK(project->m_rac == 789); + BOOST_CHECK(project->m_convergence_hint == 0); + } else { + BOOST_FAIL("Project not found in index."); + } +} + +BOOST_AUTO_TEST_CASE(it_sets_a_project_part_convergence_hint) +{ + NN::Superblock::ProjectIndex projects; + + projects.Add("project_name", NN::Superblock::ProjectStats()); + projects.SetHint("project_name", CSerializeData()); + + BOOST_CHECK(projects.m_converged_by_project == true); + + if (const auto project = 
projects.Try("project_name")) { + // Hint derived from the hash of an empty part data: + BOOST_CHECK(project->m_convergence_hint == 0xd3591376); } else { BOOST_FAIL("Project not found in index."); } @@ -1223,6 +1602,7 @@ BOOST_AUTO_TEST_CASE(it_is_iterable) BOOST_AUTO_TEST_CASE(it_serializes_to_a_stream) { const std::vector expected { + 0x00, // By-project flag 0x02, // Projects size 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, // "project_1" key 0x5f, 0x31, // ... @@ -1253,6 +1633,7 @@ BOOST_AUTO_TEST_CASE(it_serializes_to_a_stream) BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream) { const std::vector bytes { + 0x00, // By-project flag 0x02, // Projects size 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, // "project_1" key 0x5f, 0x31, // ... @@ -1271,12 +1652,14 @@ BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream) CDataStream stream(bytes, SER_NETWORK, 1); stream >> projects; + BOOST_CHECK(projects.m_converged_by_project == false); BOOST_CHECK(projects.size() == 2); if (const auto project1 = projects.Try("project_1")) { BOOST_CHECK(project1->m_total_credit == 1); BOOST_CHECK(project1->m_average_rac == 2); BOOST_CHECK(project1->m_rac == 3); + BOOST_CHECK(project1->m_convergence_hint == 0); } else { BOOST_FAIL("Project 1 not found in index."); } @@ -1285,6 +1668,98 @@ BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream) BOOST_CHECK(project2->m_total_credit == 1); BOOST_CHECK(project2->m_average_rac == 2); BOOST_CHECK(project2->m_rac == 3); + BOOST_CHECK(project2->m_convergence_hint == 0); + } else { + BOOST_FAIL("Project 2 not found in index."); + } + + BOOST_CHECK(projects.TotalRac() == 6); + BOOST_CHECK(projects.AverageRac() == 3); +} + +BOOST_AUTO_TEST_CASE(it_serializes_to_a_stream_for_fallback_convergences) +{ + // A project index generated from fallback-by-project convergences includes + // convergence hints with a by-project flag set to 1: + // + const std::vector expected { + 0x01, // By-project flag + 0x02, // Projects size + 0x09, 0x70, 0x72, 0x6f, 
0x6a, 0x65, 0x63, 0x74, // "project_1" key + 0x5f, 0x31, // ... + 0x01, // Total credit (VARINT) + 0x02, // Average RAC (VARINT) + 0x03, // Total RAC (VARINT) + 0x76, 0x13, 0x59, 0xd3, // Convergence hint + 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, // "project_2" key + 0x5f, 0x32, // ... + 0x01, // Total credit (VARINT) + 0x02, // Average RAC (VARINT) + 0x03, // Total RAC (VARINT) + 0x76, 0x13, 0x59, 0xd3, // Convergence hint + }; + + NN::Superblock::ProjectIndex projects; + + projects.Add("project_1", NN::Superblock::ProjectStats(1, 2, 3)); + projects.Add("project_2", NN::Superblock::ProjectStats(1, 2, 3)); + + projects.SetHint("project_1", CSerializeData()); + projects.SetHint("project_2", CSerializeData()); + + BOOST_CHECK(projects.GetSerializeSize(SER_NETWORK, 1) == expected.size()); + + CDataStream stream(SER_NETWORK, 1); + stream << projects; + std::vector output(stream.begin(), stream.end()); + + BOOST_CHECK(output == expected); +} + +BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream_for_fallback_convergence) +{ + // A project index generated from fallback-by-project convergences includes + // convergence hints with a by-project flag set to 1: + // + const std::vector bytes { + 0x01, // By-project flag + 0x02, // Projects size + 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, // "project_1" key + 0x5f, 0x31, // ... + 0x01, // Total credit (VARINT) + 0x02, // Average RAC (VARINT) + 0x03, // Total RAC (VARINT) + 0x76, 0x13, 0x59, 0xd3, // Convergence hint + 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, // "project_2" key + 0x5f, 0x32, // ... 
+ 0x01, // Total credit (VARINT) + 0x02, // Average RAC (VARINT) + 0x03, // Total RAC (VARINT) + 0x76, 0x13, 0x59, 0xd3, // Convergence hint + }; + + NN::Superblock::ProjectIndex projects; + + CDataStream stream(bytes, SER_NETWORK, 1); + stream >> projects; + + BOOST_CHECK(projects.m_converged_by_project == true); + BOOST_CHECK(projects.size() == 2); + + if (const auto project1 = projects.Try("project_1")) { + BOOST_CHECK(project1->m_total_credit == 1); + BOOST_CHECK(project1->m_average_rac == 2); + BOOST_CHECK(project1->m_rac == 3); + BOOST_CHECK(project1->m_convergence_hint == 0xd3591376); + } else { + BOOST_FAIL("Project 1 not found in index."); + } + + if (const auto project2 = projects.Try("project_2")) { + BOOST_CHECK(project2->m_total_credit == 1); + BOOST_CHECK(project2->m_average_rac == 2); + BOOST_CHECK(project2->m_rac == 3); + BOOST_CHECK(project2->m_convergence_hint == 0xd3591376); } else { BOOST_FAIL("Project 2 not found in index."); } @@ -1377,6 +1852,9 @@ BOOST_AUTO_TEST_CASE(it_hashes_a_superblock) projects.Add("project_1", NN::Superblock::ProjectStats(0, 0, 0)); projects.Add("project_2", NN::Superblock::ProjectStats(0, 0, 0)); + // Note: convergence hints embedded in a superblock are NOT considered + // when generating the superblock hash: + // std::vector input { 0x02, // CPIDs size 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, // CPID 1 @@ -1608,4 +2086,127 @@ BOOST_AUTO_TEST_CASE(it_is_hashable_to_key_a_lookup_map) BOOST_CHECK(hasher(hash_md5) == 2024957465561532936); } +BOOST_AUTO_TEST_CASE(it_serializes_to_a_stream_for_invalid) +{ + const NN::QuorumHash hash; + + BOOST_CHECK(hash.GetSerializeSize(SER_NETWORK, 1) == 1); + + CDataStream stream(SER_NETWORK, 1); + stream << hash; + + BOOST_CHECK(stream.size() == 1); + BOOST_CHECK(stream[0] == 0x00); // QuorumHash::Kind::INVALID +} + +BOOST_AUTO_TEST_CASE(it_serializes_to_a_stream_for_sha256) +{ + const std::vector expected { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x10, 
0x11, 0x12, 0x13, 0x14, 0x15, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, + }; + + const NN::QuorumHash hash(expected); + + BOOST_CHECK(hash.GetSerializeSize(SER_NETWORK, 1) == 33); + + CDataStream stream(SER_NETWORK, 1); + stream << hash; + const std::vector output(stream.begin(), stream.end()); + + BOOST_CHECK(output[0] == 0x01); // QuorumHash::Kind::SHA256 + + BOOST_CHECK_EQUAL_COLLECTIONS( + ++output.begin(), // we already checked the first byte + output.end(), + expected.begin(), + expected.end()); +} + +BOOST_AUTO_TEST_CASE(it_serializes_to_a_stream_for_md5) +{ + const std::vector expected { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, + }; + + const NN::QuorumHash hash(expected); + + BOOST_CHECK(hash.GetSerializeSize(SER_NETWORK, 1) == 17); + + CDataStream stream(SER_NETWORK, 1); + stream << hash; + const std::vector output(stream.begin(), stream.end()); + + BOOST_CHECK(output[0] == 0x02); // QuorumHash::Kind::MD5 + + BOOST_CHECK_EQUAL_COLLECTIONS( + ++output.begin(), // we already checked the first byte + output.end(), + expected.begin(), + expected.end()); +} + +BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream_for_invalid) +{ + // Initialize quorum hash with a valid value to test invalid: + NN::QuorumHash hash(NN::QuorumHash::Md5Sum { }); // Initialize to zeros + + CDataStream stream(SER_NETWORK, 1); + stream << (unsigned char)0x00; // QuorumHash::Kind::INVALID + stream >> hash; + + BOOST_CHECK(hash.Which() == NN::QuorumHash::Kind::INVALID); +} + +BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream_for_sha256) +{ + NN::QuorumHash hash; + + const std::array expected { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, + }; + + CDataStream stream(SER_NETWORK, 1); + stream << (unsigned char)0x01; // 
QuorumHash::Kind::SHA256 + stream << FLATDATA(expected); + stream >> hash; + + BOOST_CHECK(hash.Which() == NN::QuorumHash::Kind::SHA256); + + BOOST_CHECK_EQUAL_COLLECTIONS( + hash.Raw(), + hash.Raw() + 32, + expected.begin(), + expected.end()); +} + +BOOST_AUTO_TEST_CASE(it_deserializes_from_a_stream_for_md5) +{ + NN::QuorumHash hash; + + const std::array expected { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, + }; + + CDataStream stream(SER_NETWORK, 1); + stream << (unsigned char)0x02; // QuorumHash::Kind::MD5 + stream << FLATDATA(expected); + stream >> hash; + + BOOST_CHECK(hash.Which() == NN::QuorumHash::Kind::MD5); + + BOOST_CHECK_EQUAL_COLLECTIONS( + hash.Raw(), + hash.Raw() + 16, + expected.begin(), + expected.end()); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/test_gridcoin.cpp b/src/test/test_gridcoin.cpp index 0b7c899e3b..0c864bc55a 100644 --- a/src/test/test_gridcoin.cpp +++ b/src/test/test_gridcoin.cpp @@ -4,9 +4,10 @@ #include "db.h" #include "main.h" #include "wallet.h" +#include "banman.h" -CWallet* pwalletMain; -CClientUIInterface uiInterface; +extern CWallet* pwalletMain; +extern CClientUIInterface uiInterface; extern bool fPrintToConsole; extern void noui_connect(); @@ -21,24 +22,18 @@ struct TestingSetup { pwalletMain = new CWallet("wallet.dat"); pwalletMain->LoadWallet(fFirstRun); RegisterWallet(pwalletMain); + // Ban manager instance should not already be instantiated + assert(!g_banman); + // Create ban manager instance. 
+ g_banman = MakeUnique(GetDataDir() / "banlist.dat", &uiInterface, GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME)); } ~TestingSetup() { delete pwalletMain; pwalletMain = NULL; bitdb.Flush(true); + g_banman.reset(); } }; BOOST_GLOBAL_FIXTURE(TestingSetup); - -void Shutdown(void* parg) -{ - exit(0); -} - -void StartShutdown() -{ - exit(0); -} - diff --git a/src/ui_interface.h b/src/ui_interface.h index c6932f417b..54a03c907f 100644 --- a/src/ui_interface.h +++ b/src/ui_interface.h @@ -95,6 +95,9 @@ class CClientUIInterface /** Number of network connections changed. */ boost::signals2::signal NotifyNumConnectionsChanged; + /** Ban list changed. */ + boost::signals2::signal BannedListChanged; + /** * New, updated or cancelled alert. * @note called with lock cs_mapAlerts held. diff --git a/src/util.cpp b/src/util.cpp index e8be303afb..ed1512160f 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -919,6 +919,97 @@ string DecodeBase32(const string& str) return string((const char*)&vchRet[0], vchRet.size()); } +NODISCARD static bool ParsePrechecks(const std::string& str) +{ + if (str.empty()) // No empty string allowed + return false; + if (str.size() >= 1 && (IsSpace(str[0]) || IsSpace(str[str.size()-1]))) // No padding allowed + return false; + if (str.size() != strlen(str.c_str())) // No embedded NUL characters allowed + return false; + return true; +} + +bool ParseInt32(const std::string& str, int32_t *out) +{ + if (!ParsePrechecks(str)) + return false; + char *endp = nullptr; + errno = 0; // strtol will not set errno if valid + long int n = strtol(str.c_str(), &endp, 10); + if(out) *out = (int32_t)n; + // Note that strtol returns a *long int*, so even if strtol doesn't report an over/underflow + // we still have to check that the returned value is within the range of an *int32_t*. On 64-bit + // platforms the size of these types may be different. 
+ return endp && *endp == 0 && !errno && + n >= std::numeric_limits::min() && + n <= std::numeric_limits::max(); +} + +bool ParseInt64(const std::string& str, int64_t *out) +{ + if (!ParsePrechecks(str)) + return false; + char *endp = nullptr; + errno = 0; // strtoll will not set errno if valid + long long int n = strtoll(str.c_str(), &endp, 10); + if(out) *out = (int64_t)n; + // Note that strtoll returns a *long long int*, so even if strtol doesn't report an over/underflow + // we still have to check that the returned value is within the range of an *int64_t*. + return endp && *endp == 0 && !errno && + n >= std::numeric_limits::min() && + n <= std::numeric_limits::max(); +} + +bool ParseUInt32(const std::string& str, uint32_t *out) +{ + if (!ParsePrechecks(str)) + return false; + if (str.size() >= 1 && str[0] == '-') // Reject negative values, unfortunately strtoul accepts these by default if they fit in the range + return false; + char *endp = nullptr; + errno = 0; // strtoul will not set errno if valid + unsigned long int n = strtoul(str.c_str(), &endp, 10); + if(out) *out = (uint32_t)n; + // Note that strtoul returns a *unsigned long int*, so even if it doesn't report an over/underflow + // we still have to check that the returned value is within the range of an *uint32_t*. On 64-bit + // platforms the size of these types may be different. 
+ return endp && *endp == 0 && !errno && + n <= std::numeric_limits::max(); +} + +bool ParseUInt64(const std::string& str, uint64_t *out) +{ + if (!ParsePrechecks(str)) + return false; + if (str.size() >= 1 && str[0] == '-') // Reject negative values, unfortunately strtoull accepts these by default if they fit in the range + return false; + char *endp = nullptr; + errno = 0; // strtoull will not set errno if valid + unsigned long long int n = strtoull(str.c_str(), &endp, 10); + if(out) *out = (uint64_t)n; + // Note that strtoull returns a *unsigned long long int*, so even if it doesn't report an over/underflow + // we still have to check that the returned value is within the range of an *uint64_t*. + return endp && *endp == 0 && !errno && + n <= std::numeric_limits::max(); +} + + +bool ParseDouble(const std::string& str, double *out) +{ + if (!ParsePrechecks(str)) + return false; + if (str.size() >= 2 && str[0] == '0' && str[1] == 'x') // No hexadecimal floats allowed + return false; + std::istringstream text(str); + text.imbue(std::locale::classic()); + double result; + text >> result; + if(out) *out = result; + return text.eof() && !text.fail(); +} + + bool WildcardMatch(const char* psz, const char* mask) { @@ -1221,6 +1312,7 @@ bool RenameOver(boost::filesystem::path src, boost::filesystem::path dest) #endif /* WIN32 */ } +/* void FileCommit(FILE *fileout) { fflush(fileout); // harmless if redundantly called @@ -1230,6 +1322,42 @@ void FileCommit(FILE *fileout) fsync(fileno(fileout)); #endif } +*/ + +// Newer FileCommit overload from Bitcoin. 
+bool FileCommit(FILE *file) +{ + if (fflush(file) != 0) { // harmless if redundantly called + LogPrintf("%s: fflush failed: %d\n", __func__, errno); + return false; + } +#ifdef WIN32 + HANDLE hFile = (HANDLE)_get_osfhandle(_fileno(file)); + if (FlushFileBuffers(hFile) == 0) { + LogPrintf("%s: FlushFileBuffers failed: %d\n", __func__, GetLastError()); + return false; + } +#else + #if defined(__linux__) || defined(__NetBSD__) + if (fdatasync(fileno(file)) != 0 && errno != EINVAL) { // Ignore EINVAL for filesystems that don't support sync + LogPrintf("%s: fdatasync failed: %d\n", __func__, errno); + return false; + } + #elif defined(MAC_OSX) && defined(F_FULLFSYNC) + if (fcntl(fileno(file), F_FULLFSYNC, 0) == -1) { // Manpage says "value other than -1" is returned on success + LogPrintf("%s: fcntl F_FULLFSYNC failed: %d\n", __func__, errno); + return false; + } + #else + if (fsync(fileno(file)) != 0 && errno != EINVAL) { + LogPrintf("%s: fsync failed: %d\n", __func__, errno); + return false; + } + #endif +#endif + return true; +} + void ShrinkDebugFile() { diff --git a/src/util.h b/src/util.h index 54549c560a..df4e6c1697 100644 --- a/src/util.h +++ b/src/util.h @@ -6,10 +6,14 @@ #ifndef BITCOIN_UTIL_H #define BITCOIN_UTIL_H +#include "attributes.h" + #include "uint256.h" #include "fwd.h" #include "hash.h" +#include +#include #include #include #include @@ -26,7 +30,6 @@ #include -#include "fwd.h" #include "tinyformat.h" #ifndef WIN32 @@ -78,6 +81,13 @@ static const int64_t CENT = 1000000; #define MAX_PATH 1024 #endif +//! Substitute for C++14 std::make_unique. +template +std::unique_ptr MakeUnique(Args&&... 
args) +{ + return std::unique_ptr(new T(std::forward(args)...)); +} + void MilliSleep(int64_t n); extern int GetDayOfYear(int64_t timestamp); @@ -190,7 +200,7 @@ std::string EncodeBase32(const std::string& str); void ParseParameters(int argc, const char*const argv[]); bool WildcardMatch(const char* psz, const char* mask); bool WildcardMatch(const std::string& str, const std::string& mask); -void FileCommit(FILE *fileout); +bool FileCommit(FILE *fileout); std::string TimestampToHRDate(double dtm); @@ -581,5 +591,84 @@ class ThreadHandler std::map threadMap; }; + +/** + * .. A wrapper that just calls func once + */ +template void TraceThread(const char* name, Callable func) +{ + RenameThread(name); + try + { + LogPrintf("%s thread start\n", name); + func(); + LogPrintf("%s thread exit\n", name); + } + catch (const boost::thread_interrupted&) + { + LogPrintf("%s thread interrupt\n", name); + throw; + } + catch (std::exception& e) { + PrintExceptionContinue(&e, name); + throw; + } + catch (...) { + PrintExceptionContinue(nullptr, name); + throw; + } +} + +/** + * Tests if the given character is a whitespace character. The whitespace characters + * are: space, form-feed ('\f'), newline ('\n'), carriage return ('\r'), horizontal + * tab ('\t'), and vertical tab ('\v'). + * + * This function is locale independent. Under the C locale this function gives the + * same result as std::isspace. + * + * @param[in] c character to test + * @return true if the argument is a whitespace character; otherwise false + */ +constexpr inline bool IsSpace(char c) noexcept { + return c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' || c == '\v'; +} + +/** + * Convert string to signed 32-bit integer with strict parse error feedback. + * @returns true if the entire string could be parsed as valid integer, + * false if not the entire string could be parsed or when overflow or underflow occurred. 
+ */ +NODISCARD bool ParseInt32(const std::string& str, int32_t *out); + +/** + * Convert string to signed 64-bit integer with strict parse error feedback. + * @returns true if the entire string could be parsed as valid integer, + * false if not the entire string could be parsed or when overflow or underflow occurred. + */ +NODISCARD bool ParseInt64(const std::string& str, int64_t *out); + +/** + * Convert decimal string to unsigned 32-bit integer with strict parse error feedback. + * @returns true if the entire string could be parsed as valid integer, + * false if not the entire string could be parsed or when overflow or underflow occurred. + */ +NODISCARD bool ParseUInt32(const std::string& str, uint32_t *out); + +/** + * Convert decimal string to unsigned 64-bit integer with strict parse error feedback. + * @returns true if the entire string could be parsed as valid integer, + * false if not the entire string could be parsed or when overflow or underflow occurred. + */ +NODISCARD bool ParseUInt64(const std::string& str, uint64_t *out); + +/** + * Convert string to double with strict parse error feedback. + * @returns true if the entire string could be parsed as valid double, + * false if not the entire string could be parsed or when overflow or underflow occurred. + */ +NODISCARD bool ParseDouble(const std::string& str, double *out); + + #endif