Branch data Line data Source code
1 : : // Copyright (c) 2011-2022 The Bitcoin Core developers
2 : : // Distributed under the MIT software license, see the accompanying
3 : : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
4 : :
5 : : #include <node/blockstorage.h>
6 : :
7 : : #include <arith_uint256.h>
8 : : #include <chain.h>
9 : : #include <consensus/params.h>
10 : : #include <consensus/validation.h>
11 : : #include <dbwrapper.h>
12 : : #include <flatfile.h>
13 : : #include <hash.h>
14 : : #include <kernel/blockmanager_opts.h>
15 : : #include <kernel/chainparams.h>
16 : : #include <kernel/messagestartchars.h>
17 : : #include <kernel/notifications_interface.h>
18 : : #include <logging.h>
19 : : #include <pow.h>
20 : : #include <primitives/block.h>
21 : : #include <primitives/transaction.h>
22 : : #include <random.h>
23 : : #include <serialize.h>
24 : : #include <signet.h>
25 : : #include <span.h>
26 : : #include <streams.h>
27 : : #include <sync.h>
28 : : #include <tinyformat.h>
29 : : #include <uint256.h>
30 : : #include <undo.h>
31 : : #include <util/batchpriority.h>
32 : : #include <util/check.h>
33 : : #include <util/fs.h>
34 : : #include <util/signalinterrupt.h>
35 : : #include <util/strencodings.h>
36 : : #include <util/translation.h>
37 : : #include <validation.h>
38 : :
39 : : #include <map>
40 : : #include <ranges>
41 : : #include <unordered_map>
42 : :
43 : : namespace kernel {
44 : : static constexpr uint8_t DB_BLOCK_FILES{'f'};
45 : : static constexpr uint8_t DB_BLOCK_INDEX{'b'};
46 : : static constexpr uint8_t DB_FLAG{'F'};
47 : : static constexpr uint8_t DB_REINDEX_FLAG{'R'};
48 : : static constexpr uint8_t DB_LAST_BLOCK{'l'};
49 : : // Keys used in previous versions that might still be found in the DB:
50 : : // BlockTreeDB::DB_TXINDEX_BLOCK{'T'};
51 : : // BlockTreeDB::DB_TXINDEX{'t'}
52 : : // BlockTreeDB::ReadFlag("txindex")
53 : :
54 : 2244 : bool BlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo& info)
55 : : {
56 : 2244 : return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
57 : : }
58 : :
59 : 33 : bool BlockTreeDB::WriteReindexing(bool fReindexing)
60 : : {
61 [ + + ]: 33 : if (fReindexing) {
62 : 17 : return Write(DB_REINDEX_FLAG, uint8_t{'1'});
63 : : } else {
64 : 16 : return Erase(DB_REINDEX_FLAG);
65 : : }
66 : : }
67 : :
68 : 1056 : void BlockTreeDB::ReadReindexing(bool& fReindexing)
69 : : {
70 : 1056 : fReindexing = Exists(DB_REINDEX_FLAG);
71 : 1056 : }
72 : :
73 : 1057 : bool BlockTreeDB::ReadLastBlockFile(int& nFile)
74 : : {
75 : 1057 : return Read(DB_LAST_BLOCK, nFile);
76 : : }
77 : :
78 : 2993 : bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
79 : : {
80 : 2993 : CDBBatch batch(*this);
81 [ + - + + ]: 4592 : for (const auto& [file, info] : fileInfo) {
82 [ + - ]: 1599 : batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info);
83 : : }
84 [ + - ]: 2993 : batch.Write(DB_LAST_BLOCK, nLastFile);
85 [ + + ]: 150451 : for (const CBlockIndex* bi : blockinfo) {
86 [ + - ]: 147458 : batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi});
87 : : }
88 [ + - ]: 5986 : return WriteBatch(batch, true);
89 : 2993 : }
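// Sketch of the block tree DB key space implied by the read/write helpers above
// (an illustrative summary derived from this file, not an exhaustive schema):
//
//   ('f', file_number) -> CBlockFileInfo   // per-blockfile statistics
//   'l'                -> int              // number of the last block file
//   ('b', block_hash)  -> CDiskBlockIndex  // one entry per known header
//   'R'                -> uint8_t{'1'}     // present only while reindexing
//   ('F', flag_name)   -> '1' or '0'       // named boolean flags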
90 : :
91 : 14 : bool BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
92 : : {
93 [ - + + - ]: 14 : return Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
94 : : }
95 : :
96 : 1056 : bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue)
97 : : {
98 : 1056 : uint8_t ch;
99 [ + - + + ]: 1056 : if (!Read(std::make_pair(DB_FLAG, name), ch)) {
100 : : return false;
101 : : }
102 : 16 : fValue = ch == uint8_t{'1'};
103 : 16 : return true;
104 : : }
105 : :
106 : 1059 : bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt)
107 : : {
108 : 1059 : AssertLockHeld(::cs_main);
109 [ + - ]: 1059 : std::unique_ptr<CDBIterator> pcursor(NewIterator());
110 [ + - ]: 1059 : pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));
111 : :
112 : : // Load m_block_index
113 [ + - + + ]: 136325 : while (pcursor->Valid()) {
114 [ + - + - ]: 135888 : if (interrupt) return false;
115 : 135888 : std::pair<uint8_t, uint256> key;
116 [ + - + + + - ]: 135888 : if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
117 : 135266 : CDiskBlockIndex diskindex;
118 [ + - + - ]: 135266 : if (pcursor->GetValue(diskindex)) {
119 : : // Construct block index object
120 [ + - + - ]: 135266 : CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash());
121 [ + - ]: 135266 : pindexNew->pprev = insertBlockIndex(diskindex.hashPrev);
122 : 135266 : pindexNew->nHeight = diskindex.nHeight;
123 : 135266 : pindexNew->nFile = diskindex.nFile;
124 : 135266 : pindexNew->nDataPos = diskindex.nDataPos;
125 : 135266 : pindexNew->nUndoPos = diskindex.nUndoPos;
126 : 135266 : pindexNew->nVersion = diskindex.nVersion;
127 : 135266 : pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
128 : 135266 : pindexNew->nTime = diskindex.nTime;
129 : 135266 : pindexNew->nBits = diskindex.nBits;
130 : 135266 : pindexNew->nNonce = diskindex.nNonce;
131 : 135266 : pindexNew->nStatus = diskindex.nStatus;
132 : 135266 : pindexNew->nTx = diskindex.nTx;
133 : :
134 [ + - - + ]: 135266 : if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
135 [ # # # # ]: 0 : LogError("%s: CheckProofOfWork failed: %s\n", __func__, pindexNew->ToString());
136 : 0 : return false;
137 : : }
138 : :
139 [ + - ]: 135266 : pcursor->Next();
140 : : } else {
141 [ # # ]: 0 : LogError("%s: failed to read value\n", __func__);
142 : : return false;
143 : : }
144 : : } else {
145 : : break;
146 : : }
147 : : }
148 : :
149 : : return true;
150 : 1059 : }
151 : : } // namespace kernel
152 : :
153 : : namespace node {
154 : :
155 : 850355964 : bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
156 : : {
157 : : // First sort by most total work, ...
158 [ + + ]: 850355964 : if (pa->nChainWork > pb->nChainWork) return false;
159 [ + + ]: 549921347 : if (pa->nChainWork < pb->nChainWork) return true;
160 : :
161 : : // ... then by earliest time received, ...
162 [ + + ]: 7212983 : if (pa->nSequenceId < pb->nSequenceId) return false;
163 [ + + ]: 7175296 : if (pa->nSequenceId > pb->nSequenceId) return true;
164 : :
165 : : // Use pointer address as tie breaker (should only happen with blocks
166 : : // loaded from disk, as those all have id 0).
167 [ + + ]: 7159020 : if (pa < pb) return false;
168 [ + + ]: 7158020 : if (pa > pb) return true;
169 : :
170 : : // Identical blocks.
171 : : return false;
172 : : }
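// Minimal usage sketch (not from this file; names are illustrative) of the
// ordering defined above: because "more work" compares as "not less", the best
// candidate tip sorts last in an ordered set and can be read from the reverse end.
//
//   std::set<CBlockIndex*, node::CBlockIndexWorkComparator> candidates;
//   candidates.insert(tip_a);
//   candidates.insert(tip_b);
//   CBlockIndex* best = *candidates.rbegin(); // most work, then earliest nSequenceId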
173 : :
174 : 2776819 : bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
175 : : {
176 : 2776819 : return pa->nHeight < pb->nHeight;
177 : : }
178 : :
179 : 2114 : std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices()
180 : : {
181 : 2114 : AssertLockHeld(cs_main);
182 : 2114 : std::vector<CBlockIndex*> rv;
183 [ + - ]: 2114 : rv.reserve(m_block_index.size());
184 [ + - + + ]: 271986 : for (auto& [_, block_index] : m_block_index) {
185 [ + - ]: 269872 : rv.push_back(&block_index);
186 : : }
187 : 2114 : return rv;
188 : 0 : }
189 : :
190 : 869160 : CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash)
191 : : {
192 : 869160 : AssertLockHeld(cs_main);
193 : 869160 : BlockMap::iterator it = m_block_index.find(hash);
194 [ + + ]: 869160 : return it == m_block_index.end() ? nullptr : &it->second;
195 : : }
196 : :
197 : 6 : const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
198 : : {
199 : 6 : AssertLockHeld(cs_main);
200 : 6 : BlockMap::const_iterator it = m_block_index.find(hash);
201 [ + + ]: 6 : return it == m_block_index.end() ? nullptr : &it->second;
202 : : }
203 : :
204 : 138207 : CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header)
205 : : {
206 : 138207 : AssertLockHeld(cs_main);
207 : :
208 [ + + ]: 138207 : auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
209 [ + + ]: 138207 : if (!inserted) {
210 : 3 : return &mi->second;
211 : : }
212 : 138204 : CBlockIndex* pindexNew = &(*mi).second;
213 : :
214 : : // We assign the sequence id to blocks only when the full data is available,
215 : : // to avoid miners withholding blocks but broadcasting headers, to get a
216 : : // competitive advantage.
217 : 138204 : pindexNew->nSequenceId = 0;
218 : :
219 : 138204 : pindexNew->phashBlock = &((*mi).first);
220 : 138204 : BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
221 [ + + ]: 138204 : if (miPrev != m_block_index.end()) {
222 : 137751 : pindexNew->pprev = &(*miPrev).second;
223 : 137751 : pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
224 : 137751 : pindexNew->BuildSkip();
225 : : }
226 [ + + + + ]: 138204 : pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
227 [ + + + - ]: 414612 : pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
228 : 138204 : pindexNew->RaiseValidity(BLOCK_VALID_TREE);
229 [ + + + + ]: 138204 : if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
230 : 118829 : best_header = pindexNew;
231 : : }
232 : :
233 : 138204 : m_dirty_blockindex.insert(pindexNew);
234 : :
235 : 138204 : return pindexNew;
236 : : }
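// For reading the nChainWork accumulation above: GetBlockProof() is defined
// elsewhere (chain.cpp), and the work credited per block is approximately
// 2^256 / (target + 1), computed overflow-free along the lines of:
//
//   arith_uint256 bnTarget;
//   bnTarget.SetCompact(pindexNew->nBits);              // expand the compact target
//   arith_uint256 work = (~bnTarget / (bnTarget + 1)) + 1;
//
// nChainWork is the running sum of this value along the header chain.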
237 : :
238 : 69 : void BlockManager::PruneOneBlockFile(const int fileNumber)
239 : : {
240 : 69 : AssertLockHeld(cs_main);
241 : 69 : LOCK(cs_LastBlockFile);
242 : :
243 [ + + ]: 119173 : for (auto& entry : m_block_index) {
244 : 119104 : CBlockIndex* pindex = &entry.second;
245 [ + + ]: 119104 : if (pindex->nFile == fileNumber) {
246 : 16569 : pindex->nStatus &= ~BLOCK_HAVE_DATA;
247 : 16569 : pindex->nStatus &= ~BLOCK_HAVE_UNDO;
248 : 16569 : pindex->nFile = 0;
249 : 16569 : pindex->nDataPos = 0;
250 : 16569 : pindex->nUndoPos = 0;
251 [ + - ]: 16569 : m_dirty_blockindex.insert(pindex);
252 : :
253 : : // Prune from m_blocks_unlinked -- any block we prune would have
254 : : // to be downloaded again in order to consider its chain, at which
255 : : // point it would be considered as a candidate for
256 : : // m_blocks_unlinked or setBlockIndexCandidates.
257 : 16569 : auto range = m_blocks_unlinked.equal_range(pindex->pprev);
258 [ - + ]: 16569 : while (range.first != range.second) {
259 : 0 : std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
260 : 0 : range.first++;
261 [ # # ]: 0 : if (_it->second == pindex) {
262 : 0 : m_blocks_unlinked.erase(_it);
263 : : }
264 : : }
265 : : }
266 : : }
267 : :
268 [ + - ]: 69 : m_blockfile_info.at(fileNumber) = CBlockFileInfo{};
269 [ + - ]: 69 : m_dirty_fileinfo.insert(fileNumber);
270 : 69 : }
271 : :
272 : 26 : void BlockManager::FindFilesToPruneManual(
273 : : std::set<int>& setFilesToPrune,
274 : : int nManualPruneHeight,
275 : : const Chainstate& chain,
276 : : ChainstateManager& chainman)
277 : : {
278 [ + - - + ]: 26 : assert(IsPruneMode() && nManualPruneHeight > 0);
279 : :
280 [ + - ]: 26 : LOCK2(cs_main, cs_LastBlockFile);
281 [ - + ]: 26 : if (chain.m_chain.Height() < 0) {
282 [ # # ]: 0 : return;
283 : : }
284 : :
285 [ + - ]: 26 : const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight);
286 : :
287 : 26 : int count = 0;
288 [ + + ]: 146 : for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
289 [ + + ]: 120 : const auto& fileinfo = m_blockfile_info[fileNumber];
290 [ + + + + - + ]: 120 : if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
291 : 68 : continue;
292 : : }
293 : :
294 [ + - ]: 52 : PruneOneBlockFile(fileNumber);
295 [ + - ]: 52 : setFilesToPrune.insert(fileNumber);
296 : 52 : count++;
297 : : }
298 [ + - + - ]: 26 : LogPrintf("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
299 : : chain.GetRole(), last_block_can_prune, count);
300 [ - - + - ]: 52 : }
301 : :
302 : 541 : void BlockManager::FindFilesToPrune(
303 : : std::set<int>& setFilesToPrune,
304 : : int last_prune,
305 : : const Chainstate& chain,
306 : : ChainstateManager& chainman)
307 : : {
308 [ + - ]: 541 : LOCK2(cs_main, cs_LastBlockFile);
309 : : // Distribute our -prune budget over all chainstates.
310 [ + - ]: 541 : const auto target = std::max(
311 [ + - + + ]: 699 : MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size());
312 : 541 : const uint64_t target_sync_height = chainman.m_best_header->nHeight;
313 : :
314 [ + + + - ]: 541 : if (chain.m_chain.Height() < 0 || target == 0) {
315 : : return;
316 : : }
317 [ + + ]: 529 : if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) {
318 : : return;
319 : : }
320 : :
321 [ + - + - ]: 425 : const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune);
322 : :
323 [ + - ]: 425 : uint64_t nCurrentUsage = CalculateCurrentUsage();
324 : : // We don't check to prune until after we've allocated new space for files,
325 : : // so we should leave a buffer under our target to account for another allocation
326 : : // before the next pruning.
327 : 425 : uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
328 : 425 : uint64_t nBytesToPrune;
329 : 425 : int count = 0;
330 : :
331 [ + + ]: 425 : if (nCurrentUsage + nBuffer >= target) {
332 : : // On a prune event, the chainstate DB is flushed.
333 : : // To avoid excessive prune events negating the benefit of high dbcache
334 : : // values, we should not prune too rapidly.
335 : : // So when pruning in IBD, increase the buffer to avoid a re-prune too soon.
336 [ + - ]: 9 : const auto chain_tip_height = chain.m_chain.Height();
337 [ + - - + - - ]: 9 : if (chainman.IsInitialBlockDownload() && target_sync_height > (uint64_t)chain_tip_height) {
338 : : // Since this is only relevant during IBD, we assume blocks are at least 1 MB on average
339 : 0 : static constexpr uint64_t average_block_size = 1000000; /* 1 MB */
340 : 0 : const uint64_t remaining_blocks = target_sync_height - chain_tip_height;
341 : 0 : nBuffer += average_block_size * remaining_blocks;
342 : : }
343 : :
344 [ + - ]: 35 : for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
345 [ + + ]: 35 : const auto& fileinfo = m_blockfile_info[fileNumber];
346 : 35 : nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize;
347 : :
348 [ + + ]: 35 : if (fileinfo.nSize == 0) {
349 : 13 : continue;
350 : : }
351 : :
352 [ + + ]: 22 : if (nCurrentUsage + nBuffer < target) { // are we below our target?
353 : : break;
354 : : }
355 : :
356 : : // don't prune files that could have a block that's not within the allowable
357 : : // prune range for the chain being pruned.
358 [ + - - + ]: 13 : if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
359 : 0 : continue;
360 : : }
361 : :
362 [ + - ]: 13 : PruneOneBlockFile(fileNumber);
363 : : // Queue up the files for removal
364 [ + - ]: 13 : setFilesToPrune.insert(fileNumber);
365 : 13 : nCurrentUsage -= nBytesToPrune;
366 : 13 : count++;
367 : : }
368 : : }
369 : :
370 [ + - + - + - + - + - ]: 425 : LogDebug(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n",
371 : : chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024,
372 : : (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024,
373 : : min_block_to_prune, last_block_can_prune, count);
374 [ + - + - ]: 1082 : }
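// Worked example of the IBD buffer above, with assumed numbers: if the active
// chain is 10,000 blocks behind the best known header, the buffer grows by
// roughly average_block_size * remaining_blocks:
//
//   uint64_t remaining_blocks = 10'000;
//   uint64_t extra = uint64_t{1'000'000} * remaining_blocks; // ~10 GB of headroom
//
// so during initial block download the loop prunes well below the nominal target
// rather than triggering another prune (and chainstate flush) almost immediately.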
375 : :
376 : 20814 : void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) {
377 : 20814 : AssertLockHeld(::cs_main);
378 : 20814 : m_prune_locks[name] = lock_info;
379 : 20814 : }
380 : :
381 : 270532 : CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
382 : : {
383 : 270532 : AssertLockHeld(cs_main);
384 : :
385 [ + + ]: 270532 : if (hash.IsNull()) {
386 : : return nullptr;
387 : : }
388 : :
389 [ + + ]: 269911 : const auto [mi, inserted]{m_block_index.try_emplace(hash)};
390 [ + + ]: 269911 : CBlockIndex* pindex = &(*mi).second;
391 [ + + ]: 269911 : if (inserted) {
392 : 134281 : pindex->phashBlock = &((*mi).first);
393 : : }
394 : : return pindex;
395 : : }
396 : :
397 : 1059 : bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash)
398 : : {
399 [ + - + - ]: 1059 : if (!m_block_tree_db->LoadBlockIndexGuts(
400 [ + - ]: 271591 : GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) {
401 : : return false;
402 : : }
403 : :
404 [ + + ]: 1059 : if (snapshot_blockhash) {
405 : 11 : const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash);
406 [ + + ]: 11 : if (!maybe_au_data) {
407 [ + - + - + - ]: 3 : m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString()));
408 : 1 : return false;
409 : : }
410 : 10 : const AssumeutxoData& au_data = *Assert(maybe_au_data);
411 : 10 : m_snapshot_height = au_data.height;
412 : 10 : CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};
413 : :
414 : : // Since m_chain_tx_count (responsible for estimated progress) isn't persisted
415 : : // to disk, we must bootstrap the value for assumedvalid chainstates
416 : : // from the hardcoded assumeutxo chainparams.
417 : 10 : base->m_chain_tx_count = au_data.m_chain_tx_count;
418 [ + - ]: 20 : LogPrintf("[snapshot] set m_chain_tx_count=%d for %s\n", au_data.m_chain_tx_count, snapshot_blockhash->ToString());
419 : : } else {
420 : : // If this isn't called with a snapshot blockhash, make sure the cached snapshot height
421 : : // is null. This is relevant during snapshot completion, when the blockman may be loaded
422 : : // with a height that then needs to be cleared after the snapshot is fully validated.
423 [ + + ]: 1048 : m_snapshot_height.reset();
424 : : }
425 : :
426 : 1058 : Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());
427 : :
428 : : // Calculate nChainWork
429 : 1058 : std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
430 [ + - ]: 1058 : std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
431 : : CBlockIndexHeightOnlyComparator());
432 : :
433 : 1058 : CBlockIndex* previous_index{nullptr};
434 [ + + ]: 136024 : for (CBlockIndex* pindex : vSortedByHeight) {
435 [ + - + - ]: 134967 : if (m_interrupt) return false;
436 [ + + + + ]: 134967 : if (previous_index && pindex->nHeight > previous_index->nHeight + 1) {
437 [ + - ]: 1 : LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1);
438 : : return false;
439 : : }
440 : 134966 : previous_index = pindex;
441 [ + - + + + - ]: 404898 : pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
442 [ + + + + ]: 134966 : pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
443 : :
444 : : // We can link the chain of blocks for which we've received transactions at some point, or
445 : : // blocks that are assumed-valid on the basis of snapshot load (see
446 : : // PopulateAndValidateSnapshot()).
447 : : // Pruned nodes may have deleted the block.
448 [ + + ]: 134966 : if (pindex->nTx > 0) {
449 [ + + ]: 133542 : if (pindex->pprev) {
450 [ + + + + + - ]: 132924 : if (m_snapshot_height && pindex->nHeight == *m_snapshot_height &&
451 [ + - ]: 4 : pindex->GetBlockHash() == *snapshot_blockhash) {
452 : : // Should have been set above; don't disturb it with code below.
453 [ + - ]: 4 : Assert(pindex->m_chain_tx_count > 0);
454 [ + + ]: 132916 : } else if (pindex->pprev->m_chain_tx_count > 0) {
455 : 132912 : pindex->m_chain_tx_count = pindex->pprev->m_chain_tx_count + pindex->nTx;
456 : : } else {
457 : 4 : pindex->m_chain_tx_count = 0;
458 [ + - ]: 4 : m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
459 : : }
460 : : } else {
461 : 622 : pindex->m_chain_tx_count = pindex->nTx;
462 : : }
463 : : }
464 [ + + + + - + ]: 134966 : if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
465 : 0 : pindex->nStatus |= BLOCK_FAILED_CHILD;
466 [ # # ]: 0 : m_dirty_blockindex.insert(pindex);
467 : : }
468 [ + + ]: 134966 : if (pindex->pprev) {
469 [ + - ]: 134315 : pindex->BuildSkip();
470 : : }
471 : : }
472 : :
473 : : return true;
474 : 1058 : }
475 : :
476 : 2993 : bool BlockManager::WriteBlockIndexDB()
477 : : {
478 : 2993 : AssertLockHeld(::cs_main);
479 : 2993 : std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
480 [ + - ]: 2993 : vFiles.reserve(m_dirty_fileinfo.size());
481 [ + + ]: 4592 : for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
482 [ + - ]: 1599 : vFiles.emplace_back(*it, &m_blockfile_info[*it]);
483 : 1599 : m_dirty_fileinfo.erase(it++);
484 : : }
485 : 2993 : std::vector<const CBlockIndex*> vBlocks;
486 [ + - ]: 2993 : vBlocks.reserve(m_dirty_blockindex.size());
487 [ + + ]: 150451 : for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
488 [ + - ]: 147458 : vBlocks.push_back(*it);
489 : 147458 : m_dirty_blockindex.erase(it++);
490 : : }
491 [ + - + - ]: 5986 : int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
492 [ + - - + ]: 2993 : if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) {
493 : 0 : return false;
494 : : }
495 : : return true;
496 : 2993 : }
497 : :
498 : 1059 : bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash)
499 : : {
500 [ + + ]: 1059 : if (!LoadBlockIndex(snapshot_blockhash)) {
501 : : return false;
502 : : }
503 : 1057 : int max_blockfile_num{0};
504 : :
505 : : // Load block file info
506 : 1057 : m_block_tree_db->ReadLastBlockFile(max_blockfile_num);
507 : 1057 : m_blockfile_info.resize(max_blockfile_num + 1);
508 : 1057 : LogPrintf("%s: last block file = %i\n", __func__, max_blockfile_num);
509 [ + + ]: 2244 : for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
510 : 1187 : m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
511 : : }
512 [ + - ]: 1057 : LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[max_blockfile_num].ToString());
513 : 1057 : for (int nFile = max_blockfile_num + 1; true; nFile++) {
514 : 1057 : CBlockFileInfo info;
515 [ - + ]: 1057 : if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
516 : 0 : m_blockfile_info.push_back(info);
517 : : } else {
518 : : break;
519 : : }
520 : 0 : }
521 : :
522 : : // Check presence of blk files
523 : 1057 : LogPrintf("Checking all blk files are present...\n");
524 : 1057 : std::set<int> setBlkDataFiles;
525 [ + + + + ]: 135994 : for (const auto& [_, block_index] : m_block_index) {
526 [ + + ]: 134937 : if (block_index.nStatus & BLOCK_HAVE_DATA) {
527 [ + - ]: 119136 : setBlkDataFiles.insert(block_index.nFile);
528 : : }
529 : : }
530 [ + + ]: 1749 : for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
531 [ + - ]: 693 : FlatFilePos pos(*it, 0);
532 [ + - + + ]: 1386 : if (OpenBlockFile(pos, true).IsNull()) {
533 : : return false;
534 : : }
535 : : }
536 : :
537 : 1056 : {
538 : : // Initialize the blockfile cursors.
539 [ + - ]: 1056 : LOCK(cs_LastBlockFile);
540 [ + + ]: 2242 : for (size_t i = 0; i < m_blockfile_info.size(); ++i) {
541 [ + - ]: 1186 : const auto last_height_in_file = m_blockfile_info[i].nHeightLast;
542 [ + - + + ]: 2372 : m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0};
543 : : }
544 : 0 : }
545 : :
546 : : // Check whether we have ever pruned block & undo files
547 [ + - + - ]: 1056 : m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
548 [ + + ]: 1056 : if (m_have_pruned) {
549 [ + - ]: 16 : LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
550 : : }
551 : :
552 : : // Check whether we need to continue reindexing
553 : 1056 : bool fReindexing = false;
554 [ + - ]: 1056 : m_block_tree_db->ReadReindexing(fReindexing);
555 [ + + ]: 1056 : if (fReindexing) m_blockfiles_indexed = false;
556 : :
557 : : return true;
558 : 1057 : }
559 : :
560 : 1059 : void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
561 : : {
562 : 1059 : AssertLockHeld(::cs_main);
563 [ + - ]: 2118 : int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
564 [ + + ]: 1059 : if (!m_have_pruned) {
565 : : return;
566 : : }
567 : :
568 : 18 : std::set<int> block_files_to_prune;
569 [ + + ]: 108 : for (int file_number = 0; file_number < max_blockfile; file_number++) {
570 [ + + ]: 90 : if (m_blockfile_info[file_number].nSize == 0) {
571 [ + - ]: 61 : block_files_to_prune.insert(file_number);
572 : : }
573 : : }
574 : :
575 [ + - ]: 18 : UnlinkPrunedFiles(block_files_to_prune);
576 : 18 : }
577 : :
578 : 181812 : const CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
579 : : {
580 : 181812 : const MapCheckpoints& checkpoints = data.mapCheckpoints;
581 : :
582 [ + + ]: 184050 : for (const MapCheckpoints::value_type& i : checkpoints | std::views::reverse) {
583 : 183357 : const uint256& hash = i.second;
584 : 183357 : const CBlockIndex* pindex = LookupBlockIndex(hash);
585 [ + + ]: 183357 : if (pindex) {
586 : : return pindex;
587 : : }
588 : : }
589 : : return nullptr;
590 : : }
591 : :
592 : 642 : bool BlockManager::IsBlockPruned(const CBlockIndex& block) const
593 : : {
594 : 642 : AssertLockHeld(::cs_main);
595 [ + + + - : 642 : return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0);
- + ]
596 : : }
597 : :
598 : 109 : const CBlockIndex* BlockManager::GetFirstBlock(const CBlockIndex& upper_block, uint32_t status_mask, const CBlockIndex* lower_block) const
599 : : {
600 : 109 : AssertLockHeld(::cs_main);
601 : 109 : const CBlockIndex* last_block = &upper_block;
602 [ - + ]: 109 : assert((last_block->nStatus & status_mask) == status_mask); // 'upper_block' must satisfy the status mask
603 [ + + + + ]: 39065 : while (last_block->pprev && ((last_block->pprev->nStatus & status_mask) == status_mask)) {
604 [ + + ]: 38972 : if (lower_block) {
605 : : // Return if we reached the lower_block
606 [ + + ]: 38823 : if (last_block == lower_block) return lower_block;
607 : : // if range was surpassed, means that 'lower_block' is not part of the 'upper_block' chain
608 : : // and so far this is not allowed.
609 [ - + ]: 38807 : assert(last_block->nHeight >= lower_block->nHeight);
610 : : }
611 : : last_block = last_block->pprev;
612 : : }
613 [ - + ]: 93 : assert(last_block != nullptr);
614 : : return last_block;
615 : : }
616 : :
617 : 40 : bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block)
618 : : {
619 [ + - ]: 40 : if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false;
620 : 40 : return GetFirstBlock(upper_block, BLOCK_HAVE_DATA, &lower_block) == &lower_block;
621 : : }
622 : :
623 : : // If we're using -prune with -reindex, then delete block files that will be ignored by the
624 : : // reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
625 : : // is missing, do the same here to delete any later block files after a gap. Also delete all
626 : : // rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info
627 : : // is in sync with what's actually on disk by the time we start downloading, so that pruning
628 : : // works correctly.
629 : 3 : void BlockManager::CleanupBlockRevFiles() const
630 : : {
631 [ + - ]: 3 : std::map<std::string, fs::path> mapBlockFiles;
632 : :
633 : : // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
634 : : // Remove the rev files immediately and insert the blk file paths into an
635 : : // ordered map keyed by block file index.
636 [ + - ]: 3 : LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
637 [ + - + + + + ]: 30 : for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) {
638 [ + - + - ]: 96 : const std::string path = fs::PathToString(it->path().filename());
639 [ + - + + ]: 48 : if (fs::is_regular_file(*it) &&
640 [ + + + + ]: 24 : path.length() == 12 &&
641 [ + - + - ]: 36 : path.substr(8,4) == ".dat")
642 : : {
643 [ + - + + ]: 18 : if (path.substr(0, 3) == "blk") {
644 [ + - + - + - ]: 18 : mapBlockFiles[path.substr(3, 5)] = it->path();
645 [ + - + - ]: 9 : } else if (path.substr(0, 3) == "rev") {
646 [ + - ]: 9 : remove(it->path());
647 : : }
648 : : }
649 [ + - ]: 24 : }
650 : :
651 : : // Remove all block files that aren't part of a contiguous set starting at
652 : : // zero by walking the ordered map (keys are block file indices) by
653 : : // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
654 : : // start removing block files.
655 : 3 : int nContigCounter = 0;
656 [ + + ]: 12 : for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
657 [ + - - + ]: 9 : if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) {
658 : 0 : nContigCounter++;
659 : 0 : continue;
660 : : }
661 [ + - ]: 9 : remove(item.second);
662 : : }
663 : 3 : }
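// Worked example of the contiguity rule above (hypothetical directory contents):
// given blk00000.dat, blk00001.dat and blk00003.dat, indices 0 and 1 match the
// counter and are kept, while blk00003.dat is removed because 00002 is missing;
// reindexing would stop at that gap anyway.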
664 : :
665 : 4 : CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
666 : : {
667 : 4 : LOCK(cs_LastBlockFile);
668 : :
669 [ + - + - ]: 4 : return &m_blockfile_info.at(n);
670 : 4 : }
671 : :
672 : 119541 : bool BlockManager::UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const
673 : : {
674 : : // Open history file to append
675 : 119541 : AutoFile fileout{OpenUndoFile(pos)};
676 [ - + ]: 119541 : if (fileout.IsNull()) {
677 [ # # ]: 0 : LogError("%s: OpenUndoFile failed\n", __func__);
678 : : return false;
679 : : }
680 : :
681 : : // Write index header
682 [ + - ]: 119541 : unsigned int nSize = GetSerializeSize(blockundo);
683 [ + - + - ]: 119541 : fileout << GetParams().MessageStart() << nSize;
684 : :
685 : : // Write undo data
686 [ + - ]: 119541 : long fileOutPos = fileout.tell();
687 : 119541 : pos.nPos = (unsigned int)fileOutPos;
688 [ + - ]: 119541 : fileout << blockundo;
689 : :
690 : : // calculate & write checksum
691 [ + - ]: 119541 : HashWriter hasher{};
692 [ + - ]: 119541 : hasher << hashBlock;
693 [ + - ]: 119541 : hasher << blockundo;
694 [ + - ]: 239082 : fileout << hasher.GetHash();
695 : :
696 : : return true;
697 : 119541 : }
698 : :
699 : 50436 : bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& index) const
700 : : {
701 [ + - ]: 100872 : const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};
702 : :
703 : : // Open history file to read
704 : 50436 : AutoFile filein{OpenUndoFile(pos, true)};
705 [ + + ]: 50436 : if (filein.IsNull()) {
706 [ + - + - ]: 7 : LogError("%s: OpenUndoFile failed for %s\n", __func__, pos.ToString());
707 : 7 : return false;
708 : : }
709 : :
710 : : // Read block
711 : 50429 : uint256 hashChecksum;
712 [ + - ]: 50429 : HashVerifier verifier{filein}; // Use HashVerifier as reserializing may lose data, c.f. commit d342424301013ec47dc146a4beb49d5c9319d80a
713 : 50429 : try {
714 [ + - ]: 50429 : verifier << index.pprev->GetBlockHash();
715 [ + + ]: 50429 : verifier >> blockundo;
716 [ + - ]: 50428 : filein >> hashChecksum;
717 [ - + ]: 1 : } catch (const std::exception& e) {
718 [ + - + - ]: 1 : LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
719 : 1 : return false;
720 : 1 : }
721 : :
722 : : // Verify checksum
723 [ + - - + ]: 50428 : if (hashChecksum != verifier.GetHash()) {
724 [ # # # # ]: 0 : LogError("%s: Checksum mismatch at %s\n", __func__, pos.ToString());
725 : 0 : return false;
726 : : }
727 : :
728 : : return true;
729 : 50436 : }
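// On-disk layout of one record in a rev?????.dat file, as implied by the
// write/read paths above (a reader's sketch, not a normative spec):
//
//   [4 bytes]  network magic (GetParams().MessageStart())
//   [4 bytes]  nSize, the serialized size of the undo data
//   [n bytes]  serialized CBlockUndo             <- pos.nPos points here
//   [32 bytes] SHA256d(prev block hash || undo data), verified on read
//
// This is also why WriteUndoDataForBlock() below reserves
// GetSerializeSize(blockundo) + 40 bytes when calling FindUndoPos().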
730 : :
731 : 3092 : bool BlockManager::FlushUndoFile(int block_file, bool finalize)
732 : : {
733 : 3092 : FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
734 [ - + ]: 3092 : if (!m_undo_file_seq.Flush(undo_pos_old, finalize)) {
735 [ # # ]: 0 : m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error."));
736 : 0 : return false;
737 : : }
738 : : return true;
739 : : }
740 : :
741 : 3091 : bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
742 : : {
743 : 3091 : bool success = true;
744 : 3091 : LOCK(cs_LastBlockFile);
745 : :
746 [ + - ]: 3091 : if (m_blockfile_info.size() < 1) {
747 : : // Return if we haven't loaded any blockfiles yet. This happens during
748 : : // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which
749 : : // then calls FlushStateToDisk()), resulting in a call to this function before we
750 : : // have populated `m_blockfile_info` via LoadBlockIndexDB().
751 : : return true;
752 : : }
753 [ - + ]: 3091 : assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num);
754 : :
755 [ + - ]: 3091 : FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
756 [ + - - + ]: 3091 : if (!m_block_file_seq.Flush(block_pos_old, fFinalize)) {
757 [ # # # # ]: 0 : m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error."));
758 : 0 : success = false;
759 : : }
760 : : // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
761 : : // e.g. during IBD or a sync after a node going offline
762 [ + - ]: 3091 : if (!fFinalize || finalize_undo) {
763 [ + - - + ]: 3091 : if (!FlushUndoFile(blockfile_num, finalize_undo)) {
764 : 0 : success = false;
765 : : }
766 : : }
767 : : return success;
768 : 3091 : }
769 : :
770 : 252992 : BlockfileType BlockManager::BlockfileTypeForHeight(int height)
771 : : {
772 [ + + ]: 252992 : if (!m_snapshot_height) {
773 : : return BlockfileType::NORMAL;
774 : : }
775 [ + + ]: 3321 : return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL;
776 : : }
777 : :
778 : 2993 : bool BlockManager::FlushChainstateBlockFile(int tip_height)
779 : : {
780 : 2993 : LOCK(cs_LastBlockFile);
781 [ + - + + ]: 2993 : auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
782 : : // If the cursor does not exist, it means an assumeutxo snapshot is loaded,
783 : : // but no blocks past the snapshot height have been written yet, so there
784 : : // is no data associated with the chainstate, and it is safe not to flush.
785 [ + + ]: 2993 : if (cursor) {
786 [ + - ]: 2969 : return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
787 : : }
788 : : // No need to log warnings in this case.
789 : : return true;
790 : 2993 : }
791 : :
792 : 20529 : uint64_t BlockManager::CalculateCurrentUsage()
793 : : {
794 : 20529 : LOCK(cs_LastBlockFile);
795 : :
796 : 20529 : uint64_t retval = 0;
797 [ + + ]: 43084 : for (const CBlockFileInfo& file : m_blockfile_info) {
798 : 22555 : retval += file.nSize + file.nUndoSize;
799 : : }
800 [ + - ]: 20529 : return retval;
801 : 20529 : }
802 : :
803 : 52 : void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const
804 : : {
805 : 52 : std::error_code ec;
806 [ + + ]: 181 : for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
807 : 129 : FlatFilePos pos(*it, 0);
808 : 129 : const bool removed_blockfile{fs::remove(m_block_file_seq.FileName(pos), ec)};
809 : 129 : const bool removed_undofile{fs::remove(m_undo_file_seq.FileName(pos), ec)};
810 [ + + ]: 129 : if (removed_blockfile || removed_undofile) {
811 [ + - ]: 69 : LogDebug(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
812 : : }
813 : : }
814 : 52 : }
815 : :
816 : 294732 : AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const
817 : : {
818 [ + - ]: 589464 : return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_xor_key};
819 : : }
820 : :
821 : : /** Open an undo file (rev?????.dat) */
822 : 169977 : AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const
823 : : {
824 [ + - ]: 339954 : return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_xor_key};
825 : : }
826 : :
827 : 32 : fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const
828 : : {
829 : 32 : return m_block_file_seq.FileName(pos);
830 : : }
831 : :
832 : 120413 : FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
833 : : {
834 : 120413 : LOCK(cs_LastBlockFile);
835 : :
836 [ + - ]: 120413 : const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
837 : :
838 [ + + ]: 120413 : if (!m_blockfile_cursors[chain_type]) {
839 : : // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
840 [ - + ]: 10 : assert(chain_type == BlockfileType::ASSUMED);
841 : 10 : const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
842 [ + - ]: 10 : m_blockfile_cursors[chain_type] = new_cursor;
843 [ + - + - : 10 : LogDebug(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
+ - ]
844 : : }
845 [ + + ]: 120413 : const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
846 : :
847 : 120413 : int nFile = last_blockfile;
848 [ + + ]: 120413 : if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
849 [ + - ]: 16 : m_blockfile_info.resize(nFile + 1);
850 : : }
851 : :
852 : 120413 : bool finalize_undo = false;
853 : 120413 : unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
854 : : // Use smaller blockfiles in test-only -fastprune mode - but avoid
855 : : // the possibility of having a block not fit into the block file.
856 [ + + ]: 120413 : if (m_opts.fast_prune) {
857 : 17171 : max_blockfile_size = 0x10000; // 64kiB
858 [ + + ]: 17171 : if (nAddSize >= max_blockfile_size) {
859 : : // dynamically adjust the blockfile size to be larger than the added size
860 : 2 : max_blockfile_size = nAddSize + 1;
861 : : }
862 : : }
863 [ - + ]: 120413 : assert(nAddSize < max_blockfile_size);
864 : :
865 [ + + ]: 120535 : while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
866 : : // when the undo file is keeping up with the block file, we want to flush it explicitly
867 : : // when it is lagging behind (more blocks arrive than are being connected), we let the
868 : : // undo block write case handle it
869 : 244 : finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
870 [ + - + - ]: 122 : Assert(m_blockfile_cursors[chain_type])->undo_height);
871 : :
872 : : // Try the next unclaimed blockfile number
873 : 122 : nFile = this->MaxBlockfileNum() + 1;
874 : : // Set to increment MaxBlockfileNum() for next iteration
875 [ + - ]: 122 : m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
876 : :
877 [ + - ]: 122 : if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
878 [ + - ]: 122 : m_blockfile_info.resize(nFile + 1);
879 : : }
880 : : }
881 : 120413 : FlatFilePos pos;
882 : 120413 : pos.nFile = nFile;
883 : 120413 : pos.nPos = m_blockfile_info[nFile].nSize;
884 : :
885 [ + + ]: 120413 : if (nFile != last_blockfile) {
886 [ + - + - + - + - ]: 244 : LogDebug(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
887 : : last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
888 : :
889 : : // Do not propagate the return code. The flush concerns a previous block
890 : : // and undo file that has already been written to. If a flush fails
891 : : // here, and we crash, there is no expected additional block data
892 : : // inconsistency arising from the flush failure here. However, the undo
893 : : // data may be inconsistent after a crash if the flush is called during
894 : : // a reindex. A flush error might also leave some of the data files
895 : : // untrimmed.
896 [ + - - + ]: 122 : if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) {
897 [ # # # # # # ]: 0 : LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
898 : : "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n",
899 : : last_blockfile, finalize_undo, nFile);
900 : : }
901 : : // No undo data yet in the new file, so reset our undo-height tracking.
902 [ + - ]: 122 : m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
903 : : }
904 : :
905 : 120413 : m_blockfile_info[nFile].AddBlock(nHeight, nTime);
906 [ + - ]: 120413 : m_blockfile_info[nFile].nSize += nAddSize;
907 : :
908 : 120413 : bool out_of_space;
909 [ + - ]: 120413 : size_t bytes_allocated = m_block_file_seq.Allocate(pos, nAddSize, out_of_space);
910 [ - + ]: 120413 : if (out_of_space) {
911 [ # # # # ]: 0 : m_opts.notifications.fatalError(_("Disk space is too low!"));
912 : 0 : return {};
913 : : }
914 [ + + + + ]: 120413 : if (bytes_allocated != 0 && IsPruneMode()) {
915 : 405 : m_check_for_pruning = true;
916 : : }
917 : :
918 [ + - ]: 120413 : m_dirty_fileinfo.insert(nFile);
919 : 120413 : return pos;
920 : 120413 : }
921 : :
922 : 3478 : void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos)
923 : : {
924 : 3478 : LOCK(cs_LastBlockFile);
925 : :
926 : : // Update the cursor so it points to the last file.
927 [ + - ]: 3478 : const BlockfileType chain_type{BlockfileTypeForHeight(nHeight)};
928 [ + - ]: 3478 : auto& cursor{m_blockfile_cursors[chain_type]};
929 [ + - + + ]: 3478 : if (!cursor || cursor->file_num < pos.nFile) {
930 [ + - ]: 1 : m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile};
931 : : }
932 : :
933 : : // Update the file information with the current block.
934 : 3478 : const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block));
935 : 3478 : const int nFile = pos.nFile;
936 [ + + ]: 3478 : if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
937 [ + - ]: 15 : m_blockfile_info.resize(nFile + 1);
938 : : }
939 : 3478 : m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime());
940 [ + + ]: 3478 : m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize);
941 [ + - ]: 3478 : m_dirty_fileinfo.insert(nFile);
942 : 3478 : }
943 : :
944 : 119541 : bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
945 : : {
946 : 119541 : pos.nFile = nFile;
947 : :
948 : 119541 : LOCK(cs_LastBlockFile);
949 : :
950 [ + - ]: 119541 : pos.nPos = m_blockfile_info[nFile].nUndoSize;
951 : 119541 : m_blockfile_info[nFile].nUndoSize += nAddSize;
952 [ + - ]: 119541 : m_dirty_fileinfo.insert(nFile);
953 : :
954 : 119541 : bool out_of_space;
955 [ + - ]: 119541 : size_t bytes_allocated = m_undo_file_seq.Allocate(pos, nAddSize, out_of_space);
956 [ - + ]: 119541 : if (out_of_space) {
957 [ # # # # ]: 0 : return FatalError(m_opts.notifications, state, _("Disk space is too low!"));
958 : : }
959 [ + + + + ]: 119541 : if (bytes_allocated != 0 && IsPruneMode()) {
960 : 93 : m_check_for_pruning = true;
961 : : }
962 : :
963 : : return true;
964 : 119541 : }
965 : :
966 : 120413 : bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const
967 : : {
968 : : // Open history file to append
969 : 120413 : AutoFile fileout{OpenBlockFile(pos)};
970 [ - + ]: 120413 : if (fileout.IsNull()) {
971 [ # # ]: 0 : LogError("%s: OpenBlockFile failed\n", __func__);
972 : : return false;
973 : : }
974 : :
975 : : // Write index header
976 : 120413 : unsigned int nSize = GetSerializeSize(TX_WITH_WITNESS(block));
977 [ + - + - ]: 120413 : fileout << GetParams().MessageStart() << nSize;
978 : :
979 : : // Write block
980 [ + - ]: 120413 : long fileOutPos = fileout.tell();
981 : 120413 : pos.nPos = (unsigned int)fileOutPos;
982 [ + - ]: 240826 : fileout << TX_WITH_WITNESS(block);
983 : :
984 : : return true;
985 : 120413 : }
986 : :
987 : 124922 : bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
988 : : {
989 : 124922 : AssertLockHeld(::cs_main);
990 : 124922 : const BlockfileType type = BlockfileTypeForHeight(block.nHeight);
991 [ + + + - ]: 249844 : auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
992 : :
993 : : // Write undo information to disk
994 [ + + ]: 124922 : if (block.GetUndoPos().IsNull()) {
995 : 119541 : FlatFilePos _pos;
996 [ - + ]: 119541 : if (!FindUndoPos(state, block.nFile, _pos, ::GetSerializeSize(blockundo) + 40)) {
997 : 0 : LogError("%s: FindUndoPos failed\n", __func__);
998 : 0 : return false;
999 : : }
1000 [ - + ]: 119541 : if (!UndoWriteToDisk(blockundo, _pos, block.pprev->GetBlockHash())) {
1001 [ # # ]: 0 : return FatalError(m_opts.notifications, state, _("Failed to write undo data."));
1002 : : }
1003 : : // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
1004 : : // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
1005 : : // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
1006 : : // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
1007 : : // the FindNextBlockPos function
1008 [ + + + + ]: 119541 : if (_pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) {
1009 : : // Do not propagate the return code, a failed flush here should not
1010 : : // be an indication for a failed write. If it were propagated here,
1011 : : // the caller would assume the undo data not to be written, when in
1012 : : // fact it is. Note though, that a failed flush might leave the data
1013 : : // file untrimmed.
1014 [ - + ]: 1 : if (!FlushUndoFile(_pos.nFile, true)) {
1015 [ # # ]: 0 : LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", _pos.nFile);
1016 : : }
1017 [ + + + + ]: 119540 : } else if (_pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) {
1018 : 110424 : cursor.undo_height = block.nHeight;
1019 : : }
1020 : : // update nUndoPos in block index
1021 : 119541 : block.nUndoPos = _pos.nPos;
1022 : 119541 : block.nStatus |= BLOCK_HAVE_UNDO;
1023 : 119541 : m_dirty_blockindex.insert(&block);
1024 : : }
1025 : :
1026 : : return true;
1027 : : }
1028 : :
1029 : 129910 : bool BlockManager::ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos) const
1030 : : {
1031 : 129910 : block.SetNull();
1032 : :
1033 : : // Open history file to read
1034 : 129910 : AutoFile filein{OpenBlockFile(pos, true)};
1035 [ + + ]: 129910 : if (filein.IsNull()) {
1036 [ + - + - ]: 106 : LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1037 : 106 : return false;
1038 : : }
1039 : :
1040 : : // Read block
1041 : 129804 : try {
1042 [ + - ]: 129804 : filein >> TX_WITH_WITNESS(block);
1043 [ - - ]: 0 : } catch (const std::exception& e) {
1044 [ - - - - ]: 0 : LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
1045 : 0 : return false;
1046 : 0 : }
1047 : :
1048 : : // Check the header
1049 [ + - + - + + ]: 129804 : if (!CheckProofOfWork(block.GetHash(), block.nBits, GetConsensus())) {
1050 [ + - + - ]: 4 : LogError("%s: Errors in block header at %s\n", __func__, pos.ToString());
1051 : 4 : return false;
1052 : : }
1053 : :
1054 : : // Signet only: check block solution
1055 [ + + + - - + ]: 129800 : if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) {
1056 [ # # # # ]: 0 : LogError("%s: Errors in block solution at %s\n", __func__, pos.ToString());
1057 : 0 : return false;
1058 : : }
1059 : :
1060 : : return true;
1061 : 129910 : }
1062 : :
1063 : 124275 : bool BlockManager::ReadBlockFromDisk(CBlock& block, const CBlockIndex& index) const
1064 : : {
1065 [ + - ]: 248550 : const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())};
1066 : :
1067 [ + + ]: 124275 : if (!ReadBlockFromDisk(block, block_pos)) {
1068 : : return false;
1069 : : }
1070 [ - + ]: 124168 : if (block.GetHash() != index.GetBlockHash()) {
1071 [ # # # # ]: 0 : LogError("%s: GetHash() doesn't match index for %s at %s\n", __func__, index.ToString(), block_pos.ToString());
1072 : 0 : return false;
1073 : : }
1074 : : return true;
1075 : : }
1076 : :
1077 : 43554 : bool BlockManager::ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos) const
1078 : : {
1079 : 43554 : FlatFilePos hpos = pos;
1080 : : // If nPos is less than 8 the pos is null and we don't have the block data
1081 : : // Return early to prevent undefined behavior of unsigned int underflow
1082 [ - + ]: 43554 : if (hpos.nPos < 8) {
1083 [ # # ]: 0 : LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1084 : 0 : return false;
1085 : : }
1086 : 43554 : hpos.nPos -= 8; // Seek back 8 bytes for meta header
1087 : 43554 : AutoFile filein{OpenBlockFile(hpos, true)};
1088 [ + + ]: 43554 : if (filein.IsNull()) {
1089 [ + - + - ]: 2 : LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1090 : 2 : return false;
1091 : : }
1092 : :
1093 : 43552 : try {
1094 : 43552 : MessageStartChars blk_start;
1095 : 43552 : unsigned int blk_size;
1096 : :
1097 [ + - + - ]: 43552 : filein >> blk_start >> blk_size;
1098 : :
1099 [ - + ]: 43552 : if (blk_start != GetParams().MessageStart()) {
1100 [ # # # # # # # # ]: 0 : LogError("%s: Block magic mismatch for %s: %s versus expected %s\n", __func__, pos.ToString(),
1101 : : HexStr(blk_start),
1102 : : HexStr(GetParams().MessageStart()));
1103 : 0 : return false;
1104 : : }
1105 : :
1106 [ - + ]: 43552 : if (blk_size > MAX_SIZE) {
1107 [ # # # # ]: 0 : LogError("%s: Block data is larger than maximum deserialization size for %s: %s versus %s\n", __func__, pos.ToString(),
1108 : : blk_size, MAX_SIZE);
1109 : 0 : return false;
1110 : : }
1111 : :
1112 [ + - ]: 43552 : block.resize(blk_size); // Zeroing of memory is intentional here
1113 [ + - ]: 43552 : filein.read(MakeWritableByteSpan(block));
1114 [ - - ]: 0 : } catch (const std::exception& e) {
1115 [ - - - - ]: 0 : LogError("%s: Read from block file failed: %s for %s\n", __func__, e.what(), pos.ToString());
1116 : 0 : return false;
1117 : 0 : }
1118 : :
1119 : : return true;
1120 : 43554 : }
1121 : :
1122 : 120413 : FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight)
1123 : : {
1124 : 120413 : unsigned int nBlockSize = ::GetSerializeSize(TX_WITH_WITNESS(block));
1125 : : // Account for the 4 magic message start bytes + the 4 length bytes (8 bytes total,
1126 : : // defined as BLOCK_SERIALIZATION_HEADER_SIZE)
1127 : 120413 : nBlockSize += static_cast<unsigned int>(BLOCK_SERIALIZATION_HEADER_SIZE);
1128 : 120413 : FlatFilePos blockPos{FindNextBlockPos(nBlockSize, nHeight, block.GetBlockTime())};
1129 [ - + ]: 120413 : if (blockPos.IsNull()) {
1130 : 0 : LogError("%s: FindNextBlockPos failed\n", __func__);
1131 : 0 : return FlatFilePos();
1132 : : }
1133 [ - + ]: 120413 : if (!WriteBlockToDisk(block, blockPos)) {
1134 [ # # ]: 0 : m_opts.notifications.fatalError(_("Failed to write block."));
1135 : 0 : return FlatFilePos();
1136 : : }
1137 : 120413 : return blockPos;
1138 : : }
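// Corresponding layout of a block record in a blk?????.dat file, as implied by
// WriteBlockToDisk() and ReadRawBlockFromDisk() above (sketch):
//
//   [4 bytes] network magic                      } BLOCK_SERIALIZATION_HEADER_SIZE
//   [4 bytes] serialized block size              } (8 bytes total)
//   [n bytes] block serialized with witness data <- pos.nPos points here
//
// SaveBlockToDisk() adds the 8-byte header to the space it reserves, and
// ReadRawBlockFromDisk() seeks back 8 bytes from pos to re-read magic and size.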
1139 : :
1140 : 1086 : static auto InitBlocksdirXorKey(const BlockManager::Options& opts)
1141 : : {
1142 : : // Bytes are serialized without length indicator, so this is also the exact
1143 : : // size of the XOR-key file.
1144 : 1086 : std::array<std::byte, 8> xor_key{};
1145 : :
1146 [ + + + + ]: 1086 : if (opts.use_xor && fs::is_empty(opts.blocks_dir)) {
1147 : : // Only use random fresh key when the boolean option is set and on the
1148 : : // very first start of the program.
1149 : 441 : FastRandomContext{}.fillrand(xor_key);
1150 : : }
1151 : :
1152 [ + - ]: 2172 : const fs::path xor_key_path{opts.blocks_dir / "xor.dat"};
1153 [ + - + + ]: 1086 : if (fs::exists(xor_key_path)) {
1154 : : // A pre-existing xor key file has priority.
1155 [ + - + - ]: 643 : AutoFile xor_key_file{fsbridge::fopen(xor_key_path, "rb")};
1156 [ + - ]: 1286 : xor_key_file >> xor_key;
1157 : 643 : } else {
1158 : : // Create initial or missing xor key file
1159 : 443 : AutoFile xor_key_file{fsbridge::fopen(xor_key_path,
1160 : : #ifdef __MINGW64__
1161 : : "wb" // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210
1162 : : #else
1163 : : "wbx"
1164 : : #endif
1165 [ + - + - ]: 443 : )};
1166 [ + - ]: 886 : xor_key_file << xor_key;
1167 : 443 : }
1168 : : // If the user disabled the key, it must be zero.
1169 [ + + + + ]: 1088 : if (!opts.use_xor && xor_key != decltype(xor_key){}) {
1170 : 1 : throw std::runtime_error{
1171 [ + - ]: 2 : strprintf("The blocksdir XOR-key can not be disabled when a random key was already stored! "
1172 : : "Stored key: '%s', stored path: '%s'.",
1173 [ + - ]: 2 : HexStr(xor_key), fs::PathToString(xor_key_path)),
1174 [ + - + - ]: 3 : };
1175 : : }
1176 [ + - + - + - ]: 3255 : LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(xor_key));
1177 [ + - ]: 1085 : return std::vector<std::byte>{xor_key.begin(), xor_key.end()};
1178 : 1085 : }
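// Minimal sketch, under assumptions, of the obfuscation the 8-byte key enables.
// The real transform is applied inside AutoFile (constructed with m_xor_key in
// OpenBlockFile()/OpenUndoFile() above); the helper name and signature here are
// illustrative only.
//
//   void XorWithKey(std::span<std::byte> data, std::span<const std::byte> key, size_t offset)
//   {
//       if (key.empty()) return;                        // no key: data left as-is
//       for (size_t i = 0; i < data.size(); ++i) {
//           data[i] ^= key[(offset + i) % key.size()];  // repeat the 8-byte key
//       }
//   }
//
// An all-zero key XORs every byte with zero, leaving files in the clear, which is
// why the key may only be "disabled" when the stored key is already all zeroes.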
1179 : :
1180 : 1086 : BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts)
1181 : 1086 : : m_prune_mode{opts.prune_target > 0},
1182 [ + + ]: 1086 : m_xor_key{InitBlocksdirXorKey(opts)},
1183 [ + - ]: 1085 : m_opts{std::move(opts)},
1184 [ + - + + + - ]: 2140 : m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}},
1185 [ + - + - ]: 1085 : m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}},
1186 [ + + + - ]: 2171 : m_interrupt{interrupt} {}
1187 : :
1188 : : class ImportingNow
1189 : : {
1190 : : std::atomic<bool>& m_importing;
1191 : :
1192 : : public:
1193 : 889 : ImportingNow(std::atomic<bool>& importing) : m_importing{importing}
1194 : : {
1195 [ - + ]: 889 : assert(m_importing == false);
1196 : 889 : m_importing = true;
1197 : 889 : }
1198 : 889 : ~ImportingNow()
1199 : : {
1200 [ - + ]: 889 : assert(m_importing == true);
1201 : 889 : m_importing = false;
1202 : 889 : }
1203 : : };
1204 : :
1205 : 889 : void ImportBlocks(ChainstateManager& chainman, std::span<const fs::path> import_paths)
1206 : : {
1207 : 889 : ImportingNow imp{chainman.m_blockman.m_importing};
1208 : :
1209 : : // -reindex
1210 [ + + ]: 889 : if (!chainman.m_blockman.m_blockfiles_indexed) {
1211 : 18 : int nFile = 0;
1212 : : // Map of disk positions for blocks with unknown parent (only used for reindex);
1213 : : // parent hash -> child disk position, multiple children can have the same parent.
1214 : 18 : std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
1215 : 46 : while (true) {
1216 [ + - ]: 32 : FlatFilePos pos(nFile, 0);
1217 [ + - + + ]: 96 : if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) {
1218 : : break; // No block files left to reindex
1219 : : }
1220 [ + - ]: 16 : AutoFile file{chainman.m_blockman.OpenBlockFile(pos, true)};
1221 [ + - ]: 16 : if (file.IsNull()) {
1222 : : break; // This error is logged in OpenBlockFile
1223 : : }
1224 [ + - ]: 16 : LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile);
1225 [ + - ]: 16 : chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
1226 [ + - + + ]: 16 : if (chainman.m_interrupt) {
1227 [ + - ]: 2 : LogPrintf("Interrupt requested. Exit %s\n", __func__);
1228 : 2 : return;
1229 : : }
1230 : 14 : nFile++;
1231 : 16 : }
1232 [ + - + - ]: 48 : WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
1233 [ + - ]: 16 : chainman.m_blockman.m_blockfiles_indexed = true;
1234 [ + - ]: 16 : LogPrintf("Reindexing finished\n");
1235 : : // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
1236 [ + - + - ]: 16 : chainman.ActiveChainstate().LoadGenesisBlock();
1237 : 18 : }
1238 : :
1239 : : // -loadblock=
1240 [ + + ]: 888 : for (const fs::path& path : import_paths) {
1241 [ + - + - ]: 1 : AutoFile file{fsbridge::fopen(path, "rb")};
1242 [ + - ]: 1 : if (!file.IsNull()) {
1243 [ + - + - ]: 2 : LogPrintf("Importing blocks file %s...\n", fs::PathToString(path));
1244 [ + - ]: 1 : chainman.LoadExternalBlockFile(file);
1245 [ + - - + ]: 1 : if (chainman.m_interrupt) {
1246 [ # # ]: 0 : LogPrintf("Interrupt requested. Exit %s\n", __func__);
1247 : 0 : return;
1248 : : }
1249 : : } else {
1250 [ # # # # ]: 0 : LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
1251 : : }
1252 : 1 : }
1253 : :
1254 : : // scan for better chains in the block chain database, that are not yet connected in the active best chain
1255 : :
1256 : : // We can't hold cs_main during ActivateBestChain even though we're accessing
1257 : : // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
1258 : : // the relevant pointers before the ABC call.
1259 [ + - + + : 3553 : for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
+ - ]
1260 [ + - ]: 892 : BlockValidationState state;
1261 [ + - - + - + ]: 892 : if (!chainstate->ActivateBestChain(state, nullptr)) {
1262 [ # # # # # # # # ]: 0 : chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString()));
1263 : 0 : return;
1264 : : }
1265 : 892 : }
1266 : : // End scope of ImportingNow
1267 : 889 : }
1268 : :
1269 : 10 : std::ostream& operator<<(std::ostream& os, const BlockfileType& type) {
1270 [ - + - ]: 10 : switch(type) {
1271 : 0 : case BlockfileType::NORMAL: os << "normal"; break;
1272 : 10 : case BlockfileType::ASSUMED: os << "assumed"; break;
1273 : 0 : default: os.setstate(std::ios_base::failbit);
1274 : : }
1275 : 10 : return os;
1276 : : }
1277 : :
1278 : 10 : std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) {
1279 [ + - ]: 10 : os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
1280 : 10 : return os;
1281 : : }
1282 : : } // namespace node
|