Branch data Line data Source code
1 : : // Copyright (c) 2011-2022 The Bitcoin Core developers
2 : : // Distributed under the MIT software license, see the accompanying
3 : : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
4 : :
5 : : #include <node/blockstorage.h>
6 : :
7 : : #include <arith_uint256.h>
8 : : #include <chain.h>
9 : : #include <consensus/params.h>
10 : : #include <consensus/validation.h>
11 : : #include <dbwrapper.h>
12 : : #include <flatfile.h>
13 : : #include <hash.h>
14 : : #include <kernel/blockmanager_opts.h>
15 : : #include <kernel/chainparams.h>
16 : : #include <kernel/messagestartchars.h>
17 : : #include <kernel/notifications_interface.h>
18 : : #include <logging.h>
19 : : #include <pow.h>
20 : : #include <primitives/block.h>
21 : : #include <primitives/transaction.h>
22 : : #include <random.h>
23 : : #include <serialize.h>
24 : : #include <signet.h>
25 : : #include <span.h>
26 : : #include <streams.h>
27 : : #include <sync.h>
28 : : #include <tinyformat.h>
29 : : #include <uint256.h>
30 : : #include <undo.h>
31 : : #include <util/batchpriority.h>
32 : : #include <util/check.h>
33 : : #include <util/fs.h>
34 : : #include <util/signalinterrupt.h>
35 : : #include <util/strencodings.h>
36 : : #include <util/translation.h>
37 : : #include <validation.h>
38 : :
39 : : #include <cstddef>
40 : : #include <map>
41 : : #include <ranges>
42 : : #include <unordered_map>
43 : :
44 : : namespace kernel {
45 : : static constexpr uint8_t DB_BLOCK_FILES{'f'};
46 : : static constexpr uint8_t DB_BLOCK_INDEX{'b'};
47 : : static constexpr uint8_t DB_FLAG{'F'};
48 : : static constexpr uint8_t DB_REINDEX_FLAG{'R'};
49 : : static constexpr uint8_t DB_LAST_BLOCK{'l'};
50 : : // Keys used in previous versions that might still be found in the DB:
51 : : // BlockTreeDB::DB_TXINDEX_BLOCK{'T'};
52 : : // BlockTreeDB::DB_TXINDEX{'t'}
53 : : // BlockTreeDB::ReadFlag("txindex")
54 : :
55 : 2350 : bool BlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo& info)
56 : : {
57 : 2350 : return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
58 : : }
59 : :
60 : 36 : bool BlockTreeDB::WriteReindexing(bool fReindexing)
61 : : {
62 [ + + ]: 36 : if (fReindexing) {
63 : 18 : return Write(DB_REINDEX_FLAG, uint8_t{'1'});
64 : : } else {
65 : 18 : return Erase(DB_REINDEX_FLAG);
66 : : }
67 : : }
68 : :
69 : 1109 : void BlockTreeDB::ReadReindexing(bool& fReindexing)
70 : : {
71 : 1109 : fReindexing = Exists(DB_REINDEX_FLAG);
72 : 1109 : }
73 : :
74 : 1110 : bool BlockTreeDB::ReadLastBlockFile(int& nFile)
75 : : {
76 : 1110 : return Read(DB_LAST_BLOCK, nFile);
77 : : }
78 : :
79 : 3129 : bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
80 : : {
81 : 3129 : CDBBatch batch(*this);
82 [ + - + + ]: 4778 : for (const auto& [file, info] : fileInfo) {
83 [ + - ]: 1649 : batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info);
84 : : }
85 [ + - ]: 3129 : batch.Write(DB_LAST_BLOCK, nLastFile);
86 [ + + ]: 155168 : for (const CBlockIndex* bi : blockinfo) {
87 [ + - ]: 152039 : batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi});
88 : : }
89 [ + - ]: 6258 : return WriteBatch(batch, true);
90 : 3129 : }
91 : :
92 : 15 : bool BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
93 : : {
94 [ - + + - ]: 15 : return Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
95 : : }
96 : :
97 : 1109 : bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue)
98 : : {
99 : 1109 : uint8_t ch;
100 [ + - + + ]: 1109 : if (!Read(std::make_pair(DB_FLAG, name), ch)) {
101 : : return false;
102 : : }
103 : 16 : fValue = ch == uint8_t{'1'};
104 : 16 : return true;
105 : : }
106 : :
107 : 1113 : bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt)
108 : : {
109 : 1113 : AssertLockHeld(::cs_main);
110 [ + - ]: 1113 : std::unique_ptr<CDBIterator> pcursor(NewIterator());
111 [ + - ]: 1113 : pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));
112 : :
113 : : // Load m_block_index
114 [ + - + + ]: 142851 : while (pcursor->Valid()) {
115 [ + - + + ]: 142403 : if (interrupt) return false;
116 : 142402 : std::pair<uint8_t, uint256> key;
117 [ + - + + + - ]: 142402 : if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
118 : 141738 : CDiskBlockIndex diskindex;
119 [ + - + - ]: 141738 : if (pcursor->GetValue(diskindex)) {
120 : : // Construct block index object
121 [ + - + - ]: 141738 : CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash());
122 [ + - ]: 141738 : pindexNew->pprev = insertBlockIndex(diskindex.hashPrev);
123 : 141738 : pindexNew->nHeight = diskindex.nHeight;
124 : 141738 : pindexNew->nFile = diskindex.nFile;
125 : 141738 : pindexNew->nDataPos = diskindex.nDataPos;
126 : 141738 : pindexNew->nUndoPos = diskindex.nUndoPos;
127 : 141738 : pindexNew->nVersion = diskindex.nVersion;
128 : 141738 : pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
129 : 141738 : pindexNew->nTime = diskindex.nTime;
130 : 141738 : pindexNew->nBits = diskindex.nBits;
131 : 141738 : pindexNew->nNonce = diskindex.nNonce;
132 : 141738 : pindexNew->nStatus = diskindex.nStatus;
133 : 141738 : pindexNew->nTx = diskindex.nTx;
134 : :
135 [ + - - + ]: 141738 : if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
136 [ # # # # ]: 0 : LogError("%s: CheckProofOfWork failed: %s\n", __func__, pindexNew->ToString());
137 : 0 : return false;
138 : : }
139 : :
140 [ + - ]: 141738 : pcursor->Next();
141 : : } else {
142 [ # # ]: 0 : LogError("%s: failed to read value\n", __func__);
143 : : return false;
144 : : }
145 : : } else {
146 : : break;
147 : : }
148 : : }
149 : :
150 : : return true;
151 : 1113 : }
152 : : } // namespace kernel
153 : :
154 : : namespace node {
155 : :
156 : 766321115 : bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
157 : : {
158 : : // First sort by most total work, ...
159 [ + + ]: 766321115 : if (pa->nChainWork > pb->nChainWork) return false;
160 [ + + ]: 503300421 : if (pa->nChainWork < pb->nChainWork) return true;
161 : :
162 : : // ... then by earliest time received, ...
163 [ + + ]: 7214451 : if (pa->nSequenceId < pb->nSequenceId) return false;
164 [ + + ]: 7170603 : if (pa->nSequenceId > pb->nSequenceId) return true;
165 : :
166 : : // Use pointer address as tie breaker (should only happen with blocks
167 : : // loaded from disk, as those all have id 0).
168 [ + + ]: 7158540 : if (pa < pb) return false;
169 [ + + ]: 7157441 : if (pa > pb) return true;
170 : :
171 : : // Identical blocks.
172 : : return false;
173 : : }
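The ordering above means the block with the most accumulated work compares greater than every other entry, so a std::set keyed on this comparator keeps the best candidate tip at its reverse beginning; among equal-work entries the earliest-received block (lowest nSequenceId) sorts last and therefore wins. A minimal usage sketch (the local set and lookup below are illustrative, not part of this file):

    std::set<CBlockIndex*, CBlockIndexWorkComparator> candidates; // e.g. candidate chain tips
    // ... insert CBlockIndex* entries as headers/blocks arrive ...
    CBlockIndex* best_tip = candidates.empty() ? nullptr : *candidates.rbegin();
    // rbegin() is the most-work tip; ties fall back to earliest received, then pointer address.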
174 : :
175 : 2889481 : bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
176 : : {
177 : 2889481 : return pa->nHeight < pb->nHeight;
178 : : }
179 : :
180 : 2220 : std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices()
181 : : {
182 : 2220 : AssertLockHeld(cs_main);
183 : 2220 : std::vector<CBlockIndex*> rv;
184 [ + - ]: 2220 : rv.reserve(m_block_index.size());
185 [ + - + + ]: 284986 : for (auto& [_, block_index] : m_block_index) {
186 [ + - ]: 282766 : rv.push_back(&block_index);
187 : : }
188 : 2220 : return rv;
189 : 0 : }
190 : :
191 : 890457 : CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash)
192 : : {
193 : 890457 : AssertLockHeld(cs_main);
194 : 890457 : BlockMap::iterator it = m_block_index.find(hash);
195 [ + + ]: 890457 : return it == m_block_index.end() ? nullptr : &it->second;
196 : : }
197 : :
198 : 6 : const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
199 : : {
200 : 6 : AssertLockHeld(cs_main);
201 : 6 : BlockMap::const_iterator it = m_block_index.find(hash);
202 [ + + ]: 6 : return it == m_block_index.end() ? nullptr : &it->second;
203 : : }
204 : :
205 : 142519 : CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header)
206 : : {
207 : 142519 : AssertLockHeld(cs_main);
208 : :
209 [ + + ]: 142519 : auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
210 [ + + ]: 142519 : if (!inserted) {
211 : 3 : return &mi->second;
212 : : }
213 : 142516 : CBlockIndex* pindexNew = &(*mi).second;
214 : :
215 : : // We assign the sequence id to blocks only when the full data is available,
216 : : // to avoid miners withholding blocks while broadcasting headers in order to
217 : : // gain a competitive advantage.
218 : 142516 : pindexNew->nSequenceId = 0;
219 : :
220 : 142516 : pindexNew->phashBlock = &((*mi).first);
221 : 142516 : BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
222 [ + + ]: 142516 : if (miPrev != m_block_index.end()) {
223 : 142051 : pindexNew->pprev = &(*miPrev).second;
224 : 142051 : pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
225 : 142051 : pindexNew->BuildSkip();
226 : : }
227 [ + + + + ]: 142516 : pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
228 [ + + + - ]: 427548 : pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
229 : 142516 : pindexNew->RaiseValidity(BLOCK_VALID_TREE);
230 [ + + + + ]: 142516 : if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
231 : 123260 : best_header = pindexNew;
232 : : }
233 : :
234 : 142516 : m_dirty_blockindex.insert(pindexNew);
235 : :
236 : 142516 : return pindexNew;
237 : : }
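The nChainWork accumulation above adds GetBlockProof(*pindexNew) for the new header on top of the parent's total. GetBlockProof() lives in chain.cpp rather than in this file, but roughly it evaluates 2^256 / (target + 1), the expected number of hash attempts needed to find a header meeting that nBits target. A hedged sketch of that computation (the helper name and structure are illustrative):

    arith_uint256 BlockProofSketch(const CBlockIndex& index)
    {
        arith_uint256 target;
        bool negative, overflow;
        target.SetCompact(index.nBits, &negative, &overflow);
        if (negative || overflow || target == 0) return 0;
        // ~target / (target + 1) + 1 equals 2^256 / (target + 1) without needing
        // to represent 2^256 itself in a 256-bit integer.
        return (~target / (target + 1)) + 1;
    }

Summing this per-block proof along the chain is what lets the work comparator above pick the heaviest chain rather than merely the longest one.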
238 : :
239 : 70 : void BlockManager::PruneOneBlockFile(const int fileNumber)
240 : : {
241 : 70 : AssertLockHeld(cs_main);
242 : 70 : LOCK(cs_LastBlockFile);
243 : :
244 [ + + ]: 119787 : for (auto& entry : m_block_index) {
245 : 119717 : CBlockIndex* pindex = &entry.second;
246 [ + + ]: 119717 : if (pindex->nFile == fileNumber) {
247 : 16812 : pindex->nStatus &= ~BLOCK_HAVE_DATA;
248 : 16812 : pindex->nStatus &= ~BLOCK_HAVE_UNDO;
249 : 16812 : pindex->nFile = 0;
250 : 16812 : pindex->nDataPos = 0;
251 : 16812 : pindex->nUndoPos = 0;
252 [ + - ]: 16812 : m_dirty_blockindex.insert(pindex);
253 : :
254 : : // Prune from m_blocks_unlinked -- any block we prune would have
255 : : // to be downloaded again in order to consider its chain, at which
256 : : // point it would be considered as a candidate for
257 : : // m_blocks_unlinked or setBlockIndexCandidates.
258 : 16812 : auto range = m_blocks_unlinked.equal_range(pindex->pprev);
259 [ - + ]: 16812 : while (range.first != range.second) {
260 : 0 : std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
261 : 0 : range.first++;
262 [ # # ]: 0 : if (_it->second == pindex) {
263 : 0 : m_blocks_unlinked.erase(_it);
264 : : }
265 : : }
266 : : }
267 : : }
268 : :
269 [ + - ]: 70 : m_blockfile_info.at(fileNumber) = CBlockFileInfo{};
270 [ + - ]: 70 : m_dirty_fileinfo.insert(fileNumber);
271 : 70 : }
272 : :
273 : 27 : void BlockManager::FindFilesToPruneManual(
274 : : std::set<int>& setFilesToPrune,
275 : : int nManualPruneHeight,
276 : : const Chainstate& chain,
277 : : ChainstateManager& chainman)
278 : : {
279 [ + - - + ]: 27 : assert(IsPruneMode() && nManualPruneHeight > 0);
280 : :
281 [ + - ]: 27 : LOCK2(cs_main, cs_LastBlockFile);
282 [ - + ]: 27 : if (chain.m_chain.Height() < 0) {
283 [ # # ]: 0 : return;
284 : : }
285 : :
286 [ + - ]: 27 : const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight);
287 : :
288 : 27 : int count = 0;
289 [ + + ]: 149 : for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
290 [ + + ]: 122 : const auto& fileinfo = m_blockfile_info[fileNumber];
291 [ + + + + - + ]: 122 : if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
292 : 69 : continue;
293 : : }
294 : :
295 [ + - ]: 53 : PruneOneBlockFile(fileNumber);
296 [ + - ]: 53 : setFilesToPrune.insert(fileNumber);
297 : 53 : count++;
298 : : }
299 [ + - + - ]: 27 : LogPrintf("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
300 : : chain.GetRole(), last_block_can_prune, count);
301 [ - - + - ]: 54 : }
302 : :
303 : 555 : void BlockManager::FindFilesToPrune(
304 : : std::set<int>& setFilesToPrune,
305 : : int last_prune,
306 : : const Chainstate& chain,
307 : : ChainstateManager& chainman)
308 : : {
309 [ + - ]: 555 : LOCK2(cs_main, cs_LastBlockFile);
310 : : // Distribute our -prune budget over all chainstates.
311 [ + - ]: 555 : const auto target = std::max(
312 [ + - + + ]: 713 : MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size());
313 : 555 : const uint64_t target_sync_height = chainman.m_best_header->nHeight;
314 : :
315 [ + + + - ]: 555 : if (chain.m_chain.Height() < 0 || target == 0) {
316 : : return;
317 : : }
318 [ + + ]: 541 : if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) {
319 : : return;
320 : : }
321 : :
322 [ + - + - ]: 435 : const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune);
323 : :
324 [ + - ]: 435 : uint64_t nCurrentUsage = CalculateCurrentUsage();
325 : : // We don't check whether to prune until after we've allocated new space for files,
326 : : // so we should leave a buffer under our target to account for another allocation
327 : : // before the next pruning.
328 : 435 : uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
329 : 435 : uint64_t nBytesToPrune;
330 : 435 : int count = 0;
331 : :
332 [ + + ]: 435 : if (nCurrentUsage + nBuffer >= target) {
333 : : // On a prune event, the chainstate DB is flushed.
334 : : // To avoid excessive prune events negating the benefit of high dbcache
335 : : // values, we should not prune too rapidly.
336 : : // So when pruning in IBD, increase the buffer to avoid a re-prune too soon.
337 [ + - ]: 9 : const auto chain_tip_height = chain.m_chain.Height();
338 [ + - - + - - ]: 9 : if (chainman.IsInitialBlockDownload() && target_sync_height > (uint64_t)chain_tip_height) {
339 : : // Since this is only relevant during IBD, we assume blocks are at least 1 MB on average
340 : 0 : static constexpr uint64_t average_block_size = 1000000; /* 1 MB */
341 : 0 : const uint64_t remaining_blocks = target_sync_height - chain_tip_height;
342 : 0 : nBuffer += average_block_size * remaining_blocks;
343 : : }
344 : :
345 [ + - ]: 35 : for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
346 [ + + ]: 35 : const auto& fileinfo = m_blockfile_info[fileNumber];
347 : 35 : nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize;
348 : :
349 [ + + ]: 35 : if (fileinfo.nSize == 0) {
350 : 13 : continue;
351 : : }
352 : :
353 [ + + ]: 22 : if (nCurrentUsage + nBuffer < target) { // are we below our target?
354 : : break;
355 : : }
356 : :
357 : : // don't prune files that could have a block that's not within the allowable
358 : : // prune range for the chain being pruned.
359 [ + - - + ]: 13 : if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
360 : 0 : continue;
361 : : }
362 : :
363 [ + - ]: 13 : PruneOneBlockFile(fileNumber);
364 : : // Queue up the files for removal
365 [ + - ]: 13 : setFilesToPrune.insert(fileNumber);
366 : 13 : nCurrentUsage -= nBytesToPrune;
367 : 13 : count++;
368 : : }
369 : : }
370 : :
371 [ + - + - + - + - + - ]: 435 : LogDebug(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n",
372 : : chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024,
373 : : (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024,
374 : : min_block_to_prune, last_block_can_prune, count);
375 [ + - + - ]: 1110 : }
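To make the buffer adjustment above concrete (all numbers hypothetical): with the chain tip at height 800,000 and the best known header at 800,100, remaining_blocks is 100, so nBuffer grows by 100 * 1,000,000 bytes, roughly 100 MB, on top of the one-chunk allocation slack (BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE). That headroom is kept free under the -prune target, so the node does not immediately re-trigger a prune event, and the accompanying chainstate flush, for every few blocks it connects during IBD.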
376 : :
377 : 20716 : void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) {
378 : 20716 : AssertLockHeld(::cs_main);
379 : 20716 : m_prune_locks[name] = lock_info;
380 : 20716 : }
381 : :
382 : 283476 : CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
383 : : {
384 : 283476 : AssertLockHeld(cs_main);
385 : :
386 [ + + ]: 283476 : if (hash.IsNull()) {
387 : : return nullptr;
388 : : }
389 : :
390 [ + + ]: 282812 : const auto [mi, inserted]{m_block_index.try_emplace(hash)};
391 [ + + ]: 282812 : CBlockIndex* pindex = &(*mi).second;
392 [ + + ]: 282812 : if (inserted) {
393 : 140776 : pindex->phashBlock = &((*mi).first);
394 : : }
395 : : return pindex;
396 : : }
397 : :
398 : 1113 : bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash)
399 : : {
400 [ + - + + ]: 1113 : if (!m_block_tree_db->LoadBlockIndexGuts(
401 [ + - ]: 284589 : GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) {
402 : : return false;
403 : : }
404 : :
405 [ + + ]: 1112 : if (snapshot_blockhash) {
406 : 11 : const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash);
407 [ + + ]: 11 : if (!maybe_au_data) {
408 [ + - + - ]: 2 : m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString()));
409 : 1 : return false;
410 : : }
411 : 10 : const AssumeutxoData& au_data = *Assert(maybe_au_data);
412 : 10 : m_snapshot_height = au_data.height;
413 : 10 : CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};
414 : :
415 : : // Since m_chain_tx_count (responsible for estimated progress) isn't persisted
416 : : // to disk, we must bootstrap the value for assumedvalid chainstates
417 : : // from the hardcoded assumeutxo chainparams.
418 : 10 : base->m_chain_tx_count = au_data.m_chain_tx_count;
419 [ + - ]: 20 : LogPrintf("[snapshot] set m_chain_tx_count=%d for %s\n", au_data.m_chain_tx_count, snapshot_blockhash->ToString());
420 : : } else {
421 : : // If this isn't called with a snapshot blockhash, make sure the cached snapshot height
422 : : // is null. This is relevant during snapshot completion, when the blockman may be loaded
423 : : // with a height that then needs to be cleared after the snapshot is fully validated.
424 [ + + ]: 1101 : m_snapshot_height.reset();
425 : : }
426 : :
427 : 1111 : Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());
428 : :
429 : : // Calculate nChainWork
430 : 1111 : std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
431 [ + - ]: 1111 : std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
432 : : CBlockIndexHeightOnlyComparator());
433 : :
434 : 1111 : CBlockIndex* previous_index{nullptr};
435 [ + + ]: 142524 : for (CBlockIndex* pindex : vSortedByHeight) {
436 [ + - + - ]: 141414 : if (m_interrupt) return false;
437 [ + + + + ]: 141414 : if (previous_index && pindex->nHeight > previous_index->nHeight + 1) {
438 [ + - ]: 1 : LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1);
439 : : return false;
440 : : }
441 : 141413 : previous_index = pindex;
442 [ + - + + + - ]: 424239 : pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
443 [ + + + + ]: 141413 : pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
444 : :
445 : : // We can link the chain of blocks for which we've received transactions at some point, or
446 : : // blocks that are assumed-valid on the basis of snapshot load (see
447 : : // PopulateAndValidateSnapshot()).
448 : : // Pruned nodes may have deleted the block.
449 [ + + ]: 141413 : if (pindex->nTx > 0) {
450 [ + + ]: 139978 : if (pindex->pprev) {
451 [ + + + + + - ]: 139318 : if (m_snapshot_height && pindex->nHeight == *m_snapshot_height &&
452 [ + - ]: 4 : pindex->GetBlockHash() == *snapshot_blockhash) {
453 : : // Should have been set above; don't disturb it with code below.
454 [ + - ]: 4 : Assert(pindex->m_chain_tx_count > 0);
455 [ + + ]: 139310 : } else if (pindex->pprev->m_chain_tx_count > 0) {
456 : 139306 : pindex->m_chain_tx_count = pindex->pprev->m_chain_tx_count + pindex->nTx;
457 : : } else {
458 : 4 : pindex->m_chain_tx_count = 0;
459 [ + - ]: 4 : m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
460 : : }
461 : : } else {
462 : 664 : pindex->m_chain_tx_count = pindex->nTx;
463 : : }
464 : : }
465 [ + + + + - + ]: 141413 : if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
466 : 0 : pindex->nStatus |= BLOCK_FAILED_CHILD;
467 [ # # ]: 0 : m_dirty_blockindex.insert(pindex);
468 : : }
469 [ + + ]: 141413 : if (pindex->pprev) {
470 [ + - ]: 140720 : pindex->BuildSkip();
471 : : }
472 : : }
473 : :
474 : : return true;
475 : 1111 : }
476 : :
477 : 3129 : bool BlockManager::WriteBlockIndexDB()
478 : : {
479 : 3129 : AssertLockHeld(::cs_main);
480 : 3129 : std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
481 [ + - ]: 3129 : vFiles.reserve(m_dirty_fileinfo.size());
482 [ + + ]: 4778 : for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
483 [ + - ]: 1649 : vFiles.emplace_back(*it, &m_blockfile_info[*it]);
484 : 1649 : m_dirty_fileinfo.erase(it++);
485 : : }
486 : 3129 : std::vector<const CBlockIndex*> vBlocks;
487 [ + - ]: 3129 : vBlocks.reserve(m_dirty_blockindex.size());
488 [ + + ]: 155168 : for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
489 [ + - ]: 152039 : vBlocks.push_back(*it);
490 : 152039 : m_dirty_blockindex.erase(it++);
491 : : }
492 [ + - + - ]: 6258 : int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
493 [ + - - + ]: 3129 : if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) {
494 : 0 : return false;
495 : : }
496 : : return true;
497 : 3129 : }
498 : :
499 : 1113 : bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash)
500 : : {
501 [ + + ]: 1113 : if (!LoadBlockIndex(snapshot_blockhash)) {
502 : : return false;
503 : : }
504 : 1110 : int max_blockfile_num{0};
505 : :
506 : : // Load block file info
507 : 1110 : m_block_tree_db->ReadLastBlockFile(max_blockfile_num);
508 : 1110 : m_blockfile_info.resize(max_blockfile_num + 1);
509 : 1110 : LogPrintf("%s: last block file = %i\n", __func__, max_blockfile_num);
510 [ + + ]: 2350 : for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
511 : 1240 : m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
512 : : }
513 [ + - ]: 1110 : LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[max_blockfile_num].ToString());
514 : 1110 : for (int nFile = max_blockfile_num + 1; true; nFile++) {
515 : 1110 : CBlockFileInfo info;
516 [ - + ]: 1110 : if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
517 : 0 : m_blockfile_info.push_back(info);
518 : : } else {
519 : : break;
520 : : }
521 : 0 : }
522 : :
523 : : // Check presence of blk files
524 : 1110 : LogPrintf("Checking all blk files are present...\n");
525 : 1110 : std::set<int> setBlkDataFiles;
526 [ + + + + ]: 142494 : for (const auto& [_, block_index] : m_block_index) {
527 [ + + ]: 141384 : if (block_index.nStatus & BLOCK_HAVE_DATA) {
528 [ + - ]: 125572 : setBlkDataFiles.insert(block_index.nFile);
529 : : }
530 : : }
531 [ + + ]: 1844 : for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
532 [ + - ]: 735 : FlatFilePos pos(*it, 0);
533 [ + - + + ]: 1470 : if (OpenBlockFile(pos, true).IsNull()) {
534 : : return false;
535 : : }
536 : : }
537 : :
538 : 1109 : {
539 : : // Initialize the blockfile cursors.
540 [ + - ]: 1109 : LOCK(cs_LastBlockFile);
541 [ + + ]: 2348 : for (size_t i = 0; i < m_blockfile_info.size(); ++i) {
542 [ + - ]: 1239 : const auto last_height_in_file = m_blockfile_info[i].nHeightLast;
543 [ + - + + ]: 2478 : m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0};
544 : : }
545 : 0 : }
546 : :
547 : : // Check whether we have ever pruned block & undo files
548 [ + - + - ]: 1109 : m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
549 [ + + ]: 1109 : if (m_have_pruned) {
550 [ + - ]: 16 : LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
551 : : }
552 : :
553 : : // Check whether we need to continue reindexing
554 : 1109 : bool fReindexing = false;
555 [ + - ]: 1109 : m_block_tree_db->ReadReindexing(fReindexing);
556 [ + + ]: 1109 : if (fReindexing) m_blockfiles_indexed = false;
557 : :
558 : : return true;
559 : 1110 : }
560 : :
561 : 1112 : void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
562 : : {
563 : 1112 : AssertLockHeld(::cs_main);
564 [ + - ]: 2224 : int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
565 [ + + ]: 1112 : if (!m_have_pruned) {
566 : : return;
567 : : }
568 : :
569 : 18 : std::set<int> block_files_to_prune;
570 [ + + ]: 108 : for (int file_number = 0; file_number < max_blockfile; file_number++) {
571 [ + + ]: 90 : if (m_blockfile_info[file_number].nSize == 0) {
572 [ + - ]: 61 : block_files_to_prune.insert(file_number);
573 : : }
574 : : }
575 : :
576 [ + - ]: 18 : UnlinkPrunedFiles(block_files_to_prune);
577 : 18 : }
578 : :
579 : 187946 : const CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
580 : : {
581 : 187946 : const MapCheckpoints& checkpoints = data.mapCheckpoints;
582 : :
583 [ + + ]: 217809 : for (const MapCheckpoints::value_type& i : checkpoints | std::views::reverse) {
584 : 214993 : const uint256& hash = i.second;
585 : 214993 : const CBlockIndex* pindex = LookupBlockIndex(hash);
586 [ + + ]: 214993 : if (pindex) {
587 : : return pindex;
588 : : }
589 : : }
590 : : return nullptr;
591 : : }
592 : :
593 : 232 : bool BlockManager::IsBlockPruned(const CBlockIndex& block) const
594 : : {
595 : 232 : AssertLockHeld(::cs_main);
596 [ + + + - - + ]: 232 : return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0);
597 : : }
598 : :
599 : 109 : const CBlockIndex* BlockManager::GetFirstBlock(const CBlockIndex& upper_block, uint32_t status_mask, const CBlockIndex* lower_block) const
600 : : {
601 : 109 : AssertLockHeld(::cs_main);
602 : 109 : const CBlockIndex* last_block = &upper_block;
603 [ - + ]: 109 : assert((last_block->nStatus & status_mask) == status_mask); // 'upper_block' must satisfy the status mask
604 [ + + + + ]: 39349 : while (last_block->pprev && ((last_block->pprev->nStatus & status_mask) == status_mask)) {
605 [ + + ]: 39252 : if (lower_block) {
606 : : // Return if we reached the lower_block
607 [ + + ]: 39103 : if (last_block == lower_block) return lower_block;
608 : : // If the range was surpassed, 'lower_block' is not part of the 'upper_block' chain,
609 : : // which is not allowed.
610 [ - + ]: 39091 : assert(last_block->nHeight >= lower_block->nHeight);
611 : : }
612 : : last_block = last_block->pprev;
613 : : }
614 [ - + ]: 97 : assert(last_block != nullptr);
615 : : return last_block;
616 : : }
617 : :
618 : 38 : bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block)
619 : : {
620 [ + - ]: 38 : if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false;
621 : 38 : return GetFirstBlock(upper_block, BLOCK_HAVE_DATA, &lower_block) == &lower_block;
622 : : }
623 : :
624 : : // If we're using -prune with -reindex, then delete block files that will be ignored by the
625 : : // reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
626 : : // is missing, do the same here to delete any later block files after a gap. Also delete all
627 : : // rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info
628 : : // is in sync with what's actually on disk by the time we start downloading, so that pruning
629 : : // works correctly.
630 : 4 : void BlockManager::CleanupBlockRevFiles() const
631 : : {
632 [ + - ]: 4 : std::map<std::string, fs::path> mapBlockFiles;
633 : :
634 : : // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
635 : : // Remove the rev files immediately and insert the blk file paths into an
636 : : // ordered map keyed by block file index.
637 [ + - ]: 4 : LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
638 [ + - + + + + ]: 42 : for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) {
639 [ + - + - ]: 136 : const std::string path = fs::PathToString(it->path().filename());
640 [ + - + + ]: 68 : if (fs::is_regular_file(*it) &&
641 [ + + + + ]: 34 : path.length() == 12 &&
642 [ + - + - ]: 44 : path.substr(8,4) == ".dat")
643 : : {
644 [ + - + + ]: 22 : if (path.substr(0, 3) == "blk") {
645 [ + - + - + - ]: 22 : mapBlockFiles[path.substr(3, 5)] = it->path();
646 [ + - + - ]: 11 : } else if (path.substr(0, 3) == "rev") {
647 [ + - ]: 11 : remove(it->path());
648 : : }
649 : : }
650 [ + - ]: 34 : }
651 : :
652 : : // Remove all block files that aren't part of a contiguous set starting at
653 : : // zero by walking the ordered map (keys are block file indices) while
654 : : // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist),
655 : : // start removing block files.
656 : 4 : int nContigCounter = 0;
657 [ + + ]: 15 : for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
658 [ + - - + ]: 11 : if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) {
659 : 0 : nContigCounter++;
660 : 0 : continue;
661 : : }
662 [ + - ]: 11 : remove(item.second);
663 : : }
664 : 4 : }
665 : :
666 : 4 : CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
667 : : {
668 : 4 : LOCK(cs_LastBlockFile);
669 : :
670 [ + - + - ]: 4 : return &m_blockfile_info.at(n);
671 : 4 : }
672 : :
673 : 51381 : bool BlockManager::ReadBlockUndo(CBlockUndo& blockundo, const CBlockIndex& index) const
674 : : {
675 [ + - ]: 102762 : const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};
676 : :
677 : : // Open history file to read
678 : 51381 : AutoFile filein{OpenUndoFile(pos, true)};
679 [ + + ]: 51381 : if (filein.IsNull()) {
680 [ + - + - ]: 7 : LogError("OpenUndoFile failed for %s", pos.ToString());
681 : 7 : return false;
682 : : }
683 : :
684 : : // Read block
685 : 51374 : uint256 hashChecksum;
686 [ + - ]: 51374 : HashVerifier verifier{filein}; // Use HashVerifier as reserializing may lose data, c.f. commit d342424301013ec47dc146a4beb49d5c9319d80a
687 : 51374 : try {
688 [ + - ]: 51374 : verifier << index.pprev->GetBlockHash();
689 [ + + ]: 51374 : verifier >> blockundo;
690 [ + - ]: 51373 : filein >> hashChecksum;
691 [ - + ]: 1 : } catch (const std::exception& e) {
692 [ + - + - ]: 1 : LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
693 : 1 : return false;
694 : 1 : }
695 : :
696 : : // Verify checksum
697 [ + - - + ]: 51373 : if (hashChecksum != verifier.GetHash()) {
698 [ # # # # ]: 0 : LogError("%s: Checksum mismatch at %s\n", __func__, pos.ToString());
699 : 0 : return false;
700 : : }
701 : :
702 : : return true;
703 : 51381 : }
704 : :
705 : 3229 : bool BlockManager::FlushUndoFile(int block_file, bool finalize)
706 : : {
707 : 3229 : FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
708 [ - + ]: 3229 : if (!m_undo_file_seq.Flush(undo_pos_old, finalize)) {
709 [ # # ]: 0 : m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error."));
710 : 0 : return false;
711 : : }
712 : : return true;
713 : : }
714 : :
715 : 3228 : bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
716 : : {
717 : 3228 : bool success = true;
718 : 3228 : LOCK(cs_LastBlockFile);
719 : :
720 [ + - ]: 3228 : if (m_blockfile_info.size() < 1) {
721 : : // Return if we haven't loaded any blockfiles yet. This happens during
722 : : // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which
723 : : // then calls FlushStateToDisk()), resulting in a call to this function before we
724 : : // have populated `m_blockfile_info` via LoadBlockIndexDB().
725 : : return true;
726 : : }
727 [ - + ]: 3228 : assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num);
728 : :
729 [ + - ]: 3228 : FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
730 [ + - - + ]: 3228 : if (!m_block_file_seq.Flush(block_pos_old, fFinalize)) {
731 [ # # # # ]: 0 : m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error."));
732 : 0 : success = false;
733 : : }
734 : : // We do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
735 : : // e.g. during IBD or a sync after the node has been offline.
736 [ + - ]: 3228 : if (!fFinalize || finalize_undo) {
737 [ + - - + ]: 3228 : if (!FlushUndoFile(blockfile_num, finalize_undo)) {
738 : 0 : success = false;
739 : : }
740 : : }
741 : : return success;
742 : 3228 : }
743 : :
744 : 261419 : BlockfileType BlockManager::BlockfileTypeForHeight(int height)
745 : : {
746 [ + + ]: 261419 : if (!m_snapshot_height) {
747 : : return BlockfileType::NORMAL;
748 : : }
749 [ + + ]: 3725 : return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL;
750 : : }
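In other words, when an assumeutxo snapshot is active, blocks from the snapshot height upward go into the ASSUMED set of blk/rev files while the background sync below the snapshot keeps writing to the NORMAL set; without a snapshot, every height maps to NORMAL. A small illustration (the snapshot height below is hypothetical):

    // Suppose m_snapshot_height == 840000:
    //   BlockfileTypeForHeight(839999) == BlockfileType::NORMAL   // background validation chain
    //   BlockfileTypeForHeight(840000) == BlockfileType::ASSUMED  // snapshot chainstate
    // With m_snapshot_height unset, every height returns BlockfileType::NORMAL.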
751 : :
752 : 3129 : bool BlockManager::FlushChainstateBlockFile(int tip_height)
753 : : {
754 : 3129 : LOCK(cs_LastBlockFile);
755 [ + - + + ]: 3129 : auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
756 : : // If the cursor does not exist, it means an assumeutxo snapshot is loaded,
757 : : // but no blocks past the snapshot height have been written yet, so there
758 : : // is no data associated with the chainstate, and it is safe not to flush.
759 [ + + ]: 3129 : if (cursor) {
760 [ + - ]: 3104 : return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
761 : : }
762 : : // No need to log warnings in this case.
763 : : return true;
764 : 3129 : }
765 : :
766 : 20289 : uint64_t BlockManager::CalculateCurrentUsage()
767 : : {
768 : 20289 : LOCK(cs_LastBlockFile);
769 : :
770 : 20289 : uint64_t retval = 0;
771 [ + + ]: 42615 : for (const CBlockFileInfo& file : m_blockfile_info) {
772 : 22326 : retval += file.nSize + file.nUndoSize;
773 : : }
774 [ + - ]: 20289 : return retval;
775 : 20289 : }
776 : :
777 : 53 : void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const
778 : : {
779 : 53 : std::error_code ec;
780 [ + + ]: 183 : for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
781 : 130 : FlatFilePos pos(*it, 0);
782 : 130 : const bool removed_blockfile{fs::remove(m_block_file_seq.FileName(pos), ec)};
783 : 130 : const bool removed_undofile{fs::remove(m_undo_file_seq.FileName(pos), ec)};
784 [ + + ]: 130 : if (removed_blockfile || removed_undofile) {
785 [ + - ]: 70 : LogDebug(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
786 : : }
787 : : }
788 : 53 : }
789 : :
790 : 296201 : AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const
791 : : {
792 [ + - ]: 592402 : return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_xor_key};
793 : : }
794 : :
795 : : /** Open an undo file (rev?????.dat) */
796 : 174940 : AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const
797 : : {
798 [ + - ]: 349880 : return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_xor_key};
799 : : }
800 : :
801 : 34 : fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const
802 : : {
803 : 34 : return m_block_file_seq.FileName(pos);
804 : : }
805 : :
806 : 124597 : FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
807 : : {
808 : 124597 : LOCK(cs_LastBlockFile);
809 : :
810 [ + - ]: 124597 : const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
811 : :
812 [ + + ]: 124597 : if (!m_blockfile_cursors[chain_type]) {
813 : : // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
814 [ - + ]: 11 : assert(chain_type == BlockfileType::ASSUMED);
815 : 11 : const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
816 [ + - ]: 11 : m_blockfile_cursors[chain_type] = new_cursor;
817 [ + - + - + - ]: 11 : LogDebug(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
818 : : }
819 [ + + ]: 124597 : const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
820 : :
821 : 124597 : int nFile = last_blockfile;
822 [ + + ]: 124597 : if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
823 [ + - ]: 18 : m_blockfile_info.resize(nFile + 1);
824 : : }
825 : :
826 : 124597 : bool finalize_undo = false;
827 : 124597 : unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
828 : : // Use smaller blockfiles in test-only -fastprune mode - but avoid
829 : : // the possibility of having a block not fit into the block file.
830 [ + + ]: 124597 : if (m_opts.fast_prune) {
831 : 17786 : max_blockfile_size = 0x10000; // 64kiB
832 [ + + ]: 17786 : if (nAddSize >= max_blockfile_size) {
833 : : // dynamically adjust the blockfile size to be larger than the added size
834 : 2 : max_blockfile_size = nAddSize + 1;
835 : : }
836 : : }
837 [ - + ]: 124597 : assert(nAddSize < max_blockfile_size);
838 : :
839 [ + + ]: 124721 : while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
840 : : // when the undo file is keeping up with the block file, we want to flush it explicitly
841 : : // when it is lagging behind (more blocks arrive than are being connected), we let the
842 : : // undo block write case handle it
843 : 248 : finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
844 [ + - + - ]: 124 : Assert(m_blockfile_cursors[chain_type])->undo_height);
845 : :
846 : : // Try the next unclaimed blockfile number
847 : 124 : nFile = this->MaxBlockfileNum() + 1;
848 : : // Set to increment MaxBlockfileNum() for next iteration
849 [ + - ]: 124 : m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
850 : :
851 [ + - ]: 124 : if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
852 [ + - ]: 124 : m_blockfile_info.resize(nFile + 1);
853 : : }
854 : : }
855 : 124597 : FlatFilePos pos;
856 : 124597 : pos.nFile = nFile;
857 : 124597 : pos.nPos = m_blockfile_info[nFile].nSize;
858 : :
859 [ + + ]: 124597 : if (nFile != last_blockfile) {
860 [ + - + - + - + - ]: 248 : LogDebug(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
861 : : last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
862 : :
863 : : // Do not propagate the return code. The flush concerns a previous block
864 : : // and undo file that has already been written to. If a flush fails
865 : : // here, and we crash, there is no expected additional block data
866 : : // inconsistency arising from the flush failure here. However, the undo
867 : : // data may be inconsistent after a crash if the flush is called during
868 : : // a reindex. A flush error might also leave some of the data files
869 : : // untrimmed.
870 [ + - - + ]: 124 : if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) {
871 [ # # # # # # ]: 0 : LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
872 : : "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n",
873 : : last_blockfile, finalize_undo, nFile);
874 : : }
875 : : // No undo data yet in the new file, so reset our undo-height tracking.
876 [ + - ]: 124 : m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
877 : : }
878 : :
879 : 124597 : m_blockfile_info[nFile].AddBlock(nHeight, nTime);
880 [ + - ]: 124597 : m_blockfile_info[nFile].nSize += nAddSize;
881 : :
882 : 124597 : bool out_of_space;
883 [ + - ]: 124597 : size_t bytes_allocated = m_block_file_seq.Allocate(pos, nAddSize, out_of_space);
884 [ - + ]: 124597 : if (out_of_space) {
885 [ # # # # ]: 0 : m_opts.notifications.fatalError(_("Disk space is too low!"));
886 : 0 : return {};
887 : : }
888 [ + + + + ]: 124597 : if (bytes_allocated != 0 && IsPruneMode()) {
889 : 416 : m_check_for_pruning = true;
890 : : }
891 : :
892 [ + - ]: 124597 : m_dirty_fileinfo.insert(nFile);
893 : 124597 : return pos;
894 : 124597 : }
895 : :
896 : 3478 : void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos)
897 : : {
898 : 3478 : LOCK(cs_LastBlockFile);
899 : :
900 : : // Update the cursor so it points to the last file.
901 [ + - ]: 3478 : const BlockfileType chain_type{BlockfileTypeForHeight(nHeight)};
902 [ + - ]: 3478 : auto& cursor{m_blockfile_cursors[chain_type]};
903 [ + - + + ]: 3478 : if (!cursor || cursor->file_num < pos.nFile) {
904 [ + - ]: 1 : m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile};
905 : : }
906 : :
907 : : // Update the file information with the current block.
908 : 3478 : const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block));
909 : 3478 : const int nFile = pos.nFile;
910 [ + + ]: 3478 : if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
911 [ + - ]: 15 : m_blockfile_info.resize(nFile + 1);
912 : : }
913 : 3478 : m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime());
914 [ + + ]: 3478 : m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize);
915 [ + - ]: 3478 : m_dirty_fileinfo.insert(nFile);
916 : 3478 : }
917 : :
918 : 123559 : bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
919 : : {
920 : 123559 : pos.nFile = nFile;
921 : :
922 : 123559 : LOCK(cs_LastBlockFile);
923 : :
924 [ + - ]: 123559 : pos.nPos = m_blockfile_info[nFile].nUndoSize;
925 : 123559 : m_blockfile_info[nFile].nUndoSize += nAddSize;
926 [ + - ]: 123559 : m_dirty_fileinfo.insert(nFile);
927 : :
928 : 123559 : bool out_of_space;
929 [ + - ]: 123559 : size_t bytes_allocated = m_undo_file_seq.Allocate(pos, nAddSize, out_of_space);
930 [ - + ]: 123559 : if (out_of_space) {
931 [ # # # # ]: 0 : return FatalError(m_opts.notifications, state, _("Disk space is too low!"));
932 : : }
933 [ + + + + ]: 123559 : if (bytes_allocated != 0 && IsPruneMode()) {
934 : 96 : m_check_for_pruning = true;
935 : : }
936 : :
937 : : return true;
938 : 123559 : }
939 : :
940 : 128976 : bool BlockManager::WriteBlockUndo(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
941 : : {
942 : 128976 : AssertLockHeld(::cs_main);
943 : 128976 : const BlockfileType type = BlockfileTypeForHeight(block.nHeight);
944 [ + + + - ]: 257952 : auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
945 : :
946 : : // Write undo information to disk
947 [ + + ]: 128976 : if (block.GetUndoPos().IsNull()) {
948 : 123559 : FlatFilePos pos;
949 : 123559 : const unsigned int blockundo_size{static_cast<unsigned int>(GetSerializeSize(blockundo))};
950 [ - + ]: 123559 : if (!FindUndoPos(state, block.nFile, pos, blockundo_size + UNDO_DATA_DISK_OVERHEAD)) {
951 : 0 : LogError("FindUndoPos failed");
952 : 0 : return false;
953 : : }
954 : : // Open history file to append
955 : 123559 : AutoFile fileout{OpenUndoFile(pos)};
956 [ - + ]: 123559 : if (fileout.IsNull()) {
957 [ # # ]: 0 : LogError("OpenUndoFile failed");
958 [ # # # # ]: 0 : return FatalError(m_opts.notifications, state, _("Failed to write undo data."));
959 : : }
960 : :
961 : : // Write index header
962 [ + - + - ]: 123559 : fileout << GetParams().MessageStart() << blockundo_size;
963 : : // Write undo data
964 : 123559 : pos.nPos += BLOCK_SERIALIZATION_HEADER_SIZE;
965 [ + - ]: 123559 : fileout << blockundo;
966 : :
967 : : // Calculate & write checksum
968 [ + - ]: 123559 : HashWriter hasher{};
969 [ + - ]: 123559 : hasher << block.pprev->GetBlockHash();
970 [ + - ]: 123559 : hasher << blockundo;
971 [ + - ]: 123559 : fileout << hasher.GetHash();
972 : :
973 : : // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
974 : : // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
975 : : // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
976 : : // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
977 : : // the FindNextBlockPos function
978 [ + + + + ]: 123559 : if (pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[pos.nFile].nHeightLast) {
979 : : // Do not propagate the return code, a failed flush here should not
980 : : // be an indication for a failed write. If it were propagated here,
981 : : // the caller would assume the undo data not to be written, when in
982 : : // fact it is. Note though, that a failed flush might leave the data
983 : : // file untrimmed.
984 [ + - - + ]: 1 : if (!FlushUndoFile(pos.nFile, true)) {
985 [ # # # # # # ]: 0 : LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", pos.nFile);
986 : : }
987 [ + + + + ]: 123558 : } else if (pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) {
988 : 114426 : cursor.undo_height = block.nHeight;
989 : : }
990 : : // update nUndoPos in block index
991 : 123559 : block.nUndoPos = pos.nPos;
992 : 123559 : block.nStatus |= BLOCK_HAVE_UNDO;
993 [ + - ]: 123559 : m_dirty_blockindex.insert(&block);
994 : 123559 : }
995 : :
996 : : return true;
997 : : }
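Put together, the rev-file record written above has this shape (offsets are relative to the start of the record; field widths follow the serialization calls in this function):

    // [4 bytes]  network message start (GetParams().MessageStart())
    // [4 bytes]  size of the serialized CBlockUndo that follows
    // [n bytes]  serialized CBlockUndo
    // [32 bytes] checksum: double-SHA256 of (pprev block hash || serialized CBlockUndo)

block.nUndoPos records the position of the CBlockUndo payload itself, i.e. 8 bytes past the record start (BLOCK_SERIALIZATION_HEADER_SIZE), and ReadBlockUndo() recomputes the same keyed hash via HashVerifier to detect corruption.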
998 : :
999 : 131558 : bool BlockManager::ReadBlock(CBlock& block, const FlatFilePos& pos) const
1000 : : {
1001 : 131558 : block.SetNull();
1002 : :
1003 : : // Open history file to read
1004 : 131558 : AutoFile filein{OpenBlockFile(pos, true)};
1005 [ + + ]: 131558 : if (filein.IsNull()) {
1006 [ + - + - ]: 206 : LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1007 : 206 : return false;
1008 : : }
1009 : :
1010 : : // Read block
1011 : 131352 : try {
1012 [ + - ]: 131352 : filein >> TX_WITH_WITNESS(block);
1013 [ - - ]: 0 : } catch (const std::exception& e) {
1014 [ - - - - ]: 0 : LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
1015 : 0 : return false;
1016 : 0 : }
1017 : :
1018 : : // Check the header
1019 [ + - + - + + ]: 131352 : if (!CheckProofOfWork(block.GetHash(), block.nBits, GetConsensus())) {
1020 [ + - + - ]: 4 : LogError("%s: Errors in block header at %s\n", __func__, pos.ToString());
1021 : 4 : return false;
1022 : : }
1023 : :
1024 : : // Signet only: check block solution
1025 [ + + + - - + ]: 131348 : if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) {
1026 [ # # # # ]: 0 : LogError("%s: Errors in block solution at %s\n", __func__, pos.ToString());
1027 : 0 : return false;
1028 : : }
1029 : :
1030 : : return true;
1031 : 131558 : }
1032 : :
1033 : 125702 : bool BlockManager::ReadBlock(CBlock& block, const CBlockIndex& index) const
1034 : : {
1035 [ + - ]: 251404 : const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())};
1036 : :
1037 [ + + ]: 125702 : if (!ReadBlock(block, block_pos)) {
1038 : : return false;
1039 : : }
1040 [ - + ]: 125495 : if (block.GetHash() != index.GetBlockHash()) {
1041 [ # # # # ]: 0 : LogError("%s: GetHash() doesn't match index for %s at %s\n", __func__, index.ToString(), block_pos.ToString());
1042 : 0 : return false;
1043 : : }
1044 : : return true;
1045 : : }
1046 : :
1047 : 39149 : bool BlockManager::ReadRawBlock(std::vector<uint8_t>& block, const FlatFilePos& pos) const
1048 : : {
1049 : 39149 : FlatFilePos hpos = pos;
1050 : : // If nPos is less than 8, the pos is null and we don't have the block data.
1051 : : // Return early to prevent undefined behavior from unsigned int underflow.
1052 [ - + ]: 39149 : if (hpos.nPos < 8) {
1053 [ # # ]: 0 : LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1054 : 0 : return false;
1055 : : }
1056 : 39149 : hpos.nPos -= 8; // Seek back 8 bytes for meta header
1057 : 39149 : AutoFile filein{OpenBlockFile(hpos, true)};
1058 [ + + ]: 39149 : if (filein.IsNull()) {
1059 [ + - + - ]: 2 : LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1060 : 2 : return false;
1061 : : }
1062 : :
1063 : 39147 : try {
1064 : 39147 : MessageStartChars blk_start;
1065 : 39147 : unsigned int blk_size;
1066 : :
1067 [ + - + - ]: 39147 : filein >> blk_start >> blk_size;
1068 : :
1069 [ - + ]: 39147 : if (blk_start != GetParams().MessageStart()) {
1070 [ # # # # # # # # ]: 0 : LogError("%s: Block magic mismatch for %s: %s versus expected %s\n", __func__, pos.ToString(),
1071 : : HexStr(blk_start),
1072 : : HexStr(GetParams().MessageStart()));
1073 : 0 : return false;
1074 : : }
1075 : :
1076 [ - + ]: 39147 : if (blk_size > MAX_SIZE) {
1077 [ # # # # ]: 0 : LogError("%s: Block data is larger than maximum deserialization size for %s: %s versus %s\n", __func__, pos.ToString(),
1078 : : blk_size, MAX_SIZE);
1079 : 0 : return false;
1080 : : }
1081 : :
1082 [ + - ]: 39147 : block.resize(blk_size); // Zeroing of memory is intentional here
1083 [ + - ]: 39147 : filein.read(MakeWritableByteSpan(block));
1084 [ - - ]: 0 : } catch (const std::exception& e) {
1085 [ - - - - ]: 0 : LogError("%s: Read from block file failed: %s for %s\n", __func__, e.what(), pos.ToString());
1086 : 0 : return false;
1087 : 0 : }
1088 : :
1089 : : return true;
1090 : 39149 : }
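The 8-byte rewind at the top of ReadRawBlock exists because the FlatFilePos stored in the block index points at the serialized block itself, not at the record header. The on-disk record written by WriteBlock() below looks like this (offsets relative to the stored position):

    // pos.nPos - 8 : [4 bytes] network message start (magic)
    // pos.nPos - 4 : [4 bytes] blk_size, the length of the serialized block
    // pos.nPos     : [blk_size bytes] block serialized with TX_WITH_WITNESS

so stepping back BLOCK_SERIALIZATION_HEADER_SIZE bytes lets the function validate the magic and read exactly blk_size raw bytes without deserializing the block.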
1091 : :
1092 : 124597 : FlatFilePos BlockManager::WriteBlock(const CBlock& block, int nHeight)
1093 : : {
1094 : 124597 : const unsigned int block_size{static_cast<unsigned int>(GetSerializeSize(TX_WITH_WITNESS(block)))};
1095 : 124597 : FlatFilePos pos{FindNextBlockPos(block_size + BLOCK_SERIALIZATION_HEADER_SIZE, nHeight, block.GetBlockTime())};
1096 [ - + ]: 124597 : if (pos.IsNull()) {
1097 : 0 : LogError("FindNextBlockPos failed");
1098 : 0 : return FlatFilePos();
1099 : : }
1100 : 124597 : AutoFile fileout{OpenBlockFile(pos)};
1101 [ - + ]: 124597 : if (fileout.IsNull()) {
1102 [ # # ]: 0 : LogError("OpenBlockFile failed");
1103 [ # # # # ]: 0 : m_opts.notifications.fatalError(_("Failed to write block."));
1104 : 0 : return FlatFilePos();
1105 : : }
1106 : :
1107 : : // Write index header
1108 [ + - + - ]: 124597 : fileout << GetParams().MessageStart() << block_size;
1109 : : // Write block
1110 : 124597 : pos.nPos += BLOCK_SERIALIZATION_HEADER_SIZE;
1111 [ + - ]: 124597 : fileout << TX_WITH_WITNESS(block);
1112 : 124597 : return pos;
1113 : 124597 : }
1114 : :
1115 : 1139 : static auto InitBlocksdirXorKey(const BlockManager::Options& opts)
1116 : : {
1117 : : // Bytes are serialized without length indicator, so this is also the exact
1118 : : // size of the XOR-key file.
1119 : 1139 : std::array<std::byte, 8> xor_key{};
1120 : :
1121 : : // Consider this to be the first run if the blocksdir contains only hidden
1122 : : // files (those which start with a .). Checking for a fully-empty dir would
1123 : : // be too aggressive as a .lock file may have already been written.
1124 : 1139 : bool first_run = true;
1125 [ + + + + + + + + + + ]: 4991 : for (const auto& entry : fs::directory_iterator(opts.blocks_dir)) {
1126 [ + - + - ]: 3852 : const std::string path = fs::PathToString(entry.path().filename());
1127 [ + - + - ]: 1239 : if (!entry.is_regular_file() || !path.starts_with('.')) {
1128 : 687 : first_run = false;
1129 : 687 : break;
1130 : : }
1131 [ + - + + + + - - ]: 2102 : }
1132 : :
1133 [ + + + + ]: 1139 : if (opts.use_xor && first_run) {
1134 : : // Only use random fresh key when the boolean option is set and on the
1135 : : // very first start of the program.
1136 : 452 : FastRandomContext{}.fillrand(xor_key);
1137 : : }
1138 : :
1139 [ + - ]: 2278 : const fs::path xor_key_path{opts.blocks_dir / "xor.dat"};
1140 [ + - + + ]: 1139 : if (fs::exists(xor_key_path)) {
1141 : : // A pre-existing xor key file has priority.
1142 [ + - + - ]: 685 : AutoFile xor_key_file{fsbridge::fopen(xor_key_path, "rb")};
1143 [ + - ]: 1370 : xor_key_file >> xor_key;
1144 : 685 : } else {
1145 : : // Create initial or missing xor key file
1146 : 454 : AutoFile xor_key_file{fsbridge::fopen(xor_key_path,
1147 : : #ifdef __MINGW64__
1148 : : "wb" // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210
1149 : : #else
1150 : : "wbx"
1151 : : #endif
1152 [ + - + - ]: 454 : )};
1153 [ + - ]: 908 : xor_key_file << xor_key;
1154 : 454 : }
1155 : : // If the user disabled the key, it must be zero.
1156 [ + + + + ]: 1141 : if (!opts.use_xor && xor_key != decltype(xor_key){}) {
1157 : 1 : throw std::runtime_error{
1158 [ + - ]: 2 : strprintf("The blocksdir XOR-key can not be disabled when a random key was already stored! "
1159 : : "Stored key: '%s', stored path: '%s'.",
1160 [ + - ]: 2 : HexStr(xor_key), fs::PathToString(xor_key_path)),
1161 [ + - + - ]: 3 : };
1162 : : }
1163 [ + - + - + - ]: 3414 : LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(xor_key));
1164 [ + - ]: 1138 : return std::vector<std::byte>{xor_key.begin(), xor_key.end()};
1165 : 1138 : }
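The returned key is handed to every AutoFile opened on blocksdir *.dat files (see OpenBlockFile/OpenUndoFile above): each byte read or written is XORed with the key byte at the corresponding file offset modulo the key length, so an all-zero key (use_xor disabled) is a no-op and a random key lightly obfuscates the files without providing any real encryption. A minimal sketch of that rolling XOR, assuming a helper of this shape (the actual implementation lives in the stream classes, not in this file):

    void RollingXor(std::span<std::byte> data, std::span<const std::byte> key, size_t file_offset)
    {
        if (key.empty()) return;
        for (size_t i = 0; i < data.size(); ++i) {
            data[i] ^= key[(file_offset + i) % key.size()]; // an all-zero key leaves bytes unchanged
        }
    }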
1166 : :
1167 : 1139 : BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts)
1168 : 1139 : : m_prune_mode{opts.prune_target > 0},
1169 [ + + ]: 1140 : m_xor_key{InitBlocksdirXorKey(opts)},
1170 [ + - ]: 1138 : m_opts{std::move(opts)},
1171 [ + - + + + - ]: 2245 : m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}},
1172 [ + - + - ]: 1139 : m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}},
1173 [ + + + - + + ]: 2277 : m_interrupt{interrupt}
1174 : : {
1175 [ + + ]: 1138 : m_block_tree_db = std::make_unique<BlockTreeDB>(m_opts.block_tree_db_params);
1176 : :
1177 [ + + ]: 1137 : if (m_opts.block_tree_db_params.wipe_data) {
1178 [ + - ]: 18 : m_block_tree_db->WriteReindexing(true);
1179 [ + + ]: 18 : m_blockfiles_indexed = false;
1180 : : // If we're reindexing in prune mode, wipe away unusable block files and all undo data files
1181 [ + + ]: 18 : if (m_prune_mode) {
1182 [ + - ]: 4 : CleanupBlockRevFiles();
1183 : : }
1184 : : }
1185 : 1144 : }
1186 : :
1187 : : class ImportingNow
1188 : : {
1189 : : std::atomic<bool>& m_importing;
1190 : :
1191 : : public:
1192 : 938 : ImportingNow(std::atomic<bool>& importing) : m_importing{importing}
1193 : : {
1194 [ - + ]: 938 : assert(m_importing == false);
1195 : 938 : m_importing = true;
1196 : 938 : }
1197 : 938 : ~ImportingNow()
1198 : : {
1199 [ - + ]: 938 : assert(m_importing == true);
1200 : 938 : m_importing = false;
1201 : 938 : }
1202 : : };
1203 : :
1204 : 938 : void ImportBlocks(ChainstateManager& chainman, std::span<const fs::path> import_paths)
1205 : : {
1206 : 938 : ImportingNow imp{chainman.m_blockman.m_importing};
1207 : :
1208 : : // -reindex
1209 [ + + ]: 938 : if (!chainman.m_blockman.m_blockfiles_indexed) {
1210 : 19 : int nFile = 0;
1211 : : // Map of disk positions for blocks with unknown parent (only used for reindex);
1212 : : // parent hash -> child disk position, multiple children can have the same parent.
1213 : 19 : std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
1214 : 49 : while (true) {
1215 [ + - ]: 34 : FlatFilePos pos(nFile, 0);
1216 [ + - + + ]: 102 : if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) {
1217 : : break; // No block files left to reindex
1218 : : }
1219 [ + - ]: 16 : AutoFile file{chainman.m_blockman.OpenBlockFile(pos, true)};
1220 [ + - ]: 16 : if (file.IsNull()) {
1221 : : break; // This error is logged in OpenBlockFile
1222 : : }
1223 [ + - ]: 16 : LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile);
1224 [ + - ]: 16 : chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
1225 [ + - + + ]: 16 : if (chainman.m_interrupt) {
1226 [ + - ]: 1 : LogPrintf("Interrupt requested. Exit %s\n", __func__);
1227 : 1 : return;
1228 : : }
1229 : 15 : nFile++;
1230 : 16 : }
1231 [ + - + - ]: 54 : WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
1232 [ + - ]: 18 : chainman.m_blockman.m_blockfiles_indexed = true;
1233 [ + - ]: 18 : LogPrintf("Reindexing finished\n");
1234 : : // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
1235 [ + - + - ]: 18 : chainman.ActiveChainstate().LoadGenesisBlock();
1236 : 19 : }
1237 : :
1238 : : // -loadblock=
1239 [ + + ]: 938 : for (const fs::path& path : import_paths) {
1240 [ + - + - ]: 1 : AutoFile file{fsbridge::fopen(path, "rb")};
1241 [ + - ]: 1 : if (!file.IsNull()) {
1242 [ + - + - ]: 2 : LogPrintf("Importing blocks file %s...\n", fs::PathToString(path));
1243 [ + - ]: 1 : chainman.LoadExternalBlockFile(file);
1244 [ + - - + ]: 1 : if (chainman.m_interrupt) {
1245 [ # # ]: 0 : LogPrintf("Interrupt requested. Exit %s\n", __func__);
1246 : 0 : return;
1247 : : }
1248 : : } else {
1249 [ # # # # ]: 0 : LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
1250 : : }
1251 : 1 : }
1252 : :
1253 : : // Scan for better chains in the block chain database that are not yet connected in the active best chain.
1254 : :
1255 : : // We can't hold cs_main during ActivateBestChain even though we're accessing
1256 : : // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
1257 : : // the relevant pointers before the ABC call.
1258 [ + - + + + - ]: 3753 : for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
1259 [ + - ]: 942 : BlockValidationState state;
1260 [ + - - + - + ]: 942 : if (!chainstate->ActivateBestChain(state, nullptr)) {
1261 [ # # # # # # ]: 0 : chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString()));
1262 : 0 : return;
1263 : : }
1264 : 942 : }
1265 : : // End scope of ImportingNow
1266 : 938 : }
1267 : :
1268 : 11 : std::ostream& operator<<(std::ostream& os, const BlockfileType& type) {
1269 [ - + - ]: 11 : switch(type) {
1270 : 0 : case BlockfileType::NORMAL: os << "normal"; break;
1271 : 11 : case BlockfileType::ASSUMED: os << "assumed"; break;
1272 : 0 : default: os.setstate(std::ios_base::failbit);
1273 : : }
1274 : 11 : return os;
1275 : : }
1276 : :
1277 : 11 : std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) {
1278 [ + - ]: 11 : os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
1279 : 11 : return os;
1280 : : }
1281 : : } // namespace node
|