Branch data Line data Source code
1 : : // Copyright (c) 2011-2022 The Bitcoin Core developers
2 : : // Distributed under the MIT software license, see the accompanying
3 : : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
4 : :
5 : : #include <node/blockstorage.h>
6 : :
7 : : #include <arith_uint256.h>
8 : : #include <chain.h>
9 : : #include <consensus/params.h>
10 : : #include <consensus/validation.h>
11 : : #include <dbwrapper.h>
12 : : #include <flatfile.h>
13 : : #include <hash.h>
14 : : #include <kernel/blockmanager_opts.h>
15 : : #include <kernel/chainparams.h>
16 : : #include <kernel/messagestartchars.h>
17 : : #include <kernel/notifications_interface.h>
18 : : #include <logging.h>
19 : : #include <pow.h>
20 : : #include <primitives/block.h>
21 : : #include <primitives/transaction.h>
22 : : #include <random.h>
23 : : #include <serialize.h>
24 : : #include <signet.h>
25 : : #include <span.h>
26 : : #include <streams.h>
27 : : #include <sync.h>
28 : : #include <tinyformat.h>
29 : : #include <uint256.h>
30 : : #include <undo.h>
31 : : #include <util/batchpriority.h>
32 : : #include <util/check.h>
33 : : #include <util/fs.h>
34 : : #include <util/obfuscation.h>
35 : : #include <util/signalinterrupt.h>
36 : : #include <util/strencodings.h>
37 : : #include <util/syserror.h>
38 : : #include <util/translation.h>
39 : : #include <validation.h>
40 : :
41 : : #include <cstddef>
42 : : #include <map>
43 : : #include <optional>
44 : : #include <unordered_map>
45 : :
46 : : namespace kernel {
47 : : static constexpr uint8_t DB_BLOCK_FILES{'f'};
48 : : static constexpr uint8_t DB_BLOCK_INDEX{'b'};
49 : : static constexpr uint8_t DB_FLAG{'F'};
50 : : static constexpr uint8_t DB_REINDEX_FLAG{'R'};
51 : : static constexpr uint8_t DB_LAST_BLOCK{'l'};
52 : : // Keys used in previous versions that might still be found in the DB:
53 : : // BlockTreeDB::DB_TXINDEX_BLOCK{'T'};
54 : : // BlockTreeDB::DB_TXINDEX{'t'}
55 : : // BlockTreeDB::ReadFlag("txindex")
56 : :
57 : 2411 : bool BlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo& info)
58 : : {
59 : 2411 : return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
60 : : }
61 : :
62 : 35 : bool BlockTreeDB::WriteReindexing(bool fReindexing)
63 : : {
64 [ + + ]: 35 : if (fReindexing) {
65 : 18 : return Write(DB_REINDEX_FLAG, uint8_t{'1'});
66 : : } else {
67 : 17 : return Erase(DB_REINDEX_FLAG);
68 : : }
69 : : }
70 : :
71 : 1138 : void BlockTreeDB::ReadReindexing(bool& fReindexing)
72 : : {
73 : 1138 : fReindexing = Exists(DB_REINDEX_FLAG);
74 : 1138 : }
75 : :
76 : 1139 : bool BlockTreeDB::ReadLastBlockFile(int& nFile)
77 : : {
78 : 1139 : return Read(DB_LAST_BLOCK, nFile);
79 : : }
80 : :
81 : 3280 : bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
82 : : {
83 : 3280 : CDBBatch batch(*this);
84 [ + - + + ]: 5017 : for (const auto& [file, info] : fileInfo) {
85 [ + - ]: 1737 : batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info);
86 : : }
87 [ + - ]: 3280 : batch.Write(DB_LAST_BLOCK, nLastFile);
88 [ + + ]: 156907 : for (const CBlockIndex* bi : blockinfo) {
89 [ + - ]: 153627 : batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi});
90 : : }
91 [ + - ]: 6560 : return WriteBatch(batch, true);
92 : 3280 : }
93 : :
94 : 15 : bool BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
95 : : {
96 [ - + + - ]: 15 : return Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
97 : : }
98 : :
99 : 1138 : bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue)
100 : : {
101 : 1138 : uint8_t ch;
102 [ + - + + ]: 1138 : if (!Read(std::make_pair(DB_FLAG, name), ch)) {
103 : : return false;
104 : : }
105 : 16 : fValue = ch == uint8_t{'1'};
106 : 16 : return true;
107 : : }
108 : :
109 : 1142 : bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt)
110 : : {
111 : 1142 : AssertLockHeld(::cs_main);
112 [ + - ]: 1142 : std::unique_ptr<CDBIterator> pcursor(NewIterator());
113 [ + - ]: 1142 : pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));
114 : :
115 : : // Load m_block_index
116 [ + - + + ]: 151647 : while (pcursor->Valid()) {
117 [ + - + + ]: 151198 : if (interrupt) return false;
118 : 151197 : std::pair<uint8_t, uint256> key;
119 [ + - + + + - ]: 151197 : if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
120 : 150505 : CDiskBlockIndex diskindex;
121 [ + - + - ]: 150505 : if (pcursor->GetValue(diskindex)) {
122 : : // Construct block index object
123 [ + - + - ]: 150505 : CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash());
124 [ + - ]: 150505 : pindexNew->pprev = insertBlockIndex(diskindex.hashPrev);
125 : 150505 : pindexNew->nHeight = diskindex.nHeight;
126 : 150505 : pindexNew->nFile = diskindex.nFile;
127 : 150505 : pindexNew->nDataPos = diskindex.nDataPos;
128 : 150505 : pindexNew->nUndoPos = diskindex.nUndoPos;
129 : 150505 : pindexNew->nVersion = diskindex.nVersion;
130 : 150505 : pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
131 : 150505 : pindexNew->nTime = diskindex.nTime;
132 : 150505 : pindexNew->nBits = diskindex.nBits;
133 : 150505 : pindexNew->nNonce = diskindex.nNonce;
134 : 150505 : pindexNew->nStatus = diskindex.nStatus;
135 : 150505 : pindexNew->nTx = diskindex.nTx;
136 : :
137 [ + - - + ]: 150505 : if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
138 [ # # # # ]: 0 : LogError("%s: CheckProofOfWork failed: %s\n", __func__, pindexNew->ToString());
139 : 0 : return false;
140 : : }
141 : :
142 [ + - ]: 150505 : pcursor->Next();
143 : : } else {
144 [ # # ]: 0 : LogError("%s: failed to read value\n", __func__);
145 : : return false;
146 : : }
147 : : } else {
148 : : break;
149 : : }
150 : : }
151 : :
152 : : return true;
153 : 1142 : }
154 : : } // namespace kernel
155 : :
156 : : namespace node {
157 : :
158 : 769124867 : bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
159 : : {
160 : : // First sort by most total work, ...
161 [ + + ]: 769124867 : if (pa->nChainWork > pb->nChainWork) return false;
162 [ + + ]: 506173947 : if (pa->nChainWork < pb->nChainWork) return true;
163 : :
164 : : // ... then by earliest time received, ...
165 [ + + ]: 3313990 : if (pa->nSequenceId < pb->nSequenceId) return false;
166 [ + + ]: 3273331 : if (pa->nSequenceId > pb->nSequenceId) return true;
167 : :
168 : : // Use pointer address as tie breaker (should only happen with blocks
169 : : // loaded from disk, as those all have id 0).
170 [ + + ]: 3255854 : if (pa < pb) return false;
171 [ + + ]: 3254015 : if (pa > pb) return true;
172 : :
173 : : // Identical blocks.
174 : : return false;
175 : : }
176 : :
177 : 2960122 : bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
178 : : {
179 : 2960122 : return pa->nHeight < pb->nHeight;
180 : : }
181 : :
182 : 2278 : std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices()
183 : : {
184 : 2278 : AssertLockHeld(cs_main);
185 : 2278 : std::vector<CBlockIndex*> rv;
186 [ + - ]: 2278 : rv.reserve(m_block_index.size());
187 [ + + + - ]: 302570 : for (auto& [_, block_index] : m_block_index) {
188 [ + - ]: 300292 : rv.push_back(&block_index);
189 : : }
190 : 2278 : return rv;
191 : 0 : }
192 : :
193 : 678129 : CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash)
194 : : {
195 : 678129 : AssertLockHeld(cs_main);
196 : 678129 : BlockMap::iterator it = m_block_index.find(hash);
197 [ + + ]: 678129 : return it == m_block_index.end() ? nullptr : &it->second;
198 : : }
199 : :
200 : 6 : const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
201 : : {
202 : 6 : AssertLockHeld(cs_main);
203 : 6 : BlockMap::const_iterator it = m_block_index.find(hash);
204 [ + + ]: 6 : return it == m_block_index.end() ? nullptr : &it->second;
205 : : }
206 : :
207 : 142905 : CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header)
208 : : {
209 : 142905 : AssertLockHeld(cs_main);
210 : :
211 [ + + ]: 142905 : auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
212 [ + + ]: 142905 : if (!inserted) {
213 : 3 : return &mi->second;
214 : : }
215 : 142902 : CBlockIndex* pindexNew = &(*mi).second;
216 : :
217 : : // We assign the sequence id to blocks only when the full data is available,
218 : : // to avoid miners withholding blocks but broadcasting headers, to get a
219 : : // competitive advantage.
220 : 142902 : pindexNew->nSequenceId = 0;
221 : :
222 : 142902 : pindexNew->phashBlock = &((*mi).first);
223 : 142902 : BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
224 [ + + ]: 142902 : if (miPrev != m_block_index.end()) {
225 : 142436 : pindexNew->pprev = &(*miPrev).second;
226 : 142436 : pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
227 : 142436 : pindexNew->BuildSkip();
228 : : }
229 [ + + + + ]: 142902 : pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
230 [ + + + - ]: 428706 : pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
231 : 142902 : pindexNew->RaiseValidity(BLOCK_VALID_TREE);
232 [ + + + + ]: 142902 : if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
233 : 122159 : best_header = pindexNew;
234 : : }
235 : :
236 : 142902 : m_dirty_blockindex.insert(pindexNew);
237 : :
238 : 142902 : return pindexNew;
239 : : }
240 : :
241 : 72 : void BlockManager::PruneOneBlockFile(const int fileNumber)
242 : : {
243 : 72 : AssertLockHeld(cs_main);
244 : 72 : LOCK(cs_LastBlockFile);
245 : :
246 [ + + + + ]: 127485 : for (auto& entry : m_block_index) {
247 : 127413 : CBlockIndex* pindex = &entry.second;
248 [ + + ]: 127413 : if (pindex->nFile == fileNumber) {
249 : 17429 : pindex->nStatus &= ~BLOCK_HAVE_DATA;
250 : 17429 : pindex->nStatus &= ~BLOCK_HAVE_UNDO;
251 : 17429 : pindex->nFile = 0;
252 : 17429 : pindex->nDataPos = 0;
253 : 17429 : pindex->nUndoPos = 0;
254 [ + - ]: 17429 : m_dirty_blockindex.insert(pindex);
255 : :
256 : : // Prune from m_blocks_unlinked -- any block we prune would have
257 : : // to be downloaded again in order to consider its chain, at which
258 : : // point it would be considered as a candidate for
259 : : // m_blocks_unlinked or setBlockIndexCandidates.
260 : 17429 : auto range = m_blocks_unlinked.equal_range(pindex->pprev);
261 [ - + ]: 17429 : while (range.first != range.second) {
262 : 0 : std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
263 : 0 : range.first++;
264 [ # # ]: 0 : if (_it->second == pindex) {
265 : 0 : m_blocks_unlinked.erase(_it);
266 : : }
267 : : }
268 : : }
269 : : }
270 : :
271 [ + - ]: 72 : m_blockfile_info.at(fileNumber) = CBlockFileInfo{};
272 [ + - ]: 72 : m_dirty_fileinfo.insert(fileNumber);
273 : 72 : }
274 : :
275 : 27 : void BlockManager::FindFilesToPruneManual(
276 : : std::set<int>& setFilesToPrune,
277 : : int nManualPruneHeight,
278 : : const Chainstate& chain,
279 : : ChainstateManager& chainman)
280 : : {
281 [ + - - + ]: 27 : assert(IsPruneMode() && nManualPruneHeight > 0);
282 : :
283 [ + - ]: 27 : LOCK2(cs_main, cs_LastBlockFile);
284 [ - + ]: 27 : if (chain.m_chain.Height() < 0) {
285 [ # # ]: 0 : return;
286 : : }
287 : :
288 [ + - ]: 27 : const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight);
289 : :
290 : 27 : int count = 0;
291 [ + + ]: 149 : for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
292 [ + + ]: 122 : const auto& fileinfo = m_blockfile_info[fileNumber];
293 [ + + + + - + ]: 122 : if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
294 : 69 : continue;
295 : : }
296 : :
297 [ + - ]: 53 : PruneOneBlockFile(fileNumber);
298 [ + - ]: 53 : setFilesToPrune.insert(fileNumber);
299 : 53 : count++;
300 : : }
301 [ + - + - ]: 27 : LogInfo("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs",
302 : : chain.GetRole(), last_block_can_prune, count);
303 [ - - + - ]: 54 : }
304 : :
305 : 592 : void BlockManager::FindFilesToPrune(
306 : : std::set<int>& setFilesToPrune,
307 : : int last_prune,
308 : : const Chainstate& chain,
309 : : ChainstateManager& chainman)
310 : : {
311 [ + - ]: 592 : LOCK2(cs_main, cs_LastBlockFile);
312 : : // Distribute our -prune budget over all chainstates.
313 [ + - ]: 592 : const auto target = std::max(
314 [ + - + + ]: 785 : MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size());
315 : 592 : const uint64_t target_sync_height = chainman.m_best_header->nHeight;
316 : :
317 [ + + + - ]: 592 : if (chain.m_chain.Height() < 0 || target == 0) {
318 : : return;
319 : : }
320 [ + + ]: 573 : if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) {
321 : : return;
322 : : }
323 : :
324 [ + - + - ]: 463 : const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune);
325 : :
326 [ + - ]: 463 : uint64_t nCurrentUsage = CalculateCurrentUsage();
327 : : // We don't check to prune until after we've allocated new space for files,
328 : : // so we should leave a buffer under our target to account for another allocation
329 : : // before the next pruning.
330 : 463 : uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
331 : 463 : uint64_t nBytesToPrune;
332 : 463 : int count = 0;
333 : :
334 [ + + ]: 463 : if (nCurrentUsage + nBuffer >= target) {
335 : : // On a prune event, the chainstate DB is flushed.
336 : : // To avoid excessive prune events negating the benefit of high dbcache
337 : : // values, we should not prune too rapidly.
338 : : // So when pruning in IBD, increase the buffer to avoid a re-prune too soon.
339 [ + - ]: 23 : const auto chain_tip_height = chain.m_chain.Height();
340 [ + - - + - - ]: 23 : if (chainman.IsInitialBlockDownload() && target_sync_height > (uint64_t)chain_tip_height) {
341 : : // Since this is only relevant during IBD, we assume blocks are at least 1 MB on average
342 : 0 : static constexpr uint64_t average_block_size = 1000000; /* 1 MB */
343 : 0 : const uint64_t remaining_blocks = target_sync_height - chain_tip_height;
344 : 0 : nBuffer += average_block_size * remaining_blocks;
345 : : }
346 : :
347 [ + + ]: 169 : for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
348 [ + + ]: 158 : const auto& fileinfo = m_blockfile_info[fileNumber];
349 : 158 : nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize;
350 : :
351 [ + + ]: 158 : if (fileinfo.nSize == 0) {
352 : 86 : continue;
353 : : }
354 : :
355 [ + + ]: 72 : if (nCurrentUsage + nBuffer < target) { // are we below our target?
356 : : break;
357 : : }
358 : :
359 : : // don't prune files that could have a block that's not within the allowable
360 : : // prune range for the chain being pruned.
361 [ + + - + ]: 60 : if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
362 : 44 : continue;
363 : : }
364 : :
365 [ + - ]: 16 : PruneOneBlockFile(fileNumber);
366 : : // Queue up the files for removal
367 [ + - ]: 16 : setFilesToPrune.insert(fileNumber);
368 : 16 : nCurrentUsage -= nBytesToPrune;
369 : 16 : count++;
370 : : }
371 : : }
372 : :
373 [ + - + - + - + - + - ]: 463 : LogDebug(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n",
374 : : chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024,
375 : : (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024,
376 : : min_block_to_prune, last_block_can_prune, count);
377 [ + - + - ]: 1184 : }
378 : :
379 : 20527 : void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) {
380 : 20527 : AssertLockHeld(::cs_main);
381 : 20527 : m_prune_locks[name] = lock_info;
382 : 20527 : }
383 : :
384 : 301010 : CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
385 : : {
386 : 301010 : AssertLockHeld(cs_main);
387 : :
388 [ + + ]: 301010 : if (hash.IsNull()) {
389 : : return nullptr;
390 : : }
391 : :
392 [ + + ]: 300318 : const auto [mi, inserted]{m_block_index.try_emplace(hash)};
393 [ + + ]: 300318 : CBlockIndex* pindex = &(*mi).second;
394 [ + + ]: 300318 : if (inserted) {
395 : 149533 : pindex->phashBlock = &((*mi).first);
396 : : }
397 : : return pindex;
398 : : }
399 : :
400 : 1142 : bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash)
401 : : {
402 [ + - + + ]: 1142 : if (!m_block_tree_db->LoadBlockIndexGuts(
403 [ + - ]: 302152 : GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) {
404 : : return false;
405 : : }
406 : :
407 [ + + ]: 1141 : if (snapshot_blockhash) {
408 : 11 : const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash);
409 [ + + ]: 11 : if (!maybe_au_data) {
410 [ + - + - ]: 2 : m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString()));
411 : 1 : return false;
412 : : }
413 : 10 : const AssumeutxoData& au_data = *Assert(maybe_au_data);
414 : 10 : m_snapshot_height = au_data.height;
415 : 10 : CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};
416 : :
417 : : // Since m_chain_tx_count (responsible for estimated progress) isn't persisted
418 : : // to disk, we must bootstrap the value for assumedvalid chainstates
419 : : // from the hardcoded assumeutxo chainparams.
420 : 10 : base->m_chain_tx_count = au_data.m_chain_tx_count;
421 [ + - ]: 20 : LogInfo("[snapshot] set m_chain_tx_count=%d for %s", au_data.m_chain_tx_count, snapshot_blockhash->ToString());
422 : : } else {
423 : : // If this isn't called with a snapshot blockhash, make sure the cached snapshot height
424 : : // is null. This is relevant during snapshot completion, when the blockman may be loaded
425 : : // with a height that then needs to be cleared after the snapshot is fully validated.
426 [ + + ]: 1130 : m_snapshot_height.reset();
427 : : }
428 : :
429 : 1140 : Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());
430 : :
431 : : // Calculate nChainWork
432 : 1140 : std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
433 [ + - ]: 1140 : std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
434 : : CBlockIndexHeightOnlyComparator());
435 : :
436 : 1140 : CBlockIndex* previous_index{nullptr};
437 [ + + ]: 151316 : for (CBlockIndex* pindex : vSortedByHeight) {
438 [ + - + - ]: 150177 : if (m_interrupt) return false;
439 [ + + + + ]: 150177 : if (previous_index && pindex->nHeight > previous_index->nHeight + 1) {
440 [ + - ]: 1 : LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1);
441 : : return false;
442 : : }
443 : 150176 : previous_index = pindex;
444 [ + - + + + - ]: 450528 : pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
445 [ + + + + ]: 150176 : pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
446 : :
447 : : // We can link the chain of blocks for which we've received transactions at some point, or
448 : : // blocks that are assumed-valid on the basis of snapshot load (see
449 : : // PopulateAndValidateSnapshot()).
450 : : // Pruned nodes may have deleted the block.
451 [ + + ]: 150176 : if (pindex->nTx > 0) {
452 [ + + ]: 149002 : if (pindex->pprev) {
453 [ + + + + + - ]: 148314 : if (m_snapshot_height && pindex->nHeight == *m_snapshot_height &&
454 [ + - ]: 4 : pindex->GetBlockHash() == *snapshot_blockhash) {
455 : : // Should have been set above; don't disturb it with code below.
456 [ + - ]: 4 : Assert(pindex->m_chain_tx_count > 0);
457 [ + + ]: 148306 : } else if (pindex->pprev->m_chain_tx_count > 0) {
458 : 148299 : pindex->m_chain_tx_count = pindex->pprev->m_chain_tx_count + pindex->nTx;
459 : : } else {
460 : 7 : pindex->m_chain_tx_count = 0;
461 [ + - ]: 7 : m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
462 : : }
463 : : } else {
464 : 692 : pindex->m_chain_tx_count = pindex->nTx;
465 : : }
466 : : }
467 [ + + + + - + ]: 150176 : if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
468 : 0 : pindex->nStatus |= BLOCK_FAILED_CHILD;
469 [ # # ]: 0 : m_dirty_blockindex.insert(pindex);
470 : : }
471 [ + + ]: 150176 : if (pindex->pprev) {
472 [ + - ]: 149461 : pindex->BuildSkip();
473 : : }
474 : : }
475 : :
476 : : return true;
477 : 1140 : }
478 : :
479 : 3280 : bool BlockManager::WriteBlockIndexDB()
480 : : {
481 : 3280 : AssertLockHeld(::cs_main);
482 : 3280 : std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
483 [ + - ]: 3280 : vFiles.reserve(m_dirty_fileinfo.size());
484 [ + + ]: 5017 : for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
485 [ + - ]: 1737 : vFiles.emplace_back(*it, &m_blockfile_info[*it]);
486 : 1737 : m_dirty_fileinfo.erase(it++);
487 : : }
488 : 3280 : std::vector<const CBlockIndex*> vBlocks;
489 [ + - ]: 3280 : vBlocks.reserve(m_dirty_blockindex.size());
490 [ + + ]: 156907 : for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
491 [ + - ]: 153627 : vBlocks.push_back(*it);
492 : 153627 : m_dirty_blockindex.erase(it++);
493 : : }
494 [ + - + - ]: 6560 : int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
495 [ + - - + ]: 3280 : if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) {
496 : 0 : return false;
497 : : }
498 : : return true;
499 : 3280 : }
500 : :
501 : 1142 : bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash)
502 : : {
503 [ + + ]: 1142 : if (!LoadBlockIndex(snapshot_blockhash)) {
504 : : return false;
505 : : }
506 : 1139 : int max_blockfile_num{0};
507 : :
508 : : // Load block file info
509 : 1139 : m_block_tree_db->ReadLastBlockFile(max_blockfile_num);
510 : 1139 : m_blockfile_info.resize(max_blockfile_num + 1);
511 : 1139 : LogInfo("Loading block index db: last block file = %i", max_blockfile_num);
512 [ + + ]: 2411 : for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
513 : 1272 : m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
514 : : }
515 [ + - ]: 1139 : LogInfo("Loading block index db: last block file info: %s", m_blockfile_info[max_blockfile_num].ToString());
516 : 1139 : for (int nFile = max_blockfile_num + 1; true; nFile++) {
517 : 1139 : CBlockFileInfo info;
518 [ - + ]: 1139 : if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
519 : 0 : m_blockfile_info.push_back(info);
520 : : } else {
521 : : break;
522 : : }
523 : 0 : }
524 : :
525 : : // Check presence of blk files
526 : 1139 : LogInfo("Checking all blk files are present...");
527 : 1139 : std::set<int> setBlkDataFiles;
528 [ + + + + ]: 151289 : for (const auto& [_, block_index] : m_block_index) {
529 [ + + ]: 150150 : if (block_index.nStatus & BLOCK_HAVE_DATA) {
530 [ + - ]: 134426 : setBlkDataFiles.insert(block_index.nFile);
531 : : }
532 : : }
533 [ + + ]: 1902 : for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
534 [ + - ]: 764 : FlatFilePos pos(*it, 0);
535 [ + - + + ]: 764 : if (OpenBlockFile(pos, /*fReadOnly=*/true).IsNull()) {
536 : : return false;
537 : : }
538 : : }
539 : :
540 : 1138 : {
541 : : // Initialize the blockfile cursors.
542 [ + - ]: 1138 : LOCK(cs_LastBlockFile);
543 [ + + ]: 2409 : for (size_t i = 0; i < m_blockfile_info.size(); ++i) {
544 [ + - ]: 1271 : const auto last_height_in_file = m_blockfile_info[i].nHeightLast;
545 [ + - + + ]: 2542 : m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0};
546 : : }
547 : 0 : }
548 : :
549 : : // Check whether we have ever pruned block & undo files
550 [ + - + - ]: 1138 : m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
551 [ + + ]: 1138 : if (m_have_pruned) {
552 [ + - ]: 16 : LogInfo("Loading block index db: Block files have previously been pruned");
553 : : }
554 : :
555 : : // Check whether we need to continue reindexing
556 : 1138 : bool fReindexing = false;
557 [ + - ]: 1138 : m_block_tree_db->ReadReindexing(fReindexing);
558 [ + + ]: 1138 : if (fReindexing) m_blockfiles_indexed = false;
559 : :
560 : : return true;
561 : 1139 : }
562 : :
563 : 1141 : void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
564 : : {
565 : 1141 : AssertLockHeld(::cs_main);
566 [ + - ]: 2282 : int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
567 [ + + ]: 1141 : if (!m_have_pruned) {
568 : : return;
569 : : }
570 : :
571 : 18 : std::set<int> block_files_to_prune;
572 [ + + ]: 111 : for (int file_number = 0; file_number < max_blockfile; file_number++) {
573 [ + + ]: 93 : if (m_blockfile_info[file_number].nSize == 0) {
574 [ + - ]: 63 : block_files_to_prune.insert(file_number);
575 : : }
576 : : }
577 : :
578 [ + - ]: 18 : UnlinkPrunedFiles(block_files_to_prune);
579 : 18 : }
580 : :
581 : 412 : bool BlockManager::IsBlockPruned(const CBlockIndex& block) const
582 : : {
583 : 412 : AssertLockHeld(::cs_main);
584 [ + + + - - + ]: 412 : return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0);
585 : : }
586 : :
587 : 109 : const CBlockIndex* BlockManager::GetFirstBlock(const CBlockIndex& upper_block, uint32_t status_mask, const CBlockIndex* lower_block) const
588 : : {
589 : 109 : AssertLockHeld(::cs_main);
590 : 109 : const CBlockIndex* last_block = &upper_block;
591 [ - + ]: 109 : assert((last_block->nStatus & status_mask) == status_mask); // 'upper_block' must satisfy the status mask
592 [ + + + + ]: 40351 : while (last_block->pprev && ((last_block->pprev->nStatus & status_mask) == status_mask)) {
593 [ + + ]: 40253 : if (lower_block) {
594 : : // Return if we reached the lower_block
595 [ + + ]: 40104 : if (last_block == lower_block) return lower_block;
596 : : // if the range was surpassed, 'lower_block' is not part of the 'upper_block' chain,
597 : : // which is currently not allowed.
598 [ - + ]: 40093 : assert(last_block->nHeight >= lower_block->nHeight);
599 : : }
600 : : last_block = last_block->pprev;
601 : : }
602 [ - + ]: 98 : assert(last_block != nullptr);
603 : : return last_block;
604 : : }
605 : :
606 : 38 : bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block)
607 : : {
608 [ + - ]: 38 : if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false;
609 : 38 : return GetFirstBlock(upper_block, BLOCK_HAVE_DATA, &lower_block) == &lower_block;
610 : : }
611 : :
612 : : // If we're using -prune with -reindex, then delete block files that will be ignored by the
613 : : // reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
614 : : // is missing, do the same here to delete any later block files after a gap. Also delete all
615 : : // rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info
616 : : // is in sync with what's actually on disk by the time we start downloading, so that pruning
617 : : // works correctly.
618 : 5 : void BlockManager::CleanupBlockRevFiles() const
619 : : {
620 [ + - ]: 5 : std::map<std::string, fs::path> mapBlockFiles;
621 : :
622 : : // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
623 : : // Remove the rev files immediately and insert the blk file paths into an
624 : : // ordered map keyed by block file index.
625 [ + - ]: 5 : LogInfo("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune");
626 [ + - + + + + ]: 49 : for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) {
627 [ + - + - ]: 156 : const std::string path = fs::PathToString(it->path().filename());
628 [ + - ]: 39 : if (fs::is_regular_file(*it) &&
629 [ + + + + ]: 39 : path.length() == 12 &&
630 [ + - ]: 24 : path.ends_with(".dat"))
631 : : {
632 [ + + ]: 24 : if (path.starts_with("blk")) {
633 [ + - + - + - ]: 24 : mapBlockFiles[path.substr(3, 5)] = it->path();
634 [ + - ]: 12 : } else if (path.starts_with("rev")) {
635 [ + - ]: 12 : remove(it->path());
636 : : }
637 : : }
638 [ + - ]: 39 : }
639 : :
640 : : // Remove all block files that aren't part of a contiguous set starting at
641 : : // zero by walking the ordered map (keys are block file indices) by
642 : : // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
643 : : // start removing block files.
644 : 5 : int nContigCounter = 0;
645 [ + + ]: 17 : for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
646 [ + - + + ]: 12 : if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) {
647 : 1 : nContigCounter++;
648 : 1 : continue;
649 : : }
650 [ + - ]: 11 : remove(item.second);
651 : : }
652 : 5 : }
653 : :
654 : 3 : CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
655 : : {
656 : 3 : LOCK(cs_LastBlockFile);
657 : :
658 [ + - + - ]: 3 : return &m_blockfile_info.at(n);
659 : 3 : }
660 : :
661 : 53285 : bool BlockManager::ReadBlockUndo(CBlockUndo& blockundo, const CBlockIndex& index) const
662 : : {
663 [ + - ]: 106570 : const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};
664 : :
665 : : // Open history file to read
666 : 53285 : AutoFile file{OpenUndoFile(pos, true)};
667 [ + + ]: 53285 : if (file.IsNull()) {
668 [ + - + - ]: 7 : LogError("OpenUndoFile failed for %s while reading block undo", pos.ToString());
669 : 7 : return false;
670 : : }
671 [ + - ]: 53278 : BufferedReader filein{std::move(file)};
672 : :
673 : 53278 : try {
674 : : // Read block
675 [ + - ]: 53278 : HashVerifier verifier{filein}; // Use HashVerifier, as reserializing may lose data, c.f. commit d3424243
676 : :
677 [ + - ]: 53278 : verifier << index.pprev->GetBlockHash();
678 [ + + ]: 53278 : verifier >> blockundo;
679 : :
680 : 53277 : uint256 hashChecksum;
681 [ + - ]: 53277 : filein >> hashChecksum;
682 : :
683 : : // Verify checksum
684 [ + - - + ]: 53277 : if (hashChecksum != verifier.GetHash()) {
685 [ # # # # ]: 0 : LogError("Checksum mismatch at %s while reading block undo", pos.ToString());
686 : 0 : return false;
687 : : }
688 [ - + ]: 1 : } catch (const std::exception& e) {
689 [ + - + - ]: 1 : LogError("Deserialize or I/O error - %s at %s while reading block undo", e.what(), pos.ToString());
690 : 1 : return false;
691 : 1 : }
692 : :
693 : : return true;
694 : 53285 : }
695 : :
696 : 3384 : bool BlockManager::FlushUndoFile(int block_file, bool finalize)
697 : : {
698 : 3384 : FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
699 [ - + ]: 3384 : if (!m_undo_file_seq.Flush(undo_pos_old, finalize)) {
700 [ # # ]: 0 : m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error."));
701 : 0 : return false;
702 : : }
703 : : return true;
704 : : }
705 : :
706 : 3383 : bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
707 : : {
708 : 3383 : bool success = true;
709 : 3383 : LOCK(cs_LastBlockFile);
710 : :
711 [ + - ]: 3383 : if (m_blockfile_info.size() < 1) {
712 : : // Return if we haven't loaded any blockfiles yet. This happens during
713 : : // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which
714 : : // then calls FlushStateToDisk()), resulting in a call to this function before we
715 : : // have populated `m_blockfile_info` via LoadBlockIndexDB().
716 : : return true;
717 : : }
718 [ - + ]: 3383 : assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num);
719 : :
720 [ + - ]: 3383 : FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
721 [ + - - + ]: 3383 : if (!m_block_file_seq.Flush(block_pos_old, fFinalize)) {
722 [ # # # # ]: 0 : m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error."));
723 : 0 : success = false;
724 : : }
725 : : // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
726 : : // e.g. during IBD or a sync after a node going offline
727 [ + - ]: 3383 : if (!fFinalize || finalize_undo) {
728 [ + - - + ]: 3383 : if (!FlushUndoFile(blockfile_num, finalize_undo)) {
729 : 0 : success = false;
730 : : }
731 : : }
732 : : return success;
733 : 3383 : }
734 : :
735 : 263366 : BlockfileType BlockManager::BlockfileTypeForHeight(int height)
736 : : {
737 [ + + ]: 263366 : if (!m_snapshot_height) {
738 : : return BlockfileType::NORMAL;
739 : : }
740 [ + + ]: 3725 : return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL;
741 : : }
742 : :
743 : 3280 : bool BlockManager::FlushChainstateBlockFile(int tip_height)
744 : : {
745 : 3280 : LOCK(cs_LastBlockFile);
746 [ + - + + ]: 3280 : auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
747 : : // If the cursor does not exist, it means an assumeutxo snapshot is loaded,
748 : : // but no blocks past the snapshot height have been written yet, so there
749 : : // is no data associated with the chainstate, and it is safe not to flush.
750 [ + + ]: 3280 : if (cursor) {
751 [ + - ]: 3255 : return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
752 : : }
753 : : // No need to log warnings in this case.
754 : : return true;
755 : 3280 : }
756 : :
757 : 22014 : uint64_t BlockManager::CalculateCurrentUsage()
758 : : {
759 : 22014 : LOCK(cs_LastBlockFile);
760 : :
761 : 22014 : uint64_t retval = 0;
762 [ + + ]: 46318 : for (const CBlockFileInfo& file : m_blockfile_info) {
763 : 24304 : retval += file.nSize + file.nUndoSize;
764 : : }
765 [ + - ]: 22014 : return retval;
766 : 22014 : }
767 : :
768 : 55 : void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const
769 : : {
770 : 55 : std::error_code ec;
771 [ + + ]: 189 : for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
772 : 134 : FlatFilePos pos(*it, 0);
773 : 134 : const bool removed_blockfile{fs::remove(m_block_file_seq.FileName(pos), ec)};
774 : 134 : const bool removed_undofile{fs::remove(m_undo_file_seq.FileName(pos), ec)};
775 [ + + ]: 134 : if (removed_blockfile || removed_undofile) {
776 [ + - ]: 72 : LogDebug(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
777 : : }
778 : : }
779 : 55 : }
780 : :
781 : 289684 : AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const
782 : : {
783 : 289684 : return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_obfuscation};
784 : : }
785 : :
786 : : /** Open an undo file (rev?????.dat) */
787 : 178093 : AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const
788 : : {
789 : 178093 : return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_obfuscation};
790 : : }
791 : :
792 : 33 : fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const
793 : : {
794 : 33 : return m_block_file_seq.FileName(pos);
795 : : }
796 : :
797 : 126253 : FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
798 : : {
799 : 126253 : LOCK(cs_LastBlockFile);
800 : :
801 [ + - ]: 126253 : const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
802 : :
803 [ + + ]: 126253 : if (!m_blockfile_cursors[chain_type]) {
804 : : // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
805 [ - + ]: 11 : assert(chain_type == BlockfileType::ASSUMED);
806 : 11 : const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
807 [ + - ]: 11 : m_blockfile_cursors[chain_type] = new_cursor;
808 [ + - + - + - ]: 11 : LogDebug(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
809 : : }
810 [ + + ]: 126253 : const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
811 : :
812 : 126253 : int nFile = last_blockfile;
813 [ + + ]: 126253 : if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
814 [ + - ]: 18 : m_blockfile_info.resize(nFile + 1);
815 : : }
816 : :
817 : 126253 : bool finalize_undo = false;
818 : 126253 : unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
819 : : // Use smaller blockfiles in test-only -fastprune mode - but avoid
820 : : // the possibility of having a block not fit into the block file.
821 [ + + ]: 126253 : if (m_opts.fast_prune) {
822 : 17888 : max_blockfile_size = 0x10000; // 64kiB
823 [ + + ]: 17888 : if (nAddSize >= max_blockfile_size) {
824 : : // dynamically adjust the blockfile size to be larger than the added size
825 : 2 : max_blockfile_size = nAddSize + 1;
826 : : }
827 : : }
828 [ - + ]: 126253 : assert(nAddSize < max_blockfile_size);
829 : :
830 [ + + ]: 126381 : while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
831 : : // when the undo file is keeping up with the block file, we want to flush it explicitly
832 : : // when it is lagging behind (more blocks arrive than are being connected), we let the
833 : : // undo block write case handle it
834 : 256 : finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
835 [ + - + - ]: 128 : Assert(m_blockfile_cursors[chain_type])->undo_height);
836 : :
837 : : // Try the next unclaimed blockfile number
838 : 128 : nFile = this->MaxBlockfileNum() + 1;
839 : : // Set to increment MaxBlockfileNum() for next iteration
840 [ + - ]: 128 : m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
841 : :
842 [ + - ]: 128 : if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
843 [ + - ]: 128 : m_blockfile_info.resize(nFile + 1);
844 : : }
845 : : }
846 : 126253 : FlatFilePos pos;
847 : 126253 : pos.nFile = nFile;
848 : 126253 : pos.nPos = m_blockfile_info[nFile].nSize;
849 : :
850 [ + + ]: 126253 : if (nFile != last_blockfile) {
851 [ + - + - + - + - ]: 256 : LogDebug(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
852 : : last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
853 : :
854 : : // Do not propagate the return code. The flush concerns a previous block
855 : : // and undo file that has already been written to. If a flush fails
856 : : // here, and we crash, there is no expected additional block data
857 : : // inconsistency arising from the flush failure here. However, the undo
858 : : // data may be inconsistent after a crash if the flush is called during
859 : : // a reindex. A flush error might also leave some of the data files
860 : : // untrimmed.
861 [ + - - + ]: 128 : if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) {
862 [ # # # # # # ]: 0 : LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
863 : : "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n",
864 : : last_blockfile, finalize_undo, nFile);
865 : : }
866 : : // No undo data yet in the new file, so reset our undo-height tracking.
867 [ + - ]: 128 : m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
868 : : }
869 : :
870 : 126253 : m_blockfile_info[nFile].AddBlock(nHeight, nTime);
871 [ + - ]: 126253 : m_blockfile_info[nFile].nSize += nAddSize;
872 : :
873 : 126253 : bool out_of_space;
874 [ + - ]: 126253 : size_t bytes_allocated = m_block_file_seq.Allocate(pos, nAddSize, out_of_space);
875 [ - + ]: 126253 : if (out_of_space) {
876 [ # # # # ]: 0 : m_opts.notifications.fatalError(_("Disk space is too low!"));
877 : 0 : return {};
878 : : }
879 [ + + + + ]: 126253 : if (bytes_allocated != 0 && IsPruneMode()) {
880 : 446 : m_check_for_pruning = true;
881 : : }
882 : :
883 [ + - ]: 126253 : m_dirty_fileinfo.insert(nFile);
884 : 126253 : return pos;
885 : 126253 : }
886 : :
887 : 2431 : void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos)
888 : : {
889 : 2431 : LOCK(cs_LastBlockFile);
890 : :
891 : : // Update the cursor so it points to the last file.
892 [ + - ]: 2431 : const BlockfileType chain_type{BlockfileTypeForHeight(nHeight)};
893 [ + - ]: 2431 : auto& cursor{m_blockfile_cursors[chain_type]};
894 [ + - + + ]: 2431 : if (!cursor || cursor->file_num < pos.nFile) {
895 [ + - ]: 1 : m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile};
896 : : }
897 : :
898 : : // Update the file information with the current block.
899 : 2431 : const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block));
900 : 2431 : const int nFile = pos.nFile;
901 [ + + ]: 2431 : if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
902 [ + - ]: 15 : m_blockfile_info.resize(nFile + 1);
903 : : }
904 : 2431 : m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime());
905 [ + + ]: 2431 : m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize);
906 [ + - ]: 2431 : m_dirty_fileinfo.insert(nFile);
907 : 2431 : }
908 : :
909 : 124808 : bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
910 : : {
911 : 124808 : pos.nFile = nFile;
912 : :
913 : 124808 : LOCK(cs_LastBlockFile);
914 : :
915 [ + - ]: 124808 : pos.nPos = m_blockfile_info[nFile].nUndoSize;
916 : 124808 : m_blockfile_info[nFile].nUndoSize += nAddSize;
917 [ + - ]: 124808 : m_dirty_fileinfo.insert(nFile);
918 : :
919 : 124808 : bool out_of_space;
920 [ + - ]: 124808 : size_t bytes_allocated = m_undo_file_seq.Allocate(pos, nAddSize, out_of_space);
921 [ - + ]: 124808 : if (out_of_space) {
922 [ # # # # ]: 0 : return FatalError(m_opts.notifications, state, _("Disk space is too low!"));
923 : : }
924 [ + + + + ]: 124808 : if (bytes_allocated != 0 && IsPruneMode()) {
925 : 100 : m_check_for_pruning = true;
926 : : }
927 : :
928 : : return true;
929 : 124808 : }
930 : :
931 : 130131 : bool BlockManager::WriteBlockUndo(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
932 : : {
933 : 130131 : AssertLockHeld(::cs_main);
934 : 130131 : const BlockfileType type = BlockfileTypeForHeight(block.nHeight);
935 [ + + + - ]: 260262 : auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
936 : :
937 : : // Write undo information to disk
938 [ + + ]: 130131 : if (block.GetUndoPos().IsNull()) {
939 : 124808 : FlatFilePos pos;
940 : 124808 : const auto blockundo_size{static_cast<uint32_t>(GetSerializeSize(blockundo))};
941 [ - + ]: 124808 : if (!FindUndoPos(state, block.nFile, pos, blockundo_size + UNDO_DATA_DISK_OVERHEAD)) {
942 [ # # ]: 0 : LogError("FindUndoPos failed for %s while writing block undo", pos.ToString());
943 : 0 : return false;
944 : : }
945 : :
946 : : // Open history file to append
947 : 124808 : AutoFile file{OpenUndoFile(pos)};
948 [ - + ]: 124808 : if (file.IsNull()) {
949 [ # # # # ]: 0 : LogError("OpenUndoFile failed for %s while writing block undo", pos.ToString());
950 [ # # # # ]: 0 : return FatalError(m_opts.notifications, state, _("Failed to write undo data."));
951 : : }
952 : 124808 : {
953 [ + - ]: 124808 : BufferedWriter fileout{file};
954 : :
955 : : // Write index header
956 [ + - + - ]: 124808 : fileout << GetParams().MessageStart() << blockundo_size;
957 : 124808 : pos.nPos += STORAGE_HEADER_BYTES;
958 : 124808 : {
959 : : // Calculate checksum
960 [ + - ]: 124808 : HashWriter hasher{};
961 [ + - + - ]: 124808 : hasher << block.pprev->GetBlockHash() << blockundo;
962 : : // Write undo data & checksum
963 [ + - + - ]: 249616 : fileout << blockundo << hasher.GetHash();
964 : : }
965 : : // BufferedWriter will flush pending data to file when fileout goes out of scope.
966 : 0 : }
967 : :
968 : : // Make sure that the file is closed before we call `FlushUndoFile`.
969 [ + - - + ]: 249616 : if (file.fclose() != 0) {
970 [ # # # # # # ]: 0 : LogError("Failed to close block undo file %s: %s", pos.ToString(), SysErrorString(errno));
971 [ # # # # ]: 0 : return FatalError(m_opts.notifications, state, _("Failed to close block undo file."));
972 : : }
973 : :
974 : : // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
975 : : // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
976 : : // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
977 : : // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
978 : : // the FindNextBlockPos function
979 [ + + + + ]: 124808 : if (pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[pos.nFile].nHeightLast) {
980 : : // Do not propagate the return code, a failed flush here should not
981 : : // be an indication for a failed write. If it were propagated here,
982 : : // the caller would assume the undo data not to be written, when in
983 : : // fact it is. Note though, that a failed flush might leave the data
984 : : // file untrimmed.
985 [ + - - + ]: 1 : if (!FlushUndoFile(pos.nFile, true)) {
986 [ # # # # # # ]: 0 : LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", pos.nFile);
987 : : }
988 [ + + + + ]: 124807 : } else if (pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) {
989 : 115134 : cursor.undo_height = block.nHeight;
990 : : }
991 : : // update nUndoPos in block index
992 : 124808 : block.nUndoPos = pos.nPos;
993 : 124808 : block.nStatus |= BLOCK_HAVE_UNDO;
994 [ + - ]: 124808 : m_dirty_blockindex.insert(&block);
995 : 124808 : }
996 : :
997 : : return true;
998 : : }
999 : :
1000 : 122339 : bool BlockManager::ReadBlock(CBlock& block, const FlatFilePos& pos, const std::optional<uint256>& expected_hash) const
1001 : : {
1002 : 122339 : block.SetNull();
1003 : :
1004 : : // Open history file to read
1005 : 122339 : std::vector<std::byte> block_data;
1006 [ + - + + ]: 122339 : if (!ReadRawBlock(block_data, pos)) {
1007 : : return false;
1008 : : }
1009 : :
1010 : 122233 : try {
1011 : : // Read block
1012 [ + - ]: 122233 : SpanReader{block_data} >> TX_WITH_WITNESS(block);
1013 [ - - ]: 0 : } catch (const std::exception& e) {
1014 [ - - - - ]: 0 : LogError("Deserialize or I/O error - %s at %s while reading block", e.what(), pos.ToString());
1015 : 0 : return false;
1016 : 0 : }
1017 : :
1018 [ + - ]: 122233 : const auto block_hash{block.GetHash()};
1019 : :
1020 : : // Check the header
1021 [ + - + + ]: 122233 : if (!CheckProofOfWork(block_hash, block.nBits, GetConsensus())) {
1022 [ + - + - ]: 3 : LogError("Errors in block header at %s while reading block", pos.ToString());
1023 : 3 : return false;
1024 : : }
1025 : :
1026 : : // Signet only: check block solution
1027 [ + + + - - + ]: 122230 : if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) {
1028 [ # # # # ]: 0 : LogError("Errors in block solution at %s while reading block", pos.ToString());
1029 : 0 : return false;
1030 : : }
1031 : :
1032 [ + + + + ]: 122230 : if (expected_hash && block_hash != *expected_hash) {
1033 [ + - + - + - + - ]: 2 : LogError("GetHash() doesn't match index at %s while reading block (%s != %s)",
1034 : : pos.ToString(), block_hash.ToString(), expected_hash->ToString());
1035 : 1 : return false;
1036 : : }
1037 : :
1038 : : return true;
1039 : 122339 : }
1040 : :
1041 : 116675 : bool BlockManager::ReadBlock(CBlock& block, const CBlockIndex& index) const
1042 : : {
1043 [ + - ]: 233350 : const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())};
1044 : 116675 : return ReadBlock(block, block_pos, index.GetBlockHash());
1045 : : }
1046 : :
1047 : 162607 : bool BlockManager::ReadRawBlock(std::vector<std::byte>& block, const FlatFilePos& pos) const
1048 : : {
1049 [ + + ]: 162607 : if (pos.nPos < STORAGE_HEADER_BYTES) {
1050 : : // If nPos is less than STORAGE_HEADER_BYTES, we can't read the header that precedes the block data
1051 : : // This would cause an unsigned integer underflow when trying to position the file cursor
1052 : : // This can happen after pruning or default constructed positions
1053 [ + - ]: 103 : LogError("Failed for %s while reading raw block storage header", pos.ToString());
1054 : 103 : return false;
1055 : : }
1056 : 162504 : AutoFile filein{OpenBlockFile({pos.nFile, pos.nPos - STORAGE_HEADER_BYTES}, /*fReadOnly=*/true)};
1057 [ + + ]: 162504 : if (filein.IsNull()) {
1058 [ + - + - ]: 4 : LogError("OpenBlockFile failed for %s while reading raw block", pos.ToString());
1059 : 4 : return false;
1060 : : }
1061 : :
1062 : 162500 : try {
1063 : 162500 : MessageStartChars blk_start;
1064 : 162500 : unsigned int blk_size;
1065 : :
1066 [ + - + - ]: 162500 : filein >> blk_start >> blk_size;
1067 : :
1068 [ + + ]: 162500 : if (blk_start != GetParams().MessageStart()) {
1069 [ + - + - + - + - ]: 2 : LogError("Block magic mismatch for %s: %s versus expected %s while reading raw block",
1070 : : pos.ToString(), HexStr(blk_start), HexStr(GetParams().MessageStart()));
1071 : 1 : return false;
1072 : : }
1073 : :
1074 [ - + ]: 162499 : if (blk_size > MAX_SIZE) {
1075 [ # # # # ]: 0 : LogError("Block data is larger than maximum deserialization size for %s: %s versus %s while reading raw block",
1076 : : pos.ToString(), blk_size, MAX_SIZE);
1077 : 0 : return false;
1078 : : }
1079 : :
1080 [ + - ]: 162499 : block.resize(blk_size); // Zeroing of memory is intentional here
1081 [ + - ]: 162499 : filein.read(block);
1082 [ - - ]: 0 : } catch (const std::exception& e) {
1083 [ - - - - ]: 0 : LogError("Read from block file failed: %s for %s while reading raw block", e.what(), pos.ToString());
1084 : 0 : return false;
1085 : 0 : }
1086 : :
1087 : : return true;
1088 : 162504 : }
1089 : :
1090 : 126253 : FlatFilePos BlockManager::WriteBlock(const CBlock& block, int nHeight)
1091 : : {
1092 : 126253 : const unsigned int block_size{static_cast<unsigned int>(GetSerializeSize(TX_WITH_WITNESS(block)))};
1093 : 126253 : FlatFilePos pos{FindNextBlockPos(block_size + STORAGE_HEADER_BYTES, nHeight, block.GetBlockTime())};
1094 [ - + ]: 126253 : if (pos.IsNull()) {
1095 [ # # ]: 0 : LogError("FindNextBlockPos failed for %s while writing block", pos.ToString());
1096 : 0 : return FlatFilePos();
1097 : : }
1098 : 126253 : AutoFile file{OpenBlockFile(pos, /*fReadOnly=*/false)};
1099 [ - + ]: 126253 : if (file.IsNull()) {
1100 [ # # # # ]: 0 : LogError("OpenBlockFile failed for %s while writing block", pos.ToString());
1101 [ # # # # ]: 0 : m_opts.notifications.fatalError(_("Failed to write block."));
1102 : 0 : return FlatFilePos();
1103 : : }
1104 : 126253 : {
1105 [ + - ]: 126253 : BufferedWriter fileout{file};
1106 : :
1107 : : // Write index header
1108 [ + - + - ]: 126253 : fileout << GetParams().MessageStart() << block_size;
1109 : 126253 : pos.nPos += STORAGE_HEADER_BYTES;
1110 : : // Write block
1111 [ + - ]: 252506 : fileout << TX_WITH_WITNESS(block);
1112 : 0 : }
1113 : :
1114 [ + - - + ]: 252506 : if (file.fclose() != 0) {
1115 [ # # # # # # ]: 0 : LogError("Failed to close block file %s: %s", pos.ToString(), SysErrorString(errno));
1116 [ # # # # ]: 0 : m_opts.notifications.fatalError(_("Failed to close file when writing block."));
1117 : 0 : return FlatFilePos();
1118 : : }
1119 : :
1120 : 126253 : return pos;
1121 : 126253 : }
1122 : :
1123 : 1168 : static auto InitBlocksdirXorKey(const BlockManager::Options& opts)
1124 : : {
1125 : : // Bytes are serialized without length indicator, so this is also the exact
1126 : : // size of the XOR-key file.
1127 : 1168 : std::array<std::byte, Obfuscation::KEY_SIZE> obfuscation{};
1128 : :
1129 : : // Consider this to be the first run if the blocksdir contains only hidden
1130 : : // files (those which start with a .). Checking for a fully-empty dir would
1131 : : // be too aggressive as a .lock file may have already been written.
1132 : 1168 : bool first_run = true;
1133 [ + + + + + + + + + + ]: 5128 : for (const auto& entry : fs::directory_iterator(opts.blocks_dir)) {
1134 [ + - + - ]: 3960 : const std::string path = fs::PathToString(entry.path().filename());
1135 [ + - + - ]: 1264 : if (!entry.is_regular_file() || !path.starts_with('.')) {
1136 : 716 : first_run = false;
1137 : 716 : break;
1138 : : }
1139 [ + - + + + + - - ]: 2158 : }
1140 : :
1141 [ + + + + ]: 1168 : if (opts.use_xor && first_run) {
1142 : : // Only use random fresh key when the boolean option is set and on the
1143 : : // very first start of the program.
1144 : 452 : FastRandomContext{}.fillrand(obfuscation);
1145 : : }
1146 : :
1147 [ + - ]: 2336 : const fs::path xor_key_path{opts.blocks_dir / "xor.dat"};
1148 [ + - + + ]: 1168 : if (fs::exists(xor_key_path)) {
1149 : : // A pre-existing xor key file has priority.
1150 [ + - + - ]: 1428 : AutoFile xor_key_file{fsbridge::fopen(xor_key_path, "rb")};
1151 [ + - ]: 714 : xor_key_file >> obfuscation;
1152 : 714 : } else {
1153 : : // Create initial or missing xor key file
1154 : 454 : AutoFile xor_key_file{fsbridge::fopen(xor_key_path,
1155 : : #ifdef __MINGW64__
1156 : : "wb" // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210
1157 : : #else
1158 : : "wbx"
1159 : : #endif
1160 [ + - + - ]: 908 : )};
1161 [ + - ]: 454 : xor_key_file << obfuscation;
1162 [ + - - + ]: 908 : if (xor_key_file.fclose() != 0) {
1163 : 0 : throw std::runtime_error{strprintf("Error closing XOR key file %s: %s",
1164 [ # # ]: 0 : fs::PathToString(xor_key_path),
1165 [ # # # # ]: 0 : SysErrorString(errno))};
1166 : : }
1167 : 454 : }
1168 : : // If the user disabled the key, it must be zero.
1169 [ + + + + ]: 1170 : if (!opts.use_xor && obfuscation != decltype(obfuscation){}) {
1170 : 1 : throw std::runtime_error{
1171 [ + - ]: 2 : strprintf("The blocksdir XOR-key can not be disabled when a random key was already stored! "
1172 : : "Stored key: '%s', stored path: '%s'.",
1173 [ + - ]: 2 : HexStr(obfuscation), fs::PathToString(xor_key_path)),
1174 [ + - + - ]: 3 : };
1175 : : }
1176 [ + - + - + - ]: 3501 : LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(obfuscation));
1177 : 1167 : return Obfuscation{obfuscation};
1178 : 1167 : }
1179 : :
1180 : 1168 : BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts)
1181 : 1168 : : m_prune_mode{opts.prune_target > 0},
1182 [ + + ]: 1168 : m_obfuscation{InitBlocksdirXorKey(opts)},
1183 [ + - ]: 1167 : m_opts{std::move(opts)},
1184 [ + - + + + - ]: 2303 : m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}},
1185 [ + - + - ]: 1168 : m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}},
1186 [ + + + - + + ]: 2335 : m_interrupt{interrupt}
1187 : : {
1188 [ + + ]: 1167 : m_block_tree_db = std::make_unique<BlockTreeDB>(m_opts.block_tree_db_params);
1189 : :
1190 [ + + ]: 1166 : if (m_opts.block_tree_db_params.wipe_data) {
1191 [ + - ]: 18 : m_block_tree_db->WriteReindexing(true);
1192 [ + + ]: 18 : m_blockfiles_indexed = false;
1193 : : // If we're reindexing in prune mode, wipe away unusable block files and all undo data files
1194 [ + + ]: 18 : if (m_prune_mode) {
1195 [ + - ]: 5 : CleanupBlockRevFiles();
1196 : : }
1197 : : }
1198 : 1172 : }
1199 : :
1200 : : class ImportingNow
1201 : : {
1202 : : std::atomic<bool>& m_importing;
1203 : :
1204 : : public:
1205 : 958 : ImportingNow(std::atomic<bool>& importing) : m_importing{importing}
1206 : : {
1207 [ - + ]: 958 : assert(m_importing == false);
1208 : 958 : m_importing = true;
1209 : 958 : }
1210 : 958 : ~ImportingNow()
1211 : : {
1212 [ - + ]: 958 : assert(m_importing == true);
1213 : 958 : m_importing = false;
1214 : 958 : }
1215 : : };
1216 : :
1217 : 958 : void ImportBlocks(ChainstateManager& chainman, std::span<const fs::path> import_paths)
1218 : : {
1219 : 958 : ImportingNow imp{chainman.m_blockman.m_importing};
1220 : :
1221 : : // -reindex
1222 [ + + ]: 958 : if (!chainman.m_blockman.m_blockfiles_indexed) {
1223 : 19 : int nFile = 0;
1224 : : // Map of disk positions for blocks with unknown parent (only used for reindex);
1225 : : // parent hash -> child disk position, multiple children can have the same parent.
1226 : 19 : std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
1227 : 47 : while (true) {
1228 [ + - ]: 33 : FlatFilePos pos(nFile, 0);
1229 [ + - + + ]: 99 : if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) {
1230 : : break; // No block files left to reindex
1231 : : }
1232 [ + - ]: 16 : AutoFile file{chainman.m_blockman.OpenBlockFile(pos, /*fReadOnly=*/true)};
1233 [ + - ]: 16 : if (file.IsNull()) {
1234 : : break; // This error is logged in OpenBlockFile
1235 : : }
1236 [ + - ]: 16 : LogInfo("Reindexing block file blk%05u.dat...", (unsigned int)nFile);
1237 [ + - ]: 16 : chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
1238 [ + - + + ]: 16 : if (chainman.m_interrupt) {
1239 [ + - ]: 2 : LogInfo("Interrupt requested. Exit reindexing.");
1240 : 2 : return;
1241 : : }
1242 : 14 : nFile++;
1243 : 16 : }
1244 [ + - + - ]: 51 : WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
1245 [ + - ]: 17 : chainman.m_blockman.m_blockfiles_indexed = true;
1246 [ + - ]: 17 : LogInfo("Reindexing finished");
1247 : : // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
1248 [ + - + - ]: 17 : chainman.ActiveChainstate().LoadGenesisBlock();
1249 : 19 : }
1250 : :
1251 : : // -loadblock=
1252 [ + + ]: 957 : for (const fs::path& path : import_paths) {
1253 [ + - + - ]: 2 : AutoFile file{fsbridge::fopen(path, "rb")};
1254 [ + - ]: 1 : if (!file.IsNull()) {
1255 [ + - + - ]: 2 : LogInfo("Importing blocks file %s...", fs::PathToString(path));
1256 [ + - ]: 1 : chainman.LoadExternalBlockFile(file);
1257 [ + - - + ]: 1 : if (chainman.m_interrupt) {
1258 [ # # ]: 0 : LogInfo("Interrupt requested. Exit block importing.");
1259 : 0 : return;
1260 : : }
1261 : : } else {
1262 [ # # # # ]: 0 : LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
1263 : : }
1264 : 1 : }
1265 : :
1266 : : // scan for better chains in the block chain database, that are not yet connected in the active best chain
1267 : :
1268 : : // We can't hold cs_main during ActivateBestChain even though we're accessing
1269 : : // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
1270 : : // the relevant pointers before the ABC call.
1271 [ + - + + + - ]: 3829 : for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
1272 [ + - ]: 961 : BlockValidationState state;
1273 [ + - - + - + ]: 961 : if (!chainstate->ActivateBestChain(state, nullptr)) {
1274 [ # # # # # # ]: 0 : chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString()));
1275 : 0 : return;
1276 : : }
1277 : 961 : }
1278 : : // End scope of ImportingNow
1279 : 958 : }
1280 : :
1281 : 11 : std::ostream& operator<<(std::ostream& os, const BlockfileType& type) {
1282 [ - + - ]: 11 : switch(type) {
1283 : 0 : case BlockfileType::NORMAL: os << "normal"; break;
1284 : 11 : case BlockfileType::ASSUMED: os << "assumed"; break;
1285 : 0 : default: os.setstate(std::ios_base::failbit);
1286 : : }
1287 : 11 : return os;
1288 : : }
1289 : :
1290 : 11 : std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) {
1291 [ + - ]: 11 : os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
1292 : 11 : return os;
1293 : : }
1294 : : } // namespace node