// Copyright (c) 2017-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <chainparams.h>
#include <common/args.h>
#include <index/base.h>
#include <interfaces/chain.h>
#include <kernel/chain.h>
#include <logging.h>
#include <node/abort.h>
#include <node/blockstorage.h>
#include <node/context.h>
#include <node/database_args.h>
#include <node/interface_ui.h>
#include <tinyformat.h>
#include <util/string.h>
#include <util/thread.h>
#include <util/translation.h>
#include <validation.h> // For g_chainman

#include <string>
#include <utility>

constexpr uint8_t DB_BEST_BLOCK{'B'};

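// During initial sync (BaseIndex::Sync), progress logging and best-block
// locator writes are throttled to at most one per interval below.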
constexpr auto SYNC_LOG_INTERVAL{30s};
constexpr auto SYNC_LOCATOR_WRITE_INTERVAL{30s};

template <typename... Args>
void BaseIndex::FatalErrorf(util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args)
{
    auto message = tfm::format(fmt, args...);
    node::AbortNode(m_chain->context()->shutdown_request, m_chain->context()->exit_status, Untranslated(message), m_chain->context()->warnings.get());
}

CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash)
{
    CBlockLocator locator;
    bool found = chain.findBlock(block_hash, interfaces::FoundBlock().locator(locator));
    assert(found);
    assert(!locator.IsNull());
    return locator;
}

BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) :
    CDBWrapper{DBParams{
        .path = path,
        .cache_bytes = n_cache_size,
        .memory_only = f_memory,
        .wipe_data = f_wipe,
        .obfuscate = f_obfuscate,
        .options = [] { DBOptions options; node::ReadDatabaseArgs(gArgs, options); return options; }()}}
{}

bool BaseIndex::DB::ReadBestBlock(CBlockLocator& locator) const
{
    bool success = Read(DB_BEST_BLOCK, locator);
    if (!success) {
        locator.SetNull();
    }
    return success;
}

void BaseIndex::DB::WriteBestBlock(CDBBatch& batch, const CBlockLocator& locator)
{
    batch.Write(DB_BEST_BLOCK, locator);
}

BaseIndex::BaseIndex(std::unique_ptr<interfaces::Chain> chain, std::string name)
    : m_chain{std::move(chain)}, m_name{std::move(name)} {}

BaseIndex::~BaseIndex()
{
    Interrupt();
    Stop();
}

bool BaseIndex::Init()
{
    AssertLockNotHeld(cs_main);

    // May need reset if index is being restarted.
    m_interrupt.reset();

    // The m_chainstate member gives indexing code access to node internals. It is
    // removed in the follow-up https://github.com/bitcoin/bitcoin/pull/24230
    m_chainstate = WITH_LOCK(::cs_main,
        return &m_chain->context()->chainman->GetChainstateForIndexing());
    // Register with the validation interface before setting the 'm_synced' flag, so that
    // callbacks are not missed once m_synced is true.
    m_chain->context()->validation_signals->RegisterValidationInterface(this);

    CBlockLocator locator;
    if (!GetDB().ReadBestBlock(locator)) {
        locator.SetNull();
    }

    LOCK(cs_main);
    CChain& index_chain = m_chainstate->m_chain;

    if (locator.IsNull()) {
        SetBestBlockIndex(nullptr);
    } else {
        // Set the best block to the locator's top block. If it is not part of the
        // best chain, we will rewind to the fork point during index sync.
        const CBlockIndex* locator_index{m_chainstate->m_blockman.LookupBlockIndex(locator.vHave.at(0))};
        if (!locator_index) {
            return InitError(strprintf(Untranslated("%s: best block of the index not found. Please rebuild the index."), GetName()));
        }
        SetBestBlockIndex(locator_index);
    }

    // Child init
    const CBlockIndex* start_block = m_best_block_index.load();
    if (!CustomInit(start_block ? std::make_optional(interfaces::BlockRef{start_block->GetBlockHash(), start_block->nHeight}) : std::nullopt)) {
        return false;
    }

    // Note: this will latch to true immediately if the user starts up with an empty
    // datadir and an index enabled. If this is the case, indexing will happen solely
    // via `BlockConnected` signals until, possibly, the next restart.
    m_synced = start_block == index_chain.Tip();
    m_init = true;
    return true;
}

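// Determine the next block to index, given the last block that was indexed
// (pindex_prev): the genesis block if nothing has been indexed yet, the
// successor of pindex_prev if it is still in the active chain, or otherwise
// the block following the fork point between pindex_prev and the active chain.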
static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev, CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);

    if (!pindex_prev) {
        return chain.Genesis();
    }

    const CBlockIndex* pindex = chain.Next(pindex_prev);
    if (pindex) {
        return pindex;
    }

    return chain.Next(chain.FindFork(pindex_prev));
}

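// Runs on the m_thread_sync thread (see StartBackgroundSync). Walks the chain
// from the index's current best block toward the active tip, appending each
// block to the index and periodically committing progress, until it either
// catches up (setting m_synced) or is interrupted.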
void BaseIndex::Sync()
{
    const CBlockIndex* pindex = m_best_block_index.load();
    if (!m_synced) {
        std::chrono::steady_clock::time_point last_log_time{0s};
        std::chrono::steady_clock::time_point last_locator_write_time{0s};
        while (true) {
            if (m_interrupt) {
                LogPrintf("%s: m_interrupt set; exiting ThreadSync\n", GetName());

                SetBestBlockIndex(pindex);
                // No need to handle errors in Commit. If it fails, the error will already be
                // logged. The best way to recover is to continue, as the index cannot be corrupted
                // by a missed commit to disk for an advanced index state.
                Commit();
                return;
            }

            const CBlockIndex* pindex_next = WITH_LOCK(cs_main, return NextSyncBlock(pindex, m_chainstate->m_chain));
            // If pindex_next is null, it means pindex is the chain tip, so
            // commit data indexed so far.
            if (!pindex_next) {
                SetBestBlockIndex(pindex);
                // No need to handle errors in Commit. See rationale above.
                Commit();

                // If pindex is still the chain tip after committing, exit the
                // sync loop. It is important for cs_main to be locked while
                // setting m_synced = true, otherwise a new block could be
                // attached while m_synced is still false, and it would not be
                // indexed.
                LOCK(::cs_main);
                pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain);
                if (!pindex_next) {
                    m_synced = true;
                    break;
                }
            }
            if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) {
                FatalErrorf("%s: Failed to rewind index %s to a previous chain tip", __func__, GetName());
                return;
            }
            pindex = pindex_next;

            CBlock block;
            interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex);
            if (!m_chainstate->m_blockman.ReadBlockFromDisk(block, *pindex)) {
                FatalErrorf("%s: Failed to read block %s from disk",
                            __func__, pindex->GetBlockHash().ToString());
                return;
            } else {
                block_info.data = &block;
            }
            if (!CustomAppend(block_info)) {
                FatalErrorf("%s: Failed to write block %s to index database",
                            __func__, pindex->GetBlockHash().ToString());
                return;
            }

            auto current_time{std::chrono::steady_clock::now()};
            if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
                LogPrintf("Syncing %s with block chain from height %d\n",
                          GetName(), pindex->nHeight);
                last_log_time = current_time;
            }

            if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
                SetBestBlockIndex(pindex);
                last_locator_write_time = current_time;
                // No need to handle errors in Commit. See rationale above.
                Commit();
            }
        }
    }

    if (pindex) {
        LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight);
    } else {
        LogPrintf("%s is enabled\n", GetName());
    }
}

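// Persist the current index state: the subclass's CustomCommit data and the
// best block locator are written through a single CDBBatch. Returns false
// (after logging an error) if nothing has been indexed yet or any write fails.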
bool BaseIndex::Commit()
{
    // Don't commit anything if we haven't indexed any block yet
    // (this could happen if init is interrupted).
    bool ok = m_best_block_index != nullptr;
    if (ok) {
        CDBBatch batch(GetDB());
        ok = CustomCommit(batch);
        if (ok) {
            GetDB().WriteBestBlock(batch, GetLocator(*m_chain, m_best_block_index.load()->GetBlockHash()));
            ok = GetDB().WriteBatch(batch);
        }
    }
    if (!ok) {
        LogError("%s: Failed to commit latest %s state\n", __func__, GetName());
        return false;
    }
    return true;
}

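// Roll the index back from current_tip to new_tip, which must be an ancestor of
// current_tip. The actual removal of data is delegated to CustomRewind; the new
// best block is then persisted so the on-disk locator does not point at a
// reorged-out block.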
bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_tip)
{
    assert(current_tip == m_best_block_index);
    assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);

    if (!CustomRewind({current_tip->GetBlockHash(), current_tip->nHeight}, {new_tip->GetBlockHash(), new_tip->nHeight})) {
        return false;
    }

    // In the case of a reorg, ensure the persisted block locator is not stale.
    // Pruning has a minimum of 288 blocks-to-keep, so getting the index this far
    // out of sync is possible but would be the user's fault.
    // In case we reorg beyond the pruned depth, ReadBlockFromDisk would
    // throw and lead to a graceful shutdown.
    SetBestBlockIndex(new_tip);
    if (!Commit()) {
        // If commit fails, revert the best block index to avoid corruption.
        SetBestBlockIndex(current_tip);
        return false;
    }

    return true;
}

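// Validation interface callback: append a newly connected block to the index.
// Only acts once the initial sync has completed (m_synced); blocks connected
// before that are picked up by the Sync thread instead.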
void BaseIndex::BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex)
{
    // Ignore events from the assumed-valid chain; we will process its blocks
    // (sequentially) after it is fully verified by the background chainstate. This
    // is to avoid any out-of-order indexing.
    //
    // TODO at some point we could parameterize whether a particular index can be
    // built out of order, but for now just do the conservative simple thing.
    if (role == ChainstateRole::ASSUMEDVALID) {
        return;
    }

    // Ignore BlockConnected signals until we have fully indexed the chain.
    if (!m_synced) {
        return;
    }

    const CBlockIndex* best_block_index = m_best_block_index.load();
    if (!best_block_index) {
        if (pindex->nHeight != 0) {
            FatalErrorf("%s: First block connected is not the genesis block (height=%d)",
                        __func__, pindex->nHeight);
            return;
        }
    } else {
        // Ensure block connects to an ancestor of the current best block. This should be the case
        // most of the time, but may not be immediately after the sync thread catches up and sets
        // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
        // in the ValidationInterface queue backlog even after the sync thread has caught up to the
        // new chain tip. In this unlikely event, log a warning and let the queue clear.
        if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
            LogPrintf("%s: WARNING: Block %s does not connect to an ancestor of "
                      "known best chain (tip=%s); not updating index\n",
                      __func__, pindex->GetBlockHash().ToString(),
                      best_block_index->GetBlockHash().ToString());
            return;
        }
        if (best_block_index != pindex->pprev && !Rewind(best_block_index, pindex->pprev)) {
            FatalErrorf("%s: Failed to rewind index %s to a previous chain tip",
                        __func__, GetName());
            return;
        }
    }
    interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex, block.get());
    if (CustomAppend(block_info)) {
        // Setting the best block index is intentionally the last step of this
        // function, so BlockUntilSyncedToCurrentChain callers waiting for the
        // best block index to be updated can rely on the block being fully
        // processed, and the index object being safe to delete.
        SetBestBlockIndex(pindex);
    } else {
        FatalErrorf("%s: Failed to write block %s to index",
                    __func__, pindex->GetBlockHash().ToString());
        return;
    }
}

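// Validation interface callback: when the chainstate is flushed, commit the
// index state as well, after checking that the flushed locator's tip is an
// ancestor of (or equal to) the index's current best block.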
void BaseIndex::ChainStateFlushed(ChainstateRole role, const CBlockLocator& locator)
{
    // Ignore events from the assumed-valid chain; we will process its blocks
    // (sequentially) after it is fully verified by the background chainstate.
    if (role == ChainstateRole::ASSUMEDVALID) {
        return;
    }

    if (!m_synced) {
        return;
    }

    const uint256& locator_tip_hash = locator.vHave.front();
    const CBlockIndex* locator_tip_index;
    {
        LOCK(cs_main);
        locator_tip_index = m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash);
    }

    if (!locator_tip_index) {
        FatalErrorf("%s: First block (hash=%s) in locator was not found",
                    __func__, locator_tip_hash.ToString());
        return;
    }

    // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
    // immediately after the sync thread catches up and sets m_synced. Consider the case where
    // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
    // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
    // event, log a warning and let the queue clear.
    const CBlockIndex* best_block_index = m_best_block_index.load();
    if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
        LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known best "
                  "chain (tip=%s); not writing index locator\n",
                  __func__, locator_tip_hash.ToString(),
                  best_block_index->GetBlockHash().ToString());
        return;
    }

    // No need to handle errors in Commit. If it fails, the error will already be logged. The
    // best way to recover is to continue, as the index cannot be corrupted by a missed commit
    // to disk for an advanced index state.
    Commit();
}

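// Block until the index has processed all validation notifications up to the
// current chain tip. Returns false immediately if the initial sync has not
// finished; otherwise drains the validation queue if needed and returns true.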
bool BaseIndex::BlockUntilSyncedToCurrentChain() const
{
    AssertLockNotHeld(cs_main);

    if (!m_synced) {
        return false;
    }

    {
        // Skip draining the validation queue below if we know we're already
        // caught up with m_chain.Tip().
        LOCK(cs_main);
        const CBlockIndex* chain_tip = m_chainstate->m_chain.Tip();
        const CBlockIndex* best_block_index = m_best_block_index.load();
        if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
            return true;
        }
    }

    LogPrintf("%s: %s is catching up on block notifications\n", __func__, GetName());
    m_chain->context()->validation_signals->SyncWithValidationInterfaceQueue();
    return true;
}

void BaseIndex::Interrupt()
{
    m_interrupt();
}

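// Expected lifecycle, as implied by this file: Init() must succeed before
// StartBackgroundSync() is called (it throws otherwise); Interrupt() and Stop()
// may be called more than once and are also invoked by the destructor.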
bool BaseIndex::StartBackgroundSync()
{
    if (!m_init) throw std::logic_error("Error: Cannot start a non-initialized index");

    m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { Sync(); });
    return true;
}

void BaseIndex::Stop()
{
    if (m_chain->context()->validation_signals) {
        m_chain->context()->validation_signals->UnregisterValidationInterface(this);
    }

    if (m_thread_sync.joinable()) {
        m_thread_sync.join();
    }
}

IndexSummary BaseIndex::GetSummary() const
{
    IndexSummary summary{};
    summary.name = GetName();
    summary.synced = m_synced;
    if (const auto& pindex = m_best_block_index.load()) {
        summary.best_block_height = pindex->nHeight;
        summary.best_block_hash = pindex->GetBlockHash();
    } else {
        summary.best_block_height = 0;
        summary.best_block_hash = m_chain->getBlockHash(0);
    }
    return summary;
}

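// Update the in-memory best block pointer. For indexes that allow pruning, the
// per-index prune lock is first updated to the new block's height so block data
// the index still needs is not pruned away.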
void BaseIndex::SetBestBlockIndex(const CBlockIndex* block)
{
    assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune());

    if (AllowPrune() && block) {
        node::PruneLockInfo prune_lock;
        prune_lock.height_first = block->nHeight;
        WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock(GetName(), prune_lock));
    }

    // Intentionally set m_best_block_index as the last step in this function,
    // after updating prune locks above, and after making any other references
    // to *this, so the BlockUntilSyncedToCurrentChain function (which checks
    // m_best_block_index as an optimization) can be used to wait for the last
    // BlockConnected notification and safely assume that prune locks are
    // updated and that the index object is safe to delete.
    m_best_block_index = block;
}