// Copyright (c) 2017-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <chainparams.h>
#include <common/args.h>
#include <index/base.h>
#include <interfaces/chain.h>
#include <kernel/chain.h>
#include <logging.h>
#include <node/abort.h>
#include <node/blockstorage.h>
#include <node/context.h>
#include <node/database_args.h>
#include <node/interface_ui.h>
#include <tinyformat.h>
#include <undo.h>
#include <util/string.h>
#include <util/thread.h>
#include <util/translation.h>
#include <validation.h>

#include <chrono>
#include <memory>
#include <optional>
#include <stdexcept>
#include <string>
#include <thread>
#include <utility>

constexpr uint8_t DB_BEST_BLOCK{'B'};

constexpr auto SYNC_LOG_INTERVAL{30s};
constexpr auto SYNC_LOCATOR_WRITE_INTERVAL{30s};

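// Format an unrecoverable index error message and abort the node via node::AbortNode().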
template <typename... Args>
void BaseIndex::FatalErrorf(util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args)
{
    auto message = tfm::format(fmt, args...);
    node::AbortNode(m_chain->context()->shutdown_request, m_chain->context()->exit_status, Untranslated(message), m_chain->context()->warnings.get());
}

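// Build a locator for the given block hash; the block is expected to exist in
// the chain's block index, so the lookup is asserted to succeed.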
CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash)
{
    CBlockLocator locator;
    bool found = chain.findBlock(block_hash, interfaces::FoundBlock().locator(locator));
    assert(found);
    assert(!locator.IsNull());
    return locator;
}

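// Open (or create) the index database at the given path, forwarding the cache
// size and memory/wipe/obfuscate flags to CDBWrapper and applying any database
// options from the global argument manager.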
BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) :
    CDBWrapper{DBParams{
        .path = path,
        .cache_bytes = n_cache_size,
        .memory_only = f_memory,
        .wipe_data = f_wipe,
        .obfuscate = f_obfuscate,
        .options = [] { DBOptions options; node::ReadDatabaseArgs(gArgs, options); return options; }()}}
{}

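// Read the best block locator committed by the index; the locator is set to
// null and false is returned if no entry exists.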
bool BaseIndex::DB::ReadBestBlock(CBlockLocator& locator) const
{
    bool success = Read(DB_BEST_BLOCK, locator);
    if (!success) {
        locator.SetNull();
    }
    return success;
}

void BaseIndex::DB::WriteBestBlock(CDBBatch& batch, const CBlockLocator& locator)
{
    batch.Write(DB_BEST_BLOCK, locator);
}

BaseIndex::BaseIndex(std::unique_ptr<interfaces::Chain> chain, std::string name)
    : m_chain{std::move(chain)}, m_name{std::move(name)} {}

BaseIndex::~BaseIndex()
{
    Interrupt();
    Stop();
}

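// Load the last committed best block from the database, let the child class
// initialize via CustomInit(), and determine whether the index is already in
// sync with the active chain tip. The background sync thread is started
// separately by StartBackgroundSync().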
bool BaseIndex::Init()
{
    AssertLockNotHeld(cs_main);

    // May need reset if index is being restarted.
    m_interrupt.reset();

    // m_chainstate member gives indexing code access to node internals. It is
    // removed in followup https://github.com/bitcoin/bitcoin/pull/24230
    m_chainstate = WITH_LOCK(::cs_main,
        return &m_chain->context()->chainman->GetChainstateForIndexing());
    // Register with the validation interface before setting the 'm_synced' flag,
    // so that callbacks are not missed once m_synced is true.
    m_chain->context()->validation_signals->RegisterValidationInterface(this);

    CBlockLocator locator;
    if (!GetDB().ReadBestBlock(locator)) {
        locator.SetNull();
    }

    LOCK(cs_main);
    CChain& index_chain = m_chainstate->m_chain;

    if (locator.IsNull()) {
        SetBestBlockIndex(nullptr);
    } else {
        // Set the best block to the locator's top block. If it is not part of the
        // best chain, we will rewind to the fork point during index sync.
        const CBlockIndex* locator_index{m_chainstate->m_blockman.LookupBlockIndex(locator.vHave.at(0))};
        if (!locator_index) {
            return InitError(Untranslated(strprintf("%s: best block of the index not found. Please rebuild the index.", GetName())));
        }
        SetBestBlockIndex(locator_index);
    }

    // Child init
    const CBlockIndex* start_block = m_best_block_index.load();
    if (!CustomInit(start_block ? std::make_optional(interfaces::BlockRef{start_block->GetBlockHash(), start_block->nHeight}) : std::nullopt)) {
        return false;
    }

    // Note: this will latch to true immediately if the user starts up with an empty
    // datadir and an index enabled. If this is the case, indexing will happen solely
    // via `BlockConnected` signals until, possibly, the next restart.
    m_synced = start_block == index_chain.Tip();
    m_init = true;
    return true;
}

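// Return the next block for the index to process: the genesis block when nothing
// has been indexed yet, the successor of pindex_prev when it is on the active
// chain, or the block after the fork point otherwise.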
static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev, CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);

    if (!pindex_prev) {
        return chain.Genesis();
    }

    const CBlockIndex* pindex = chain.Next(pindex_prev);
    if (pindex) {
        return pindex;
    }

    // Since the block is not in the chain, return the next block in the chain AFTER the last common ancestor.
    // The caller is responsible for rewinding back to the common ancestor.
    return chain.Next(chain.FindFork(pindex_prev));
}

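// Read the block from disk if it was not supplied, optionally read its undo data
// (per CustomOptions()), and hand the result to CustomAppend(). Raises a fatal
// error and returns false on failure.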
bool BaseIndex::ProcessBlock(const CBlockIndex* pindex, const CBlock* block_data)
{
    interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex, block_data);

    CBlock block;
    if (!block_data) { // disk lookup if block data wasn't provided
        if (!m_chainstate->m_blockman.ReadBlock(block, *pindex)) {
            FatalErrorf("%s: Failed to read block %s from disk",
                        __func__, pindex->GetBlockHash().ToString());
            return false;
        }
        block_info.data = &block;
    }

    CBlockUndo block_undo;
    if (CustomOptions().connect_undo_data) {
        if (pindex->nHeight > 0 && !m_chainstate->m_blockman.ReadBlockUndo(block_undo, *pindex)) {
            FatalErrorf("%s: Failed to read undo block data %s from disk",
                        __func__, pindex->GetBlockHash().ToString());
            return false;
        }
        block_info.undo_data = &block_undo;
    }

    if (!CustomAppend(block_info)) {
        FatalErrorf("%s: Failed to write block %s to index database",
                    __func__, pindex->GetBlockHash().ToString());
        return false;
    }

    return true;
}

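// Main loop of the background sync thread: walk the chain from the last indexed
// block toward the tip, appending one block per iteration, periodically logging
// progress and committing the best block locator, until the tip is reached or an
// interrupt is requested.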
void BaseIndex::Sync()
{
    const CBlockIndex* pindex = m_best_block_index.load();
    if (!m_synced) {
        std::chrono::steady_clock::time_point last_log_time{0s};
        std::chrono::steady_clock::time_point last_locator_write_time{0s};
        while (true) {
            if (m_interrupt) {
                LogPrintf("%s: m_interrupt set; exiting ThreadSync\n", GetName());

                SetBestBlockIndex(pindex);
                // No need to handle errors in Commit. If it fails, the error will already
                // have been logged. The best way to recover is to continue, as the index
                // cannot be corrupted by a missed commit to disk for an advanced index state.
                Commit();
                return;
            }

            const CBlockIndex* pindex_next = WITH_LOCK(cs_main, return NextSyncBlock(pindex, m_chainstate->m_chain));
            // If pindex_next is null, it means pindex is the chain tip, so
            // commit data indexed so far.
            if (!pindex_next) {
                SetBestBlockIndex(pindex);
                // No need to handle errors in Commit. See rationale above.
                Commit();

                // If pindex is still the chain tip after committing, exit the
                // sync loop. It is important for cs_main to be locked while
                // setting m_synced = true, otherwise a new block could be
                // attached while m_synced is still false, and it would not be
                // indexed.
                LOCK(::cs_main);
                pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain);
                if (!pindex_next) {
                    m_synced = true;
                    break;
                }
            }
            if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) {
                FatalErrorf("%s: Failed to rewind index %s to a previous chain tip", __func__, GetName());
                return;
            }
            pindex = pindex_next;

            if (!ProcessBlock(pindex)) return; // error logged internally

            auto current_time{std::chrono::steady_clock::now()};
            if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
                LogPrintf("Syncing %s with block chain from height %d\n",
                          GetName(), pindex->nHeight);
                last_log_time = current_time;
            }

            if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
                SetBestBlockIndex(pindex);
                last_locator_write_time = current_time;
                // No need to handle errors in Commit. See rationale above.
                Commit();
            }
        }
    }

    if (pindex) {
        LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight);
    } else {
        LogPrintf("%s is enabled\n", GetName());
    }
}

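// Write the child index state (CustomCommit) and the best block locator to the
// database in a single batch; nothing is written if no block has been indexed yet.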
bool BaseIndex::Commit()
{
    // Don't commit anything if we haven't indexed any block yet
    // (this could happen if init is interrupted).
    bool ok = m_best_block_index != nullptr;
    if (ok) {
        CDBBatch batch(GetDB());
        ok = CustomCommit(batch);
        if (ok) {
            GetDB().WriteBestBlock(batch, GetLocator(*m_chain, m_best_block_index.load()->GetBlockHash()));
            ok = GetDB().WriteBatch(batch);
        }
    }
    if (!ok) {
        LogError("%s: Failed to commit latest %s state\n", __func__, GetName());
        return false;
    }
    return true;
}

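// Disconnect blocks from current_tip back to new_tip (which must be an ancestor
// of current_tip), passing each block to CustomRemove(), then persist the new
// best block.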
bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_tip)
{
    assert(current_tip == m_best_block_index);
    assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);

    CBlock block;
    CBlockUndo block_undo;

    for (const CBlockIndex* iter_tip = current_tip; iter_tip != new_tip; iter_tip = iter_tip->pprev) {
        interfaces::BlockInfo block_info = kernel::MakeBlockInfo(iter_tip);
        if (CustomOptions().disconnect_data) {
            if (!m_chainstate->m_blockman.ReadBlock(block, *iter_tip)) {
                LogError("%s: Failed to read block %s from disk",
                         __func__, iter_tip->GetBlockHash().ToString());
                return false;
            }
            block_info.data = &block;
        }
        if (CustomOptions().disconnect_undo_data && iter_tip->nHeight > 0) {
            if (!m_chainstate->m_blockman.ReadBlockUndo(block_undo, *iter_tip)) {
                return false;
            }
            block_info.undo_data = &block_undo;
        }
        if (!CustomRemove(block_info)) {
            return false;
        }
    }

    // In the case of a reorg, ensure the persisted block locator is not stale.
    // Pruning has a minimum of 288 blocks-to-keep, so while getting the index out
    // of sync beyond that may be possible, it would be the user's fault.
    // In case we reorg beyond the pruned depth, ReadBlock would throw and lead to
    // a graceful shutdown.
    SetBestBlockIndex(new_tip);
    if (!Commit()) {
        // If commit fails, revert the best block index to avoid corruption.
        SetBestBlockIndex(current_tip);
        return false;
    }

    return true;
}

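// Validation interface callback: once the initial sync has finished, append each
// newly connected block to the index, rewinding first if the block does not
// extend the current best block.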
void BaseIndex::BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex)
{
    // Ignore events from the assumed-valid chain; we will process its blocks
    // (sequentially) after it is fully verified by the background chainstate. This
    // is to avoid any out-of-order indexing.
    //
    // TODO at some point we could parameterize whether a particular index can be
    // built out of order, but for now just do the conservative simple thing.
    if (role == ChainstateRole::ASSUMEDVALID) {
        return;
    }

    // Ignore BlockConnected signals until we have fully indexed the chain.
    if (!m_synced) {
        return;
    }

    const CBlockIndex* best_block_index = m_best_block_index.load();
    if (!best_block_index) {
        if (pindex->nHeight != 0) {
            FatalErrorf("%s: First block connected is not the genesis block (height=%d)",
                        __func__, pindex->nHeight);
            return;
        }
    } else {
        // Ensure block connects to an ancestor of the current best block. This should be the case
        // most of the time, but may not be immediately after the sync thread catches up and sets
        // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
        // in the ValidationInterface queue backlog even after the sync thread has caught up to the
        // new chain tip. In this unlikely event, log a warning and let the queue clear.
        if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
            LogPrintf("%s: WARNING: Block %s does not connect to an ancestor of "
                      "known best chain (tip=%s); not updating index\n",
                      __func__, pindex->GetBlockHash().ToString(),
                      best_block_index->GetBlockHash().ToString());
            return;
        }
        if (best_block_index != pindex->pprev && !Rewind(best_block_index, pindex->pprev)) {
            FatalErrorf("%s: Failed to rewind index %s to a previous chain tip",
                        __func__, GetName());
            return;
        }
    }

    // Dispatch block to child class; errors are logged internally and abort the node.
    if (ProcessBlock(pindex, block.get())) {
        // Setting the best block index is intentionally the last step of this
        // function, so BlockUntilSyncedToCurrentChain callers waiting for the
        // best block index to be updated can rely on the block being fully
        // processed, and the index object being safe to delete.
        SetBestBlockIndex(pindex);
    }
}

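// Validation interface callback: commit the index state when the chainstate is
// flushed, as long as the locator's tip is on the index's best chain.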
void BaseIndex::ChainStateFlushed(ChainstateRole role, const CBlockLocator& locator)
{
    // Ignore events from the assumed-valid chain; we will process its blocks
    // (sequentially) after it is fully verified by the background chainstate.
    if (role == ChainstateRole::ASSUMEDVALID) {
        return;
    }

    if (!m_synced) {
        return;
    }

    const uint256& locator_tip_hash = locator.vHave.front();
    const CBlockIndex* locator_tip_index;
    {
        LOCK(cs_main);
        locator_tip_index = m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash);
    }

    if (!locator_tip_index) {
        FatalErrorf("%s: First block (hash=%s) in locator was not found",
                    __func__, locator_tip_hash.ToString());
        return;
    }

    // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
    // immediately after the sync thread catches up and sets m_synced. Consider the case where
    // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
    // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
    // event, log a warning and let the queue clear.
    const CBlockIndex* best_block_index = m_best_block_index.load();
    if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
        LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known best "
                  "chain (tip=%s); not writing index locator\n",
                  __func__, locator_tip_hash.ToString(),
                  best_block_index->GetBlockHash().ToString());
        return;
    }

    // No need to handle errors in Commit. If it fails, the error will already have been
    // logged. The best way to recover is to continue, as the index cannot be corrupted by
    // a missed commit to disk for an advanced index state.
    Commit();
}

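// Return false if the initial sync is still running; otherwise wait until the
// validation notification queue has been drained, unless the index is already
// caught up with the chain tip.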
bool BaseIndex::BlockUntilSyncedToCurrentChain() const
{
    AssertLockNotHeld(cs_main);

    if (!m_synced) {
        return false;
    }

    {
        // Skip the queue-draining stuff if we know we're caught up with
        // m_chain.Tip().
        LOCK(cs_main);
        const CBlockIndex* chain_tip = m_chainstate->m_chain.Tip();
        const CBlockIndex* best_block_index = m_best_block_index.load();
        if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
            return true;
        }
    }

    LogPrintf("%s: %s is catching up on block notifications\n", __func__, GetName());
    m_chain->context()->validation_signals->SyncWithValidationInterfaceQueue();
    return true;
}

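// Signal the background sync thread to stop at the next opportunity.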
void BaseIndex::Interrupt()
{
    m_interrupt();
}

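// Start the background thread that runs Sync(); Init() must have completed first.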
bool BaseIndex::StartBackgroundSync()
{
    if (!m_init) throw std::logic_error("Error: Cannot start a non-initialized index");

    m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { Sync(); });
    return true;
}

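// Unregister from the validation interface and wait for the sync thread to finish.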
void BaseIndex::Stop()
{
    if (m_chain->context()->validation_signals) {
        m_chain->context()->validation_signals->UnregisterValidationInterface(this);
    }

    if (m_thread_sync.joinable()) {
        m_thread_sync.join();
    }
}

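// Summarize the index name, sync status, and best block; if nothing has been
// indexed yet, report height 0 and the genesis block hash.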
IndexSummary BaseIndex::GetSummary() const
{
    IndexSummary summary{};
    summary.name = GetName();
    summary.synced = m_synced;
    if (const auto& pindex = m_best_block_index.load()) {
        summary.best_block_height = pindex->nHeight;
        summary.best_block_hash = pindex->GetBlockHash();
    } else {
        summary.best_block_height = 0;
        summary.best_block_hash = m_chain->getBlockHash(0);
    }
    return summary;
}

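// Update the prune lock for prune-compatible indexes and then the in-memory best
// block pointer.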
void BaseIndex::SetBestBlockIndex(const CBlockIndex* block)
{
    assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune());

    if (AllowPrune() && block) {
        node::PruneLockInfo prune_lock;
        prune_lock.height_first = block->nHeight;
        WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock(GetName(), prune_lock));
    }

    // Intentionally set m_best_block_index as the last step in this function,
    // after updating prune locks above, and after making any other references
    // to *this, so the BlockUntilSyncedToCurrentChain function (which checks
    // m_best_block_index as an optimization) can be used to wait for the last
    // BlockConnected notification and safely assume that prune locks are
    // updated and that the index object is safe to delete.
    m_best_block_index = block;
}