// Copyright (c) 2016-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <blockencodings.h>
#include <chainparams.h>
#include <common/system.h>
#include <consensus/consensus.h>
#include <consensus/validation.h>
#include <crypto/sha256.h>
#include <crypto/siphash.h>
#include <logging.h>
#include <random.h>
#include <streams.h>
#include <txmempool.h>
#include <validation.h>

#include <unordered_map>

CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, const uint64_t nonce) :
        nonce(nonce),
        shorttxids(block.vtx.size() - 1), prefilledtxn(1), header(block) {
    FillShortTxIDSelector();
    //TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase
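    // For now only the coinbase is prefilled: the receiving peer can never have the
    // coinbase in its mempool, so always sending it avoids a guaranteed round trip
    // (see BIP 152).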
    prefilledtxn[0] = {0, block.vtx[0]};
    for (size_t i = 1; i < block.vtx.size(); i++) {
        const CTransaction& tx = *block.vtx[i];
        shorttxids[i - 1] = GetShortID(tx.GetWitnessHash());
    }
}

void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const {
    DataStream stream{};
    stream << header << nonce;
    CSHA256 hasher;
    hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
    uint256 shorttxidhash;
    hasher.Finalize(shorttxidhash.begin());
    shorttxidk0 = shorttxidhash.GetUint64(0);
    shorttxidk1 = shorttxidhash.GetUint64(1);
}

uint64_t CBlockHeaderAndShortTxIDs::GetShortID(const Wtxid& wtxid) const {
    static_assert(SHORTTXIDS_LENGTH == 6, "shorttxids calculation assumes 6-byte shorttxids");
    return SipHashUint256(shorttxidk0, shorttxidk1, wtxid) & 0xffffffffffffL;
}
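
// A note on the short ID scheme (per BIP 152): k0/k1 are the first two little-endian
// 64-bit words of SHA256(serialized header || nonce), computed in FillShortTxIDSelector(),
// and the short ID is the low 48 bits of SipHash-2-4(k0, k1, wtxid). As a rough estimate
// (not from the original source): matching a block of S transactions against a mempool of
// N transactions produces on the order of N*S / 2^48 spurious collisions, e.g. ~3e-6 for
// S = 3000 and N = 300000, which is why the code below can afford to handle collisions by
// simply falling back to re-requesting transactions.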



ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector<CTransactionRef>& extra_txn) {
    if (cmpctblock.header.IsNull() || (cmpctblock.shorttxids.empty() && cmpctblock.prefilledtxn.empty()))
        return READ_STATUS_INVALID;
    if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_WEIGHT / MIN_SERIALIZABLE_TRANSACTION_WEIGHT)
        return READ_STATUS_INVALID;

    if (!header.IsNull() || !txn_available.empty()) return READ_STATUS_INVALID;

    header = cmpctblock.header;
    txn_available.resize(cmpctblock.BlockTxCount());

    int32_t lastprefilledindex = -1;
    for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); i++) {
        if (cmpctblock.prefilledtxn[i].tx->IsNull())
            return READ_STATUS_INVALID;

        lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; //index is a uint16_t, so can't overflow here
        if (lastprefilledindex > std::numeric_limits<uint16_t>::max())
            return READ_STATUS_INVALID;
        if ((uint32_t)lastprefilledindex > cmpctblock.shorttxids.size() + i) {
            // If we are inserting a tx at an index greater than our full list of shorttxids
            // plus the number of prefilled txn we've inserted, then we have txn for which we
            // have neither a prefilled txn nor a shorttxid!
            return READ_STATUS_INVALID;
        }
        txn_available[lastprefilledindex] = cmpctblock.prefilledtxn[i].tx;
    }
    prefilled_count = cmpctblock.prefilledtxn.size();

    // Calculate map of short IDs -> positions and check mempool to see what we have (or don't)
    // Because well-formed cmpctblock messages will have a (relatively) uniform distribution
    // of short IDs, any highly-uneven distribution of elements can be safely treated as a
    // READ_STATUS_FAILED.
    std::unordered_map<uint64_t, uint16_t> shorttxids(cmpctblock.shorttxids.size());
    uint16_t index_offset = 0;
    for (size_t i = 0; i < cmpctblock.shorttxids.size(); i++) {
        while (txn_available[i + index_offset])
            index_offset++;
        shorttxids[cmpctblock.shorttxids[i]] = i + index_offset;
        // To determine the chance that the number of entries in a bucket exceeds N,
        // we use the fact that the number of elements in a single bucket is
        // binomially distributed (with n = the number of shorttxids S, and p =
        // 1 / the number of buckets), that in the worst case the number of buckets is
        // equal to S (due to std::unordered_map having a default load factor of 1.0),
        // and that the chance for any bucket to exceed N elements is at most
        // buckets * (the chance that any given bucket is above N elements).
        // Thus: P(max_elements_per_bucket > N) <= S * (1 - cdf(binomial(n=S,p=1/S), N)).
        // If we assume blocks of up to 16000, allowing 12 elements per bucket should
        // only fail once per ~1 million block transfers (per peer and connection).
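        // As a sanity check of that bound (an approximation, not from the original source):
        // for large S, Binomial(n=S, p=1/S) is close to Poisson(1), and
        // P(Poisson(1) > 12) = 1 - e^-1 * sum_{k=0..12} 1/k! ~= 6e-11, so with S = 16000 the
        // union bound gives roughly 16000 * 6e-11 ~= 1e-6, i.e. about one spurious failure
        // per million transfers, matching the figure above.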
        if (shorttxids.bucket_size(shorttxids.bucket(cmpctblock.shorttxids[i])) > 12)
            return READ_STATUS_FAILED;
    }
    // TODO: in the shortid-collision case, we should instead request both transactions
    // which collided. Falling back to full-block-request here is overkill.
    if (shorttxids.size() != cmpctblock.shorttxids.size())
        return READ_STATUS_FAILED; // Short ID collision

    std::vector<bool> have_txn(txn_available.size());
    {
        LOCK(pool->cs);
        for (const auto& tx : pool->txns_randomized) {
            uint64_t shortid = cmpctblock.GetShortID(tx->GetWitnessHash());
            std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
            if (idit != shorttxids.end()) {
                if (!have_txn[idit->second]) {
                    txn_available[idit->second] = tx;
                    have_txn[idit->second] = true;
                    mempool_count++;
                } else {
                    // If we find two mempool txn that match the short id, just request it.
                    // This should be rare enough that the extra bandwidth doesn't matter,
                    // but eating a round-trip due to FillBlock failure would be annoying
                    if (txn_available[idit->second]) {
                        txn_available[idit->second].reset();
                        mempool_count--;
                    }
                }
            }
            // Though ideally we'd continue scanning for the two-txn-match-shortid case,
            // the performance win of an early exit here is too good to pass up and worth
            // the extra risk.
            if (mempool_count == shorttxids.size())
                break;
        }
    }

    for (size_t i = 0; i < extra_txn.size(); i++) {
        if (extra_txn[i] == nullptr) {
            continue;
        }
        uint64_t shortid = cmpctblock.GetShortID(extra_txn[i]->GetWitnessHash());
        std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
        if (idit != shorttxids.end()) {
            if (!have_txn[idit->second]) {
                txn_available[idit->second] = extra_txn[i];
                have_txn[idit->second] = true;
                mempool_count++;
                extra_count++;
            } else {
                // If we find two mempool/extra txn that match the short id, just
                // request it.
                // This should be rare enough that the extra bandwidth doesn't matter,
                // but eating a round-trip due to FillBlock failure would be annoying
                // Note that we don't want duplication between extra_txn and mempool to
                // trigger this case, so we compare witness hashes first
                if (txn_available[idit->second] &&
                        txn_available[idit->second]->GetWitnessHash() != extra_txn[i]->GetWitnessHash()) {
                    txn_available[idit->second].reset();
                    mempool_count--;
                    extra_count--;
                }
            }
        }
        // Though ideally we'd continue scanning for the two-txn-match-shortid case,
        // the performance win of an early exit here is too good to pass up and worth
        // the extra risk.
        if (mempool_count == shorttxids.size())
            break;
    }

    LogDebug(BCLog::CMPCTBLOCK, "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock));

    return READ_STATUS_OK;
}
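
// Illustrative sketch of how a receiver typically drives this class (simplified and
// hypothetical; names like mempool, extra_txn and resp are placeholders, and the real
// flow, including failure handling, lives in the caller, e.g. net_processing):
//
//   PartiallyDownloadedBlock partial_block{&mempool};
//   if (partial_block.InitData(cmpctblock, extra_txn) != READ_STATUS_OK) {
//       /* fall back to requesting the full block */
//   }
//   BlockTransactionsRequest req;
//   req.blockhash = cmpctblock.header.GetHash();
//   for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
//       if (!partial_block.IsTxAvailable(i)) req.indexes.push_back(i);
//   }
//   // ... send a getblocktxn for req, receive a BlockTransactions message resp ...
//   CBlock block;
//   ReadStatus status = partial_block.FillBlock(block, resp.txn);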

bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const
{
    if (header.IsNull()) return false;

    assert(index < txn_available.size());
    return txn_available[index] != nullptr;
}

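// Note on the vtx_missing contract (a summary, not from the original source comments):
// vtx_missing must contain exactly the transactions whose indices were reported
// unavailable by IsTxAvailable(), in block order. For example, if indices 3 and 7 are
// unavailable, vtx_missing must be {tx at 3, tx at 7}; supplying too few or too many
// entries makes FillBlock() return READ_STATUS_INVALID, as checked below.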
ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing)
{
    if (header.IsNull()) return READ_STATUS_INVALID;

    uint256 hash = header.GetHash();
    block = header;
    block.vtx.resize(txn_available.size());

    size_t tx_missing_offset = 0;
    for (size_t i = 0; i < txn_available.size(); i++) {
        if (!txn_available[i]) {
            if (vtx_missing.size() <= tx_missing_offset)
                return READ_STATUS_INVALID;
            block.vtx[i] = vtx_missing[tx_missing_offset++];
        } else
            block.vtx[i] = std::move(txn_available[i]);
    }

    // Make sure we can't call FillBlock again.
    header.SetNull();
    txn_available.clear();

    if (vtx_missing.size() != tx_missing_offset)
        return READ_STATUS_INVALID;

    BlockValidationState state;
    CheckBlockFn check_block = m_check_block_mock ? m_check_block_mock : CheckBlock;
    if (!check_block(block, state, Params().GetConsensus(), /*fCheckPoW=*/true, /*fCheckMerkleRoot=*/true)) {
        // TODO: We really want to just check merkle tree manually here,
        // but that is expensive, and CheckBlock caches a block's
        // "checked-status" (in the CBlock?). CBlock should be able to
        // check its own merkle root and cache that check.
        if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED)
            return READ_STATUS_FAILED; // Possible Short ID collision
        return READ_STATUS_CHECKBLOCK_FAILED;
    }

    LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());
    if (vtx_missing.size() < 5) {
        for (const auto& tx : vtx_missing) {
            LogDebug(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
        }
    }

    return READ_STATUS_OK;
}