Branch data Line data Source code
1 : : // Copyright (c) 2020-2021 The Bitcoin Core developers
2 : : // Distributed under the MIT software license, see the accompanying
3 : : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
4 : :
5 : :
6 : : #include <txrequest.h>
7 : : #include <uint256.h>
8 : :
9 : : #include <test/util/random.h>
10 : : #include <test/util/setup_common.h>
11 : :
12 : : #include <algorithm>
13 : : #include <functional>
14 : : #include <vector>
15 : :
16 : : #include <boost/test/unit_test.hpp>
17 : :
18 : : namespace {
19 : :
20 : : class Scenario;
21 : :
/** Test fixture. Provides the shared RNG (via BasicTestingSetup's m_rng) and the
 * individual scenario-builder methods that are interleaved by TestInterleavedScenarios(). */
struct TxRequestTest : BasicTestingSetup {
    /** Random duration in [1us, 2^23us] (~8.4 seconds). */
    std::chrono::microseconds RandomTime8s();
    /** Random duration in [1us, 2^45us] (~1.1 years). */
    std::chrono::microseconds RandomTime1y();
    // Each Build*Test adds one self-contained test (with its own peers/txhashes) to a Scenario.
    void BuildSingleTest(Scenario& scenario, int config);
    void BuildPriorityTest(Scenario& scenario, int config);
    void BuildBigPriorityTest(Scenario& scenario, int peers);
    void BuildRequestOrderTest(Scenario& scenario, int config);
    void BuildWtxidTest(Scenario& scenario, int config);
    void BuildTimeBackwardsTest(Scenario& scenario);
    void BuildWeirdRequestsTest(Scenario& scenario);
    /** Run many randomly-chosen tests concurrently against one TxRequestTracker. */
    void TestInterleavedScenarios();
};
34 : :
/** Earliest representable time; used as a reqtime meaning "requestable immediately". */
constexpr std::chrono::microseconds MIN_TIME = std::chrono::microseconds::min();
/** Latest representable time; used as an expiry/reqtime that is never reached. */
constexpr std::chrono::microseconds MAX_TIME = std::chrono::microseconds::max();
/** One microsecond: the smallest simulated time step. */
constexpr std::chrono::microseconds MICROSECOND = std::chrono::microseconds{1};
/** Zero duration (no delay). */
constexpr std::chrono::microseconds NO_TIME = std::chrono::microseconds{0};

/** An Action is a function to call at a particular (simulated) timestamp. */
using Action = std::pair<std::chrono::microseconds, std::function<void()>>;
42 : :
43 : : /** Object that stores actions from multiple interleaved scenarios, and data shared across them.
44 : : *
45 : : * The Scenario below is used to fill this.
46 : : */
/** Object that stores actions from multiple interleaved scenarios, and data shared across them.
 *
 * The Scenario below is used to fill this.
 */
struct Runner
{
    /** The TxRequestTracker being tested. */
    TxRequestTracker txrequest;

    /** List of actions to be executed (in order of increasing timestamp). */
    std::vector<Action> actions;

    /** Which node ids have been assigned already (to prevent reuse). */
    std::set<NodeId> peerset;

    /** Which txhashes have been assigned already (to prevent reuse). */
    std::set<uint256> txhashset;

    /** Which (peer, gtxid) combinations are known to be expired. These need to be accumulated here instead of
     * checked directly in the GetRequestable return value to avoid introducing a dependency between the various
     * parallel tests. */
    std::multiset<std::pair<NodeId, GenTxid>> expired;
};
66 : :
67 : 8928 : std::chrono::microseconds TxRequestTest::RandomTime8s() { return std::chrono::microseconds{1 + m_rng.randbits(23)}; }
68 : 5 : std::chrono::microseconds TxRequestTest::RandomTime1y() { return std::chrono::microseconds{1 + m_rng.randbits(45)}; }
69 : :
70 : : /** A proxy for a Runner that helps build a sequence of consecutive test actions on a TxRequestTracker.
71 : : *
72 : : * Each Scenario is a proxy through which actions for the (sequential) execution of various tests are added to a
73 : : * Runner. The actions from multiple scenarios are then run concurrently, resulting in these tests being performed
74 : : * against a TxRequestTracker in parallel. Every test has its own unique txhashes and NodeIds which are not
75 : : * reused in other tests, and thus they should be independent from each other. Running them in parallel however
76 : : * means that we verify the behavior (w.r.t. one test's txhashes and NodeIds) even when the state of the data
77 : : * structure is more complicated due to the presence of other tests.
78 : : */
79 : 225 : class Scenario
80 : : {
81 : : FastRandomContext& m_rng;
82 : : Runner& m_runner;
83 : : std::chrono::microseconds m_now;
84 : : std::string m_testname;
85 : :
86 : : public:
87 : 225 : Scenario(FastRandomContext& rng, Runner& runner, std::chrono::microseconds starttime) : m_rng(rng), m_runner(runner), m_now(starttime) {}
88 : :
89 : : /** Set a name for the current test, to give more clear error messages. */
90 : 1600 : void SetTestName(std::string testname)
91 : : {
92 : 1600 : m_testname = std::move(testname);
93 : : }
94 : :
95 : : /** Advance this Scenario's time; this affects the timestamps newly scheduled events get. */
96 : 10093 : void AdvanceTime(std::chrono::microseconds amount)
97 : : {
98 [ - + ]: 10093 : assert(amount.count() >= 0);
99 : 10093 : m_now += amount;
100 : 10093 : }
101 : :
102 : : /** Schedule a ForgetTxHash call at the Scheduler's current time. */
103 : 400 : void ForgetTxHash(const uint256& txhash)
104 : : {
105 : 400 : auto& runner = m_runner;
106 : 400 : runner.actions.emplace_back(m_now, [=, &runner]() {
107 : 400 : runner.txrequest.ForgetTxHash(txhash);
108 : 400 : runner.txrequest.SanityCheck();
109 : 400 : });
110 : 400 : }
111 : :
112 : : /** Schedule a ReceivedInv call at the Scheduler's current time. */
113 : 5600 : void ReceivedInv(NodeId peer, const GenTxid& gtxid, bool pref, std::chrono::microseconds reqtime)
114 : : {
115 : 5600 : auto& runner = m_runner;
116 : 5600 : runner.actions.emplace_back(m_now, [=, &runner]() {
117 : 5600 : runner.txrequest.ReceivedInv(peer, gtxid, pref, reqtime);
118 : 5600 : runner.txrequest.SanityCheck();
119 : 5600 : });
120 : 5600 : }
121 : :
122 : : /** Schedule a DisconnectedPeer call at the Scheduler's current time. */
123 : 2239 : void DisconnectedPeer(NodeId peer)
124 : : {
125 : 2239 : auto& runner = m_runner;
126 : 2239 : runner.actions.emplace_back(m_now, [=, &runner]() {
127 : 2239 : runner.txrequest.DisconnectedPeer(peer);
128 : 2239 : runner.txrequest.SanityCheck();
129 : 2239 : });
130 : 2239 : }
131 : :
132 : : /** Schedule a RequestedTx call at the Scheduler's current time. */
133 : 2960 : void RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds exptime)
134 : : {
135 : 2960 : auto& runner = m_runner;
136 : 2960 : runner.actions.emplace_back(m_now, [=, &runner]() {
137 : 2960 : runner.txrequest.RequestedTx(peer, txhash, exptime);
138 : 2960 : runner.txrequest.SanityCheck();
139 : 2960 : });
140 : 2960 : }
141 : :
142 : : /** Schedule a ReceivedResponse call at the Scheduler's current time. */
143 : 961 : void ReceivedResponse(NodeId peer, const uint256& txhash)
144 : : {
145 : 961 : auto& runner = m_runner;
146 : 961 : runner.actions.emplace_back(m_now, [=, &runner]() {
147 : 961 : runner.txrequest.ReceivedResponse(peer, txhash);
148 : 961 : runner.txrequest.SanityCheck();
149 : 961 : });
150 : 961 : }
151 : :
152 : : /** Schedule calls to verify the TxRequestTracker's state at the Scheduler's current time.
153 : : *
154 : : * @param peer The peer whose state will be inspected.
155 : : * @param expected The expected return value for GetRequestable(peer)
156 : : * @param candidates The expected return value CountCandidates(peer)
157 : : * @param inflight The expected return value CountInFlight(peer)
158 : : * @param completed The expected return value of Count(peer), minus candidates and inflight.
159 : : * @param checkname An arbitrary string to include in error messages, for test identificatrion.
160 : : * @param offset Offset with the current time to use (must be <= 0). This allows simulations of time going
161 : : * backwards (but note that the ordering of this event only follows the scenario's m_now.
162 : : */
163 : 29368 : void Check(NodeId peer, const std::vector<GenTxid>& expected, size_t candidates, size_t inflight,
164 : : size_t completed, const std::string& checkname,
165 : : std::chrono::microseconds offset = std::chrono::microseconds{0})
166 : : {
167 [ + - ]: 29368 : const auto comment = m_testname + " " + checkname;
168 : 29368 : auto& runner = m_runner;
169 : 29368 : const auto now = m_now;
170 [ - + ]: 29368 : assert(offset.count() <= 0);
171 [ + - + - : 58736 : runner.actions.emplace_back(m_now, [=, &runner]() {
+ - ]
172 : 29368 : std::vector<std::pair<NodeId, GenTxid>> expired_now;
173 [ + - ]: 29368 : auto ret = runner.txrequest.GetRequestable(peer, now + offset, &expired_now);
174 [ + + ]: 30408 : for (const auto& entry : expired_now) {
175 [ + - ]: 1040 : runner.expired.insert(entry);
176 : : }
177 [ + - ]: 29368 : runner.txrequest.SanityCheck();
178 [ + - ]: 29368 : runner.txrequest.PostGetRequestableSanityCheck(now + offset);
179 : 29368 : size_t total = candidates + inflight + completed;
180 [ + - ]: 29368 : size_t real_total = runner.txrequest.Count(peer);
181 [ + - ]: 29368 : size_t real_candidates = runner.txrequest.CountCandidates(peer);
182 [ + - ]: 29368 : size_t real_inflight = runner.txrequest.CountInFlight(peer);
183 [ + - + - : 58736 : BOOST_CHECK_MESSAGE(real_total == total, strprintf("[%s] total %i (%i expected)", comment, real_total, total));
+ - ]
184 [ + - + - : 58736 : BOOST_CHECK_MESSAGE(real_inflight == inflight, strprintf("[%s] inflight %i (%i expected)", comment, real_inflight, inflight));
+ - ]
185 [ + - + - : 58736 : BOOST_CHECK_MESSAGE(real_candidates == candidates, strprintf("[%s] candidates %i (%i expected)", comment, real_candidates, candidates));
+ - ]
186 [ + - + - : 58736 : BOOST_CHECK_MESSAGE(ret == expected, strprintf("[%s] mismatching requestables", comment));
+ - ]
187 : 29368 : });
188 : 29368 : }
189 : :
190 : : /** Verify that an announcement for gtxid by peer has expired some time before this check is scheduled.
191 : : *
192 : : * Every expected expiration should be accounted for through exactly one call to this function.
193 : : */
194 : 1040 : void CheckExpired(NodeId peer, GenTxid gtxid)
195 : : {
196 : 1040 : const auto& testname = m_testname;
197 : 1040 : auto& runner = m_runner;
198 [ + - ]: 2080 : runner.actions.emplace_back(m_now, [=, &runner]() {
199 : 1040 : auto it = runner.expired.find(std::pair<NodeId, GenTxid>{peer, gtxid});
200 [ + - + - ]: 3120 : BOOST_CHECK_MESSAGE(it != runner.expired.end(), "[" + testname + "] missing expiration");
201 [ + - ]: 1040 : if (it != runner.expired.end()) runner.expired.erase(it);
202 : 1040 : });
203 : 1040 : }
204 : :
205 : : /** Generate a random txhash, whose priorities for certain peers are constrained.
206 : : *
207 : : * For example, NewTxHash({{p1,p2,p3},{p2,p4,p5}}) will generate a txhash T such that both:
208 : : * - priority(p1,T) > priority(p2,T) > priority(p3,T)
209 : : * - priority(p2,T) > priority(p4,T) > priority(p5,T)
210 : : * where priority is the predicted internal TxRequestTracker's priority, assuming all announcements
211 : : * are within the same preferredness class.
212 : : */
213 : 2880 : uint256 NewTxHash(const std::vector<std::vector<NodeId>>& orders = {})
214 : : {
215 : 2880 : uint256 ret;
216 : 306182 : bool ok;
217 : 306182 : do {
218 : 306182 : ret = m_rng.rand256();
219 : 306182 : ok = true;
220 [ + + ]: 480146 : for (const auto& order : orders) {
221 [ + + ]: 698341 : for (size_t pos = 1; pos < order.size(); ++pos) {
222 : 524377 : uint64_t prio_prev = m_runner.txrequest.ComputePriority(ret, order[pos - 1], true);
223 : 524377 : uint64_t prio_cur = m_runner.txrequest.ComputePriority(ret, order[pos], true);
224 [ + + ]: 524377 : if (prio_prev <= prio_cur) {
225 : : ok = false;
226 : : break;
227 : : }
228 : : }
229 [ + + ]: 477266 : if (!ok) break;
230 : : }
231 [ + + ]: 306182 : if (ok) {
232 : 2880 : ok = m_runner.txhashset.insert(ret).second;
233 : : }
234 [ + + ]: 306182 : } while(!ok);
235 : 2880 : return ret;
236 : : }
237 : :
238 : : /** Generate a random GenTxid; the txhash follows NewTxHash; the transaction identifier is random. */
239 : 2560 : GenTxid NewGTxid(const std::vector<std::vector<NodeId>>& orders = {})
240 : : {
241 : 2560 : const uint256 txhash{NewTxHash(orders)};
242 [ + + ]: 2560 : return m_rng.randbool() ? GenTxid{Wtxid::FromUint256(txhash)} : GenTxid{Txid::FromUint256(txhash)};
243 : : }
244 : :
245 : : /** Generate a new random NodeId to use as peer. The same NodeId is never returned twice
246 : : * (across all Scenarios combined). */
247 : 4640 : NodeId NewPeer()
248 : : {
249 : 4640 : bool ok;
250 : 4640 : NodeId ret;
251 : 4640 : do {
252 : 4640 : ret = m_rng.randbits(63);
253 : 4640 : ok = m_runner.peerset.insert(ret).second;
254 [ - + ]: 4640 : } while(!ok);
255 : 4640 : return ret;
256 : : }
257 : :
258 : 6212 : std::chrono::microseconds Now() const { return m_now; }
259 : : };
260 : :
261 : : /** Add to scenario a test with a single tx announced by a single peer.
262 : : *
263 : : * config is an integer in [0, 32), which controls which variant of the test is used.
264 : : */
265 : 320 : void TxRequestTest::BuildSingleTest(Scenario& scenario, int config)
266 : : {
267 : 320 : auto peer = scenario.NewPeer();
268 [ + - ]: 320 : auto gtxid = scenario.NewGTxid();
269 : 320 : bool immediate = config & 1;
270 : 320 : bool preferred = config & 2;
271 [ + + ]: 320 : auto delay = immediate ? NO_TIME : RandomTime8s();
272 : :
273 : 320 : scenario.SetTestName(strprintf("Single(config=%i)", config));
274 : :
275 : : // Receive an announcement, either immediately requestable or delayed.
276 [ + + ]: 320 : scenario.ReceivedInv(peer, gtxid, preferred, immediate ? MIN_TIME : scenario.Now() + delay);
277 [ + + ]: 320 : if (immediate) {
278 [ + - + - ]: 320 : scenario.Check(peer, {gtxid}, 1, 0, 0, "s1");
279 : : } else {
280 [ + - ]: 320 : scenario.Check(peer, {}, 1, 0, 0, "s2");
281 : 160 : scenario.AdvanceTime(delay - MICROSECOND);
282 [ + - ]: 320 : scenario.Check(peer, {}, 1, 0, 0, "s3");
283 : 160 : scenario.AdvanceTime(MICROSECOND);
284 [ + - + - ]: 320 : scenario.Check(peer, {gtxid}, 1, 0, 0, "s4");
285 : : }
286 : :
287 [ + + ]: 320 : if (config >> 3) { // We'll request the transaction
288 : 240 : scenario.AdvanceTime(RandomTime8s());
289 : 240 : auto expiry = RandomTime8s();
290 [ + - + - ]: 480 : scenario.Check(peer, {gtxid}, 1, 0, 0, "s5");
291 [ - + ]: 240 : scenario.RequestedTx(peer, gtxid.ToUint256(), scenario.Now() + expiry);
292 [ + - ]: 480 : scenario.Check(peer, {}, 0, 1, 0, "s6");
293 : :
294 [ + + ]: 240 : if ((config >> 3) == 1) { // The request will time out
295 : 80 : scenario.AdvanceTime(expiry - MICROSECOND);
296 [ + - ]: 160 : scenario.Check(peer, {}, 0, 1, 0, "s7");
297 : 80 : scenario.AdvanceTime(MICROSECOND);
298 [ + - ]: 160 : scenario.Check(peer, {}, 0, 0, 0, "s8");
299 : 80 : scenario.CheckExpired(peer, gtxid);
300 : 80 : return;
301 : : } else {
302 : 160 : scenario.AdvanceTime(std::chrono::microseconds{m_rng.randrange(expiry.count())});
303 [ + - ]: 320 : scenario.Check(peer, {}, 0, 1, 0, "s9");
304 [ + + ]: 160 : if ((config >> 3) == 3) { // A response will arrive for the transaction
305 [ - + ]: 80 : scenario.ReceivedResponse(peer, gtxid.ToUint256());
306 [ + - ]: 160 : scenario.Check(peer, {}, 0, 0, 0, "s10");
307 : 80 : return;
308 : : }
309 : : }
310 : : }
311 : :
312 [ + + ]: 160 : if (config & 4) { // The peer will go offline
313 : 80 : scenario.DisconnectedPeer(peer);
314 : : } else { // The transaction is no longer needed
315 [ - + ]: 80 : scenario.ForgetTxHash(gtxid.ToUint256());
316 : : }
317 [ + - ]: 320 : scenario.Check(peer, {}, 0, 0, 0, "s11");
318 : : }
319 : :
320 : : /** Add to scenario a test with a single tx announced by two peers, to verify the
321 : : * right peer is selected for requests.
322 : : *
323 : : * config is an integer in [0, 32), which controls which variant of the test is used.
324 : : */
void TxRequestTest::BuildPriorityTest(Scenario& scenario, int config)
{
    scenario.SetTestName(strprintf("Priority(config=%i)", config));

    // Two peers. They will announce in order {peer1, peer2}.
    auto peer1 = scenario.NewPeer(), peer2 = scenario.NewPeer();
    // Construct a transaction that under random rules would be preferred by peer2 or peer1,
    // depending on configuration.
    bool prio1 = config & 1;
    auto gtxid = prio1 ? scenario.NewGTxid({{peer1, peer2}}) : scenario.NewGTxid({{peer2, peer1}});
    bool pref1 = config & 2, pref2 = config & 4;

    scenario.ReceivedInv(peer1, gtxid, pref1, MIN_TIME);
    scenario.Check(peer1, {gtxid}, 1, 0, 0, "p1");
    if (m_rng.randbool()) {
        scenario.AdvanceTime(RandomTime8s());
        scenario.Check(peer1, {gtxid}, 1, 0, 0, "p2");
    }

    scenario.ReceivedInv(peer2, gtxid, pref2, MIN_TIME);
    bool stage2_prio =
        // At this point, peer2 will be given priority if:
        // - It is preferred and peer1 is not
        (pref2 && !pref1) ||
        // - They're in the same preference class,
        //   and the randomized priority favors peer2 over peer1.
        (pref1 == pref2 && !prio1);
    NodeId priopeer = stage2_prio ? peer2 : peer1, otherpeer = stage2_prio ? peer1 : peer2;
    scenario.Check(otherpeer, {}, 1, 0, 0, "p3");
    scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p4");
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    // Priority selection is stable over time while nothing changes.
    scenario.Check(otherpeer, {}, 1, 0, 0, "p5");
    scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p6");

    // We possibly request from the selected peer.
    if (config & 8) {
        scenario.RequestedTx(priopeer, gtxid.ToUint256(), MAX_TIME);
        scenario.Check(priopeer, {}, 0, 1, 0, "p7");
        scenario.Check(otherpeer, {}, 1, 0, 0, "p8");
        if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    }

    // The peer which was selected (or requested from) now goes offline, or a NOTFOUND is received from them.
    if (config & 16) {
        scenario.DisconnectedPeer(priopeer);
    } else {
        scenario.ReceivedResponse(priopeer, gtxid.ToUint256());
    }
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    // NOTE(review): checkname "p8" is reused from the branch above; consider renaming one of
    // them so failure messages are unambiguous.
    // After a NOTFOUND the announcement remains as COMPLETED (completed=1); after a disconnect it is gone.
    scenario.Check(priopeer, {}, 0, 0, !(config & 16), "p8");
    // The request falls back to the remaining peer.
    scenario.Check(otherpeer, {gtxid}, 1, 0, 0, "p9");
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());

    // Now the other peer goes offline.
    scenario.DisconnectedPeer(otherpeer);
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    scenario.Check(peer1, {}, 0, 0, 0, "p10");
    scenario.Check(peer2, {}, 0, 0, 0, "p11");
}
384 : :
385 : : /** Add to scenario a randomized test in which N peers announce the same transaction, to verify
386 : : * the order in which they are requested. */
void TxRequestTest::BuildBigPriorityTest(Scenario& scenario, int peers)
{
    scenario.SetTestName(strprintf("BigPriority(peers=%i)", peers));

    // We will have N peers announce the same transaction.
    std::map<NodeId, bool> preferred;
    std::vector<NodeId> pref_peers, npref_peers;
    int num_pref = m_rng.randrange(peers + 1); // Some preferred, ...
    int num_npref = peers - num_pref; // some not preferred.
    for (int i = 0; i < num_pref; ++i) {
        pref_peers.push_back(scenario.NewPeer());
        preferred[pref_peers.back()] = true;
    }
    for (int i = 0; i < num_npref; ++i) {
        npref_peers.push_back(scenario.NewPeer());
        preferred[npref_peers.back()] = false;
    }
    // Make a list of all peers, in order of intended request order (concatenation of pref_peers and npref_peers).
    std::vector<NodeId> request_order;
    request_order.reserve(num_pref + num_npref);
    for (int i = 0; i < num_pref; ++i) request_order.push_back(pref_peers[i]);
    for (int i = 0; i < num_npref; ++i) request_order.push_back(npref_peers[i]);

    // Determine the announcement order randomly.
    std::vector<NodeId> announce_order = request_order;
    std::shuffle(announce_order.begin(), announce_order.end(), m_rng);

    // Find a gtxid whose txhash prioritization is consistent with the required ordering within pref_peers and
    // within npref_peers.
    auto gtxid = scenario.NewGTxid({pref_peers, npref_peers});

    // Decide reqtimes in opposite order of the expected request order. This means that as time passes we expect the
    // to-be-requested-from-peer will change every time a subsequent reqtime is passed.
    std::map<NodeId, std::chrono::microseconds> reqtimes;
    auto reqtime = scenario.Now();
    for (int i = peers - 1; i >= 0; --i) {
        reqtime += RandomTime8s();
        reqtimes[request_order[i]] = reqtime;
    }

    // Actually announce from all peers simultaneously (but in announce_order).
    for (const auto peer : announce_order) {
        scenario.ReceivedInv(peer, gtxid, preferred[peer], reqtimes[peer]);
    }
    // No reqtime has passed yet, so every announcement is a candidate but none is requestable.
    for (const auto peer : announce_order) {
        scenario.Check(peer, {}, 1, 0, 0, "b1");
    }

    // Let time pass and observe the to-be-requested-from peer change, from nonpreferred to preferred, and from
    // high priority to low priority within each class.
    for (int i = peers - 1; i >= 0; --i) {
        // 1us before a peer's reqtime it is not yet selected; 1us later it is.
        scenario.AdvanceTime(reqtimes[request_order[i]] - scenario.Now() - MICROSECOND);
        scenario.Check(request_order[i], {}, 1, 0, 0, "b2");
        scenario.AdvanceTime(MICROSECOND);
        scenario.Check(request_order[i], {gtxid}, 1, 0, 0, "b3");
    }

    // Peers now in random order go offline, or send NOTFOUNDs. At every point in time the new to-be-requested-from
    // peer should be the best remaining one, so verify this after every response.
    for (int i = 0; i < peers; ++i) {
        if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
        const int pos = m_rng.randrange(request_order.size());
        const auto peer = request_order[pos];
        request_order.erase(request_order.begin() + pos);
        if (m_rng.randbool()) {
            scenario.DisconnectedPeer(peer);
            scenario.Check(peer, {}, 0, 0, 0, "b4");
        } else {
            scenario.ReceivedResponse(peer, gtxid.ToUint256());
            // The COMPLETED entry lingers only while other announcements for the txhash remain.
            scenario.Check(peer, {}, 0, 0, request_order.size() > 0, "b5");
        }
        if (request_order.size()) {
            scenario.Check(request_order[0], {gtxid}, 1, 0, 0, "b6");
        }
    }

    // Everything is gone in the end.
    for (const auto peer : announce_order) {
        scenario.Check(peer, {}, 0, 0, 0, "b7");
    }
}
468 : :
469 : : /** Add to scenario a test with one peer announcing two transactions, to verify they are
470 : : * fetched in announcement order.
471 : : *
472 : : * config is an integer in [0, 4) inclusive, and selects the variant of the test.
473 : : */
474 : 320 : void TxRequestTest::BuildRequestOrderTest(Scenario& scenario, int config)
475 : : {
476 : 320 : scenario.SetTestName(strprintf("RequestOrder(config=%i)", config));
477 : :
478 : 320 : auto peer = scenario.NewPeer();
479 [ + - ]: 320 : auto gtxid1 = scenario.NewGTxid();
480 [ + - ]: 320 : auto gtxid2 = scenario.NewGTxid();
481 : :
482 : 320 : auto reqtime2 = scenario.Now() + RandomTime8s();
483 : 320 : auto reqtime1 = reqtime2 + RandomTime8s();
484 : :
485 : 320 : scenario.ReceivedInv(peer, gtxid1, config & 1, reqtime1);
486 : : // Simulate time going backwards by giving the second announcement an earlier reqtime.
487 : 320 : scenario.ReceivedInv(peer, gtxid2, config & 2, reqtime2);
488 : :
489 : 320 : scenario.AdvanceTime(reqtime2 - MICROSECOND - scenario.Now());
490 [ + - ]: 640 : scenario.Check(peer, {}, 2, 0, 0, "o1");
491 : 320 : scenario.AdvanceTime(MICROSECOND);
492 [ + - + - ]: 640 : scenario.Check(peer, {gtxid2}, 2, 0, 0, "o2");
493 : 320 : scenario.AdvanceTime(reqtime1 - MICROSECOND - scenario.Now());
494 [ + - + - ]: 640 : scenario.Check(peer, {gtxid2}, 2, 0, 0, "o3");
495 : 320 : scenario.AdvanceTime(MICROSECOND);
496 : : // Even with time going backwards in between announcements, the return value of GetRequestable is in
497 : : // announcement order.
498 [ + - + - ]: 640 : scenario.Check(peer, {gtxid1, gtxid2}, 2, 0, 0, "o4");
499 : :
500 : 320 : scenario.DisconnectedPeer(peer);
501 [ + - ]: 640 : scenario.Check(peer, {}, 0, 0, 0, "o5");
502 : 320 : }
503 : :
504 : : /** Add to scenario a test that verifies behavior related to both txid and wtxid with the same
505 : : * hash being announced.
506 : : *
507 : : * config is an integer in [0, 4) inclusive, and selects the variant of the test used.
508 : : */
void TxRequestTest::BuildWtxidTest(Scenario& scenario, int config)
{
    scenario.SetTestName(strprintf("Wtxid(config=%i)", config));

    auto peerT = scenario.NewPeer(); // Announces by txid.
    auto peerW = scenario.NewPeer(); // Announces by wtxid.
    auto txhash = scenario.NewTxHash();
    auto txid{Txid::FromUint256(txhash)};
    auto wtxid{Wtxid::FromUint256(txhash)};

    auto reqtimeT = m_rng.randbool() ? MIN_TIME : scenario.Now() + RandomTime8s();
    auto reqtimeW = m_rng.randbool() ? MIN_TIME : scenario.Now() + RandomTime8s();

    // Announce txid first or wtxid first. config&2 makes the txid announcement the preferred one.
    if (config & 1) {
        scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT);
        if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
        scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW);
    } else {
        scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW);
        if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
        scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT);
    }

    // Let time pass if needed, and check that the preferred announcement (txid or wtxid)
    // is correctly to-be-requested (and with the correct wtxidness).
    auto max_reqtime = std::max(reqtimeT, reqtimeW);
    if (max_reqtime > scenario.Now()) scenario.AdvanceTime(max_reqtime - scenario.Now());
    if (config & 2) {
        scenario.Check(peerT, {txid}, 1, 0, 0, "w1");
        scenario.Check(peerW, {}, 1, 0, 0, "w2");
    } else {
        scenario.Check(peerT, {}, 1, 0, 0, "w3");
        scenario.Check(peerW, {wtxid}, 1, 0, 0, "w4");
    }

    // Let the preferred announcement be requested. It's not going to be delivered.
    auto expiry = RandomTime8s();
    if (config & 2) {
        scenario.RequestedTx(peerT, txid.ToUint256(), scenario.Now() + expiry);
        scenario.Check(peerT, {}, 0, 1, 0, "w5");
        scenario.Check(peerW, {}, 1, 0, 0, "w6");
    } else {
        scenario.RequestedTx(peerW, wtxid.ToUint256(), scenario.Now() + expiry);
        scenario.Check(peerT, {}, 1, 0, 0, "w7");
        scenario.Check(peerW, {}, 0, 1, 0, "w8");
    }

    // After reaching expiration time of the preferred announcement, verify that the
    // remaining one is requestable.
    scenario.AdvanceTime(expiry);
    if (config & 2) {
        scenario.Check(peerT, {}, 0, 0, 1, "w9");
        scenario.Check(peerW, {wtxid}, 1, 0, 0, "w10");
        scenario.CheckExpired(peerT, txid);
    } else {
        scenario.Check(peerT, {txid}, 1, 0, 0, "w11");
        scenario.Check(peerW, {}, 0, 0, 1, "w12");
        scenario.CheckExpired(peerW, wtxid);
    }

    // If a good transaction with either that hash as wtxid or txid arrives, both
    // announcements are gone.
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    scenario.ForgetTxHash(txhash);
    scenario.Check(peerT, {}, 0, 0, 0, "w13");
    scenario.Check(peerW, {}, 0, 0, 0, "w14");
}
577 : :
578 : : /** Add to scenario a test that exercises clocks that go backwards. */
void TxRequestTest::BuildTimeBackwardsTest(Scenario& scenario)
{
    auto peer1 = scenario.NewPeer();
    auto peer2 = scenario.NewPeer();
    // peer1 has higher randomized priority than peer2 for this txhash.
    auto gtxid = scenario.NewGTxid({{peer1, peer2}});

    // Announce from peer2.
    auto reqtime = scenario.Now() + RandomTime8s();
    scenario.ReceivedInv(peer2, gtxid, true, reqtime);
    scenario.Check(peer2, {}, 1, 0, 0, "r1");
    scenario.AdvanceTime(reqtime - scenario.Now());
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r2");
    // Check that if the clock goes backwards by 1us, the transaction would stop being requested.
    scenario.Check(peer2, {}, 1, 0, 0, "r3", -MICROSECOND);
    // But it reverts to being requested if time goes forward again.
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r4");

    // Announce from peer1 (with a reqtime that is never reached; it is only requested manually below).
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    scenario.ReceivedInv(peer1, gtxid, true, MAX_TIME);
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r5");
    scenario.Check(peer1, {}, 1, 0, 0, "r6");

    // Request from peer1.
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    auto expiry = scenario.Now() + RandomTime8s();
    scenario.RequestedTx(peer1, gtxid.ToUint256(), expiry);
    scenario.Check(peer1, {}, 0, 1, 0, "r7");
    scenario.Check(peer2, {}, 1, 0, 0, "r8");

    // Expiration passes.
    scenario.AdvanceTime(expiry - scenario.Now());
    scenario.Check(peer1, {}, 0, 0, 1, "r9");
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r10"); // Request goes back to peer2.
    scenario.CheckExpired(peer1, gtxid);
    scenario.Check(peer1, {}, 0, 0, 1, "r11", -MICROSECOND); // Going back does not unexpire.
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r12", -MICROSECOND);

    // Peer2 goes offline, meaning no viable announcements remain.
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    scenario.DisconnectedPeer(peer2);
    scenario.Check(peer1, {}, 0, 0, 0, "r13");
    scenario.Check(peer2, {}, 0, 0, 0, "r14");
}
623 : :
624 : : /** Add to scenario a test that involves RequestedTx() calls for txhashes not returned by GetRequestable. */
625 : 320 : void TxRequestTest::BuildWeirdRequestsTest(Scenario& scenario)
626 : : {
627 : 320 : auto peer1 = scenario.NewPeer();
628 : 320 : auto peer2 = scenario.NewPeer();
629 [ + - + - : 640 : auto gtxid1 = scenario.NewGTxid({{peer1, peer2}});
+ + - - ]
630 [ + - + - : 640 : auto gtxid2 = scenario.NewGTxid({{peer2, peer1}});
+ + - - ]
631 : :
632 : : // Announce gtxid1 by peer1.
633 : 320 : scenario.ReceivedInv(peer1, gtxid1, true, MIN_TIME);
634 [ + - + - ]: 640 : scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q1");
635 : :
636 : : // Announce gtxid2 by peer2.
637 [ + + ]: 320 : if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
638 : 320 : scenario.ReceivedInv(peer2, gtxid2, true, MIN_TIME);
639 [ + - + - ]: 640 : scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q2");
640 [ + - + - ]: 640 : scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q3");
641 : :
642 : : // We request gtxid2 from *peer1* - no effect.
643 [ + + ]: 320 : if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
644 [ - + ]: 320 : scenario.RequestedTx(peer1, gtxid2.ToUint256(), MAX_TIME);
645 [ + - + - ]: 640 : scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q4");
646 [ + - + - ]: 640 : scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q5");
647 : :
648 : : // Now request gtxid1 from peer1 - marks it as REQUESTED.
649 [ + + ]: 320 : if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
650 [ - + ]: 320 : auto expiryA = scenario.Now() + RandomTime8s();
651 [ - + ]: 320 : scenario.RequestedTx(peer1, gtxid1.ToUint256(), expiryA);
652 [ + - ]: 640 : scenario.Check(peer1, {}, 0, 1, 0, "q6");
653 [ + - + - ]: 640 : scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q7");
654 : :
655 : : // Request it a second time - nothing happens, as it's already REQUESTED.
656 [ - + ]: 320 : auto expiryB = expiryA + RandomTime8s();
657 [ - + ]: 320 : scenario.RequestedTx(peer1, gtxid1.ToUint256(), expiryB);
658 [ + - ]: 640 : scenario.Check(peer1, {}, 0, 1, 0, "q8");
659 [ + - + - ]: 640 : scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q9");
660 : :
661 : : // Also announce gtxid1 from peer2 now, so that the txhash isn't forgotten when the peer1 request expires.
662 : 320 : scenario.ReceivedInv(peer2, gtxid1, true, MIN_TIME);
663 [ + - ]: 640 : scenario.Check(peer1, {}, 0, 1, 0, "q10");
664 [ + - + - ]: 640 : scenario.Check(peer2, {gtxid2}, 2, 0, 0, "q11");
665 : :
666 : : // When reaching expiryA, it expires (not expiryB, which is later).
667 : 320 : scenario.AdvanceTime(expiryA - scenario.Now());
668 [ + - ]: 640 : scenario.Check(peer1, {}, 0, 0, 1, "q12");
669 [ + - + - ]: 640 : scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q13");
670 : 320 : scenario.CheckExpired(peer1, gtxid1);
671 : :
672 : : // Requesting it yet again from peer1 doesn't do anything, as it's already COMPLETED.
673 [ + + ]: 320 : if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
674 [ - + ]: 320 : scenario.RequestedTx(peer1, gtxid1.ToUint256(), MAX_TIME);
675 [ + - ]: 640 : scenario.Check(peer1, {}, 0, 0, 1, "q14");
676 [ + - + - ]: 640 : scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q15");
677 : :
678 : : // Now announce gtxid2 from peer1.
679 [ + + ]: 320 : if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
680 : 320 : scenario.ReceivedInv(peer1, gtxid2, true, MIN_TIME);
681 [ + - ]: 640 : scenario.Check(peer1, {}, 1, 0, 1, "q16");
682 [ + - + - ]: 640 : scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q17");
683 : :
684 : : // And request it from peer1 (weird as peer2 has the preference).
685 [ + + ]: 320 : if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
686 [ - + ]: 320 : scenario.RequestedTx(peer1, gtxid2.ToUint256(), MAX_TIME);
687 [ + - ]: 640 : scenario.Check(peer1, {}, 0, 1, 1, "q18");
688 [ + - + - ]: 640 : scenario.Check(peer2, {gtxid1}, 2, 0, 0, "q19");
689 : :
690 : : // If peer2 now (normally) requests gtxid2, the existing request by peer1 becomes COMPLETED.
691 [ + + ]: 320 : if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
692 [ - + ]: 320 : scenario.RequestedTx(peer2, gtxid2.ToUint256(), MAX_TIME);
693 [ + - ]: 640 : scenario.Check(peer1, {}, 0, 0, 2, "q20");
694 [ + - + - ]: 640 : scenario.Check(peer2, {gtxid1}, 1, 1, 0, "q21");
695 : :
696 : : // If peer2 goes offline, no viable announcements remain.
697 : 320 : scenario.DisconnectedPeer(peer2);
698 [ + - ]: 640 : scenario.Check(peer1, {}, 0, 0, 0, "q22");
699 [ + - ]: 640 : scenario.Check(peer2, {}, 0, 0, 0, "q23");
700 : 960 : }
701 : :
702 : 5 : void TxRequestTest::TestInterleavedScenarios()
703 : : {
704 : : // Create a list of functions which add tests to scenarios.
705 : 5 : std::vector<std::function<void(Scenario&)>> builders;
706 : : // Add instances of every test, for every configuration.
707 [ + + ]: 325 : for (int n = 0; n < 64; ++n) {
708 [ + - ]: 640 : builders.emplace_back([this, n](Scenario& scenario) { BuildWtxidTest(scenario, n); });
709 [ + - ]: 640 : builders.emplace_back([this, n](Scenario& scenario) { BuildRequestOrderTest(scenario, n & 3); });
710 [ + - ]: 640 : builders.emplace_back([this, n](Scenario& scenario) { BuildSingleTest(scenario, n & 31); });
711 [ + - ]: 640 : builders.emplace_back([this, n](Scenario& scenario) { BuildPriorityTest(scenario, n & 31); });
712 [ + - ]: 640 : builders.emplace_back([this, n](Scenario& scenario) { BuildBigPriorityTest(scenario, (n & 7) + 1); });
713 [ + - ]: 640 : builders.emplace_back([this](Scenario& scenario) { BuildTimeBackwardsTest(scenario); });
714 [ + - ]: 640 : builders.emplace_back([this](Scenario& scenario) { BuildWeirdRequestsTest(scenario); });
715 : : }
716 : : // Randomly shuffle all those functions.
717 : 5 : std::shuffle(builders.begin(), builders.end(), m_rng);
718 : :
719 [ + - ]: 5 : Runner runner;
720 : 5 : auto starttime = RandomTime1y();
721 : : // Construct many scenarios, and run (up to) 10 randomly-chosen tests consecutively in each.
722 [ + + ]: 230 : while (builders.size()) {
723 : : // Introduce some variation in the start time of each scenario, so they don't all start off
724 : : // concurrently, but get a more random interleaving.
725 : 225 : auto scenario_start = starttime + RandomTime8s() + RandomTime8s() + RandomTime8s();
726 : 225 : Scenario scenario(m_rng, runner, scenario_start);
727 [ + + + + ]: 2465 : for (int j = 0; builders.size() && j < 10; ++j) {
728 [ + - ]: 2240 : builders.back()(scenario);
729 : 2240 : builders.pop_back();
730 : : }
731 : 225 : }
732 : : // Sort all the actions from all those scenarios chronologically, resulting in the actions from
733 : : // distinct scenarios to become interleaved. Use stable_sort so that actions from one scenario
734 : : // aren't reordered w.r.t. each other.
735 : 5 : std::stable_sort(runner.actions.begin(), runner.actions.end(), [](const Action& a1, const Action& a2) {
736 [ + + + + : 437123 : return a1.first < a2.first;
- - - - +
+ + + + +
- - + + ]
737 : : });
738 : :
739 : : // Run all actions from all scenarios, in order.
740 [ + + ]: 42573 : for (auto& action : runner.actions) {
741 [ + - ]: 42568 : action.second();
742 : : }
743 : :
744 [ + - + - : 5 : BOOST_CHECK_EQUAL(runner.txrequest.Size(), 0U);
+ - ]
745 [ + - + - ]: 10 : BOOST_CHECK(runner.expired.empty());
746 : 5 : }
747 : :
748 : : } // namespace
749 : :
750 : : BOOST_FIXTURE_TEST_SUITE(txrequest_tests, TxRequestTest)
751 : :
752 [ + - + - : 7 : BOOST_AUTO_TEST_CASE(TxRequestTest)
+ - + - +
- + - + -
+ - + - +
- + - + -
+ - + - +
- + - + -
+ - + - +
- + - + -
+ - + - +
- + - + -
+ - + - +
- ]
753 : : {
754 [ + + ]: 6 : for (int i = 0; i < 5; ++i) {
755 : 5 : TestInterleavedScenarios();
756 : : }
757 : 1 : }
758 : :
759 : : BOOST_AUTO_TEST_SUITE_END()
|