// Copyright (c) 2012-2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <cuckoocache.h>
#include <random.h>
#include <script/sigcache.h>
#include <test/util/random.h>
#include <test/util/setup_common.h>

#include <boost/test/unit_test.hpp>

#include <deque>
#include <mutex>
#include <shared_mutex>
#include <thread>
#include <vector>

/** Test Suite for CuckooCache
 *
 * 1. All tests should have a deterministic result (using insecure rand
 * with deterministic seeds)
 * 2. Some test methods are templated to allow for easier testing
 * against new versions / comparing
 * 3. Results should be treated as a regression test, i.e., did the behavior
 * change significantly from what was expected. This can be OK, depending on
 * the nature of the change, but requires updating the tests to reflect the new
 * expected behavior. For example, improving the hit rate may cause some tests
 * using BOOST_CHECK_CLOSE to fail.
 */
BOOST_FIXTURE_TEST_SUITE(cuckoocache_tests, BasicTestingSetup);

/* Test that no values not inserted into the cache are read out of it.
 *
 * There are no repeats in the first 200000 m_rng.rand256() calls.
 */
BOOST_AUTO_TEST_CASE(test_cuckoocache_no_fakes)
{
    SeedRandomForTest(SeedRand::ZEROS);
    CuckooCache::cache<uint256, SignatureCacheHasher> cc{};
    size_t megabytes = 4;
    cc.setup_bytes(megabytes << 20);
    for (int x = 0; x < 100000; ++x) {
        cc.insert(m_rng.rand256());
    }
    for (int x = 0; x < 100000; ++x) {
        BOOST_CHECK(!cc.contains(m_rng.rand256(), false));
    }
};

struct HitRateTest : BasicTestingSetup {
    /** This helper returns the hit rate when megabytes*load worth of entries
     * are inserted into a megabytes-sized cache.
     */
    template <typename Cache>
    double test_cache(size_t megabytes, double load)
    {
        SeedRandomForTest(SeedRand::ZEROS);
        std::vector<uint256> hashes;
        Cache set{};
        size_t bytes = megabytes * (1 << 20);
        set.setup_bytes(bytes);
        uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
        hashes.resize(n_insert);
        for (uint32_t i = 0; i < n_insert; ++i) {
            uint32_t* ptr = (uint32_t*)hashes[i].begin();
            for (uint8_t j = 0; j < 8; ++j)
                *(ptr++) = m_rng.rand32();
        }
        /** We make a copy of the hashes because future optimizations of the
         * cuckoocache may overwrite the inserted element, so the test is
         * "future proofed".
         */
        std::vector<uint256> hashes_insert_copy = hashes;
        /** Do the insert */
        for (const uint256& h : hashes_insert_copy)
            set.insert(h);
        /** Count the hits */
        uint32_t count = 0;
        for (const uint256& h : hashes)
            count += set.contains(h, false);
        double hit_rate = ((double)count) / ((double)n_insert);
        return hit_rate;
    }

    /** The normalized hit rate for a given load.
     *
     * The semantics are a little confusing, so please see the below
     * explanation.
     *
     * Examples:
     *
     * 1. at load 0.5, we expect a perfect hit rate, so we multiply by
     * 1.0
     * 2. at load 2.0, we expect to see half the entries, so a perfect hit rate
     * would be 0.5. Therefore, if we see a hit rate of 0.4, 0.4*2.0 = 0.8 is the
     * normalized hit rate.
     *
     * These are basically the right semantics, but there is a glitch around
     * load 1.0: above that load the normalized hit rate can look effectively
     * perfect, because the normalization ignores the freshness of the entries.
     */
    static double normalize_hit_rate(double hits, double load)
    {
        return hits * std::max(load, 1.0);
    }
}; // struct HitRateTest
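
// A compile-time restatement of the two worked examples above. This constexpr
// mirror of normalize_hit_rate is an illustrative addition, not part of the
// original test logic:
constexpr double normalize_hit_rate_example(double hits, double load)
{
    return hits * std::max(load, 1.0);
}
static_assert(normalize_hit_rate_example(1.0, 0.5) == 1.0, "load 0.5: a perfect hit rate is left unscaled");
static_assert(normalize_hit_rate_example(0.4, 2.0) == 0.8, "load 2.0: 0.4 * 2.0 == 0.8 normalized");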

/** Check the hit rate on loads ranging from 0.1 to 1.6 */
BOOST_FIXTURE_TEST_CASE(cuckoocache_hit_rate_ok, HitRateTest)
{
    /** Arbitrarily selected hit-rate threshold that happens to work for this
     * test as a lower bound on performance.
     */
    double HitRateThresh = 0.98;
    size_t megabytes = 4;
    // Loads tested: 0.1, 0.2, 0.4, 0.8, and 1.6.
    for (double load = 0.1; load < 2; load *= 2) {
        double hits = test_cache<CuckooCache::cache<uint256, SignatureCacheHasher>>(megabytes, load);
        BOOST_CHECK(normalize_hit_rate(hits, load) > HitRateThresh);
    }
}

struct EraseTest : BasicTestingSetup {
    /** This helper checks that the slots of erased elements are preferentially
     * reused by subsequent inserts, and that the hit rate of "fresher" keys
     * is reasonable. */
    template <typename Cache>
    void test_cache_erase(size_t megabytes)
    {
        double load = 1;
        SeedRandomForTest(SeedRand::ZEROS);
        std::vector<uint256> hashes;
        Cache set{};
        size_t bytes = megabytes * (1 << 20);
        set.setup_bytes(bytes);
        uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
        hashes.resize(n_insert);
        for (uint32_t i = 0; i < n_insert; ++i) {
            uint32_t* ptr = (uint32_t*)hashes[i].begin();
            for (uint8_t j = 0; j < 8; ++j)
                *(ptr++) = m_rng.rand32();
        }
        /** We make a copy of the hashes because future optimizations of the
         * cuckoocache may overwrite the inserted element, so the test is
         * "future proofed".
         */
        std::vector<uint256> hashes_insert_copy = hashes;

        /** Insert the first half */
        for (uint32_t i = 0; i < (n_insert / 2); ++i)
            set.insert(hashes_insert_copy[i]);
        /** Erase the first quarter */
        for (uint32_t i = 0; i < (n_insert / 4); ++i)
            BOOST_CHECK(set.contains(hashes[i], true));
        /** Insert the second half */
        for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
            set.insert(hashes_insert_copy[i]);

        /** elements that we marked as erased but are still there */
        size_t count_erased_but_contained = 0;
        /** elements that we did not erase but are older */
        size_t count_stale = 0;
        /** elements that were most recently inserted */
        size_t count_fresh = 0;

        for (uint32_t i = 0; i < (n_insert / 4); ++i)
            count_erased_but_contained += set.contains(hashes[i], false);
        for (uint32_t i = (n_insert / 4); i < (n_insert / 2); ++i)
            count_stale += set.contains(hashes[i], false);
        for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
            count_fresh += set.contains(hashes[i], false);

        double hit_rate_erased_but_contained = double(count_erased_but_contained) / (double(n_insert) / 4.0);
        double hit_rate_stale = double(count_stale) / (double(n_insert) / 4.0);
        double hit_rate_fresh = double(count_fresh) / (double(n_insert) / 2.0);

        // Check that our hit_rate_fresh is perfect
        BOOST_CHECK_EQUAL(hit_rate_fresh, 1.0);
        // Check that we have a more than 2x better hit rate on stale elements than
        // erased elements.
        BOOST_CHECK(hit_rate_stale > 2 * hit_rate_erased_but_contained);
    }
}; // struct EraseTest

BOOST_FIXTURE_TEST_CASE(cuckoocache_erase_ok, EraseTest)
{
    size_t megabytes = 4;
    test_cache_erase<CuckooCache::cache<uint256, SignatureCacheHasher>>(megabytes);
}
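
// A minimal sketch of the erase semantics exercised above. This extra test
// case is an illustrative addition, not part of the original suite:
// contains(h, /*erase=*/true) reports a hit and marks the entry erased, so
// its slot is preferentially reused by later inserts.
BOOST_AUTO_TEST_CASE(cuckoocache_erase_semantics_sketch)
{
    SeedRandomForTest(SeedRand::ZEROS);
    CuckooCache::cache<uint256, SignatureCacheHasher> c{};
    c.setup_bytes(1 << 20);
    const uint256 h{m_rng.rand256()};
    c.insert(h);
    // Lookup with erase=true: a hit, and the element is now marked erased.
    BOOST_CHECK(c.contains(h, true));
    // The element may still be reported until its slot is actually reused.
    (void)c.contains(h, false);
}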

struct EraseParallelTest : BasicTestingSetup {
    template <typename Cache>
    void test_cache_erase_parallel(size_t megabytes)
    {
        double load = 1;
        SeedRandomForTest(SeedRand::ZEROS);
        std::vector<uint256> hashes;
        Cache set{};
        size_t bytes = megabytes * (1 << 20);
        set.setup_bytes(bytes);
        uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
        hashes.resize(n_insert);
        for (uint32_t i = 0; i < n_insert; ++i) {
            uint32_t* ptr = (uint32_t*)hashes[i].begin();
            for (uint8_t j = 0; j < 8; ++j)
                *(ptr++) = m_rng.rand32();
        }
        /** We make a copy of the hashes because future optimizations of the
         * cuckoocache may overwrite the inserted element, so the test is
         * "future proofed".
         */
        std::vector<uint256> hashes_insert_copy = hashes;
        std::shared_mutex mtx;

        {
            /** Grab the unique lock so that releasing it publishes the inserts
             * to the reader threads. */
            std::unique_lock<std::shared_mutex> l(mtx);
            /** Insert the first half */
            for (uint32_t i = 0; i < (n_insert / 2); ++i)
                set.insert(hashes_insert_copy[i]);
        }

        /** Spin up 3 threads to run contains with erase. */
        std::vector<std::thread> threads;
        /** Erase the first quarter */
        for (uint32_t x = 0; x < 3; ++x)
            /** Each thread captures x by value */
            threads.emplace_back([&, x] {
                std::shared_lock<std::shared_mutex> l(mtx);
                size_t ntodo = (n_insert / 4) / 3;
                size_t start = ntodo * x;
                size_t end = ntodo * (x + 1);
                for (uint32_t i = start; i < end; ++i) {
                    bool contains = set.contains(hashes[i], true);
                    assert(contains);
                }
            });

        /** Wait for all threads to finish */
        for (std::thread& t : threads)
            t.join();
        /** Grab the unique lock to make sure we observe the erases performed
         * by the reader threads. */
        std::unique_lock<std::shared_mutex> l(mtx);
        /** Insert the second half */
        for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
            set.insert(hashes_insert_copy[i]);

        /** elements that we marked erased but that are still there */
        size_t count_erased_but_contained = 0;
        /** elements that we did not erase but are older */
        size_t count_stale = 0;
        /** elements that were most recently inserted */
        size_t count_fresh = 0;

        for (uint32_t i = 0; i < (n_insert / 4); ++i)
            count_erased_but_contained += set.contains(hashes[i], false);
        for (uint32_t i = (n_insert / 4); i < (n_insert / 2); ++i)
            count_stale += set.contains(hashes[i], false);
        for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
            count_fresh += set.contains(hashes[i], false);

        double hit_rate_erased_but_contained = double(count_erased_but_contained) / (double(n_insert) / 4.0);
        double hit_rate_stale = double(count_stale) / (double(n_insert) / 4.0);
        double hit_rate_fresh = double(count_fresh) / (double(n_insert) / 2.0);

        // Check that our hit_rate_fresh is perfect
        BOOST_CHECK_EQUAL(hit_rate_fresh, 1.0);
        // Check that we have a more than 2x better hit rate on stale elements than
        // erased elements.
        BOOST_CHECK(hit_rate_stale > 2 * hit_rate_erased_but_contained);
    }
}; // struct EraseParallelTest
BOOST_FIXTURE_TEST_CASE(cuckoocache_erase_parallel_ok, EraseParallelTest)
{
    size_t megabytes = 4;
    test_cache_erase_parallel<CuckooCache::cache<uint256, SignatureCacheHasher>>(megabytes);
}
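
// The locking pattern used in test_cache_erase_parallel, in miniature. This
// extra test case is an illustrative addition, not part of the original
// suite: a writer publishes its writes by releasing a unique_lock, readers
// run concurrently under shared_locks, and the writer re-takes the
// unique_lock to observe the readers' effects.
BOOST_AUTO_TEST_CASE(shared_mutex_pattern_sketch)
{
    std::shared_mutex mtx;
    int published = 0;
    std::vector<int> observed(3, 0);
    {
        std::unique_lock<std::shared_mutex> l(mtx);
        published = 42; // releasing the lock publishes this write to the readers
    }
    std::vector<std::thread> threads;
    for (int x = 0; x < 3; ++x)
        threads.emplace_back([&, x] {
            std::shared_lock<std::shared_mutex> l(mtx); // readers run concurrently
            observed[x] = (published == 42);
        });
    for (std::thread& t : threads)
        t.join();
    std::unique_lock<std::shared_mutex> l(mtx); // re-take to observe reader writes
    BOOST_CHECK_EQUAL(observed[0] + observed[1] + observed[2], 3);
}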


struct GenerationsTest : BasicTestingSetup {
    template <typename Cache>
    void test_cache_generations()
    {
        // This test checks that for a simulation of network activity, the fresh hit
        // rate is never below 99%, and the number of times that it is worse than
        // 99.9% is less than 1% of the time.
        double min_hit_rate = 0.99;
        double tight_hit_rate = 0.999;
        double max_rate_less_than_tight_hit_rate = 0.01;
        // A cache that meets this specification is therefore shown to have a hit
        // rate of at least tight_hit_rate * (1 - max_rate_less_than_tight_hit_rate) +
        // min_hit_rate * max_rate_less_than_tight_hit_rate = 0.999*99% + 0.99*1% == 99.89%
        // hit rate with low variance.
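        // Worked check of the arithmetic above (this static_assert is an
        // illustrative addition): 0.999 * 0.99 + 0.99 * 0.01 = 0.99891, i.e. ~99.89%.
        static_assert(0.999 * 0.99 + 0.99 * 0.01 > 0.9989, "combined hit rate should be ~99.89%");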

        // We use deterministic values, but this test has also passed on many
        // iterations with non-deterministic values, so it isn't "overfit" to the
        // specific entropy in FastRandomContext(true) and implementation of the
        // cache.
        SeedRandomForTest(SeedRand::ZEROS);

        // block_activity models a chunk of network activity. n_insert elements are
        // added to the cache. The first and last n/4 are stored for removal later
        // and the middle n/2 are not stored. This models a network which uses half
        // the signatures of recently (since the last block) added transactions
        // immediately and never uses the other half.
        struct block_activity {
            std::vector<uint256> reads;
            block_activity(uint32_t n_insert, FastRandomContext& rng, Cache& c)
            {
                std::vector<uint256> inserts;
                inserts.resize(n_insert);
                reads.reserve(n_insert / 2);
                for (uint32_t i = 0; i < n_insert; ++i) {
                    uint32_t* ptr = (uint32_t*)inserts[i].begin();
                    for (uint8_t j = 0; j < 8; ++j)
                        *(ptr++) = rng.rand32();
                }
                for (uint32_t i = 0; i < n_insert / 4; ++i)
                    reads.push_back(inserts[i]);
                for (uint32_t i = n_insert - (n_insert / 4); i < n_insert; ++i)
                    reads.push_back(inserts[i]);
                for (const auto& h : inserts)
                    c.insert(h);
            }
        };

        const uint32_t BLOCK_SIZE = 1000;
        // We expect window size 60 to perform reasonably given that each epoch
        // stores 45% of the cache size (~472k).
        const uint32_t WINDOW_SIZE = 60;
        const uint32_t POP_AMOUNT = (BLOCK_SIZE / WINDOW_SIZE) / 2;
        const double load = 10;
        const size_t megabytes = 4;
        const size_t bytes = megabytes * (1 << 20);
        const uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
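        // Worked numbers for the constants above (this comment and the checks
        // are an illustrative addition): bytes = 4 MiB, so the cache holds
        // bytes / sizeof(uint256) = 131072 entries, n_insert = 10 * 131072 =
        // 1310720, the simulation runs 1310 blocks, and each live
        // block_activity pops POP_AMOUNT = (1000 / 60) / 2 = 8 hashes per step.
        static_assert((1000u / 60u) / 2u == 8u, "POP_AMOUNT with these constants");
        static_assert(10u * ((size_t{4} << 20) / sizeof(uint256)) / 1000u == 1310u, "number of simulated blocks");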

        std::vector<block_activity> hashes;
        Cache set{};
        set.setup_bytes(bytes);
        hashes.reserve(n_insert / BLOCK_SIZE);
        std::deque<block_activity> last_few;
        uint32_t out_of_tight_tolerance = 0;
        uint32_t total = n_insert / BLOCK_SIZE;
        // We use the deque last_few to model a sliding window of blocks. At each
        // step, each of the last WINDOW_SIZE block_activities checks the cache for
        // POP_AMOUNT of the hashes that they inserted, and marks these erased.
        for (uint32_t i = 0; i < total; ++i) {
            if (last_few.size() == WINDOW_SIZE)
                last_few.pop_front();
            last_few.emplace_back(BLOCK_SIZE, m_rng, set);
            uint32_t count = 0;
            for (auto& act : last_few)
                for (uint32_t k = 0; k < POP_AMOUNT; ++k) {
                    count += set.contains(act.reads.back(), true);
                    act.reads.pop_back();
                }
            // We use last_few.size() rather than WINDOW_SIZE for the correct
            // behavior on the first WINDOW_SIZE iterations where the deque is not
            // full yet.
            double hit = (double(count)) / (last_few.size() * POP_AMOUNT);
            // Loose check that the hit rate is above min_hit_rate.
            BOOST_CHECK(hit > min_hit_rate);
            // Tighter check: count the number of times we are less than
            // tight_hit_rate (and implicitly, greater than min_hit_rate).
            out_of_tight_tolerance += hit < tight_hit_rate;
        }
        // Check that being out of tolerance happens less than
        // max_rate_less_than_tight_hit_rate of the time.
        BOOST_CHECK(double(out_of_tight_tolerance) / double(total) < max_rate_less_than_tight_hit_rate);
    }
}; // struct GenerationsTest
BOOST_FIXTURE_TEST_CASE(cuckoocache_generations, GenerationsTest)
{
    test_cache_generations<CuckooCache::cache<uint256, SignatureCacheHasher>>();
}

BOOST_AUTO_TEST_SUITE_END();