Branch data Line data Source code
1 : : // Copyright (c) The Bitcoin Core developers
2 : : // Distributed under the MIT software license, see the accompanying
3 : : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
4 : :
5 : : #ifndef BITCOIN_CLUSTER_LINEARIZE_H
6 : : #define BITCOIN_CLUSTER_LINEARIZE_H
7 : :
8 : : #include <algorithm>
9 : : #include <cstdint>
10 : : #include <numeric>
11 : : #include <optional>
12 : : #include <ranges>
13 : : #include <utility>
14 : : #include <vector>
15 : :
16 : : #include <attributes.h>
17 : : #include <memusage.h>
18 : : #include <random.h>
19 : : #include <span.h>
20 : : #include <util/feefrac.h>
21 : : #include <util/vecdeque.h>
22 : :
23 : : namespace cluster_linearize {
24 : :
/** Data type to represent transaction indices in DepGraphs and the clusters they represent.
 *  Values of this type index into DepGraph::entries and are the element values stored in the
 *  SetType bitsets (ancestors, descendants, used positions). */
using DepGraphIndex = uint32_t;
27 : :
/** Data structure that holds a transaction graph's preprocessed data (fee, size, ancestors,
 * descendants). */
template<typename SetType>
class DepGraph
{
    /** Information about a single transaction. */
    struct Entry
    {
        /** Fee and size of transaction itself. */
        FeeFrac feerate;
        /** All ancestors of the transaction (including itself). */
        SetType ancestors;
        /** All descendants of the transaction (including itself). */
        SetType descendants;

        /** Equality operator (primarily for testing purposes). */
        friend bool operator==(const Entry&, const Entry&) noexcept = default;

        /** Construct an empty entry. */
        Entry() noexcept = default;
        /** Construct an entry with a given feerate, ancestor set, descendant set. */
        Entry(const FeeFrac& f, const SetType& a, const SetType& d) noexcept : feerate(f), ancestors(a), descendants(d) {}
    };

    /** Data for each transaction. Indexed by position; may contain stale entries for holes
     *  (positions not in m_used), which are overwritten on reuse. */
    std::vector<Entry> entries;

    /** Which positions are used. */
    SetType m_used;

public:
    /** Equality operator (primarily for testing purposes). */
    friend bool operator==(const DepGraph& a, const DepGraph& b) noexcept
    {
        if (a.m_used != b.m_used) return false;
        // Only compare the used positions within the entries vector. Hole positions may hold
        // stale data (see RemoveTransactions) and must not influence equality.
        for (auto idx : a.m_used) {
            if (a.entries[idx] != b.entries[idx]) return false;
        }
        return true;
    }

    // Default constructors.
    DepGraph() noexcept = default;
    DepGraph(const DepGraph&) noexcept = default;
    DepGraph(DepGraph&&) noexcept = default;
    DepGraph& operator=(const DepGraph&) noexcept = default;
    DepGraph& operator=(DepGraph&&) noexcept = default;

    /** Construct a DepGraph object given another DepGraph and a mapping from old to new.
     *
     * @param depgraph  The original DepGraph that is being remapped.
     *
     * @param mapping   A span such that mapping[i] gives the position in the new DepGraph
     *                  for position i in the old depgraph. Its size must be equal to
     *                  depgraph.PositionRange(). The value of mapping[i] is ignored if
     *                  position i is a hole in depgraph (i.e., if !depgraph.Positions()[i]).
     *
     * @param pos_range The PositionRange() for the new DepGraph. It must equal the largest
     *                  value in mapping for any used position in depgraph plus 1, or 0 if
     *                  depgraph.TxCount() == 0.
     *
     * Complexity: O(N^2) where N=depgraph.TxCount().
     */
    DepGraph(const DepGraph<SetType>& depgraph, std::span<const DepGraphIndex> mapping, DepGraphIndex pos_range) noexcept : entries(pos_range)
    {
        Assume(mapping.size() == depgraph.PositionRange());
        Assume((pos_range == 0) == (depgraph.TxCount() == 0));
        // First pass: create all transactions (with self-only ancestor/descendant sets), so
        // that the second pass can add dependencies between any pair of mapped positions.
        for (DepGraphIndex i : depgraph.Positions()) {
            auto new_idx = mapping[i];
            Assume(new_idx < pos_range);
            // Add transaction.
            entries[new_idx].ancestors = SetType::Singleton(new_idx);
            entries[new_idx].descendants = SetType::Singleton(new_idx);
            m_used.Set(new_idx);
            // Fill in fee and size.
            entries[new_idx].feerate = depgraph.entries[i].feerate;
        }
        for (DepGraphIndex i : depgraph.Positions()) {
            // Fill in dependencies by mapping direct parents.
            SetType parents;
            for (auto j : depgraph.GetReducedParents(i)) parents.Set(mapping[j]);
            AddDependencies(parents, mapping[i]);
        }
        // Verify that the provided pos_range was correct (no unused positions at the end).
        Assume(m_used.None() ? (pos_range == 0) : (pos_range == m_used.Last() + 1));
    }

    /** Get the set of transactions positions in use. Complexity: O(1). */
    const SetType& Positions() const noexcept { return m_used; }
    /** Get the range of positions in this DepGraph. All entries in Positions() are in [0, PositionRange() - 1]. */
    DepGraphIndex PositionRange() const noexcept { return entries.size(); }
    /** Get the number of transactions in the graph. Complexity: O(1). */
    auto TxCount() const noexcept { return m_used.Count(); }
    /** Get the feerate of a given transaction i. Complexity: O(1). */
    const FeeFrac& FeeRate(DepGraphIndex i) const noexcept { return entries[i].feerate; }
    /** Get the mutable feerate of a given transaction i. Complexity: O(1). */
    FeeFrac& FeeRate(DepGraphIndex i) noexcept { return entries[i].feerate; }
    /** Get the ancestors of a given transaction i (including i itself). Complexity: O(1). */
    const SetType& Ancestors(DepGraphIndex i) const noexcept { return entries[i].ancestors; }
    /** Get the descendants of a given transaction i (including i itself). Complexity: O(1). */
    const SetType& Descendants(DepGraphIndex i) const noexcept { return entries[i].descendants; }

    /** Add a new unconnected transaction to this transaction graph (in the first available
     *  position), and return its DepGraphIndex.
     *
     * Complexity: O(1) (amortized, due to resizing of backing vector).
     */
    DepGraphIndex AddTransaction(const FeeFrac& feefrac) noexcept
    {
        static constexpr auto ALL_POSITIONS = SetType::Fill(SetType::Size());
        auto available = ALL_POSITIONS - m_used;
        Assume(available.Any());
        DepGraphIndex new_idx = available.First();
        if (new_idx == entries.size()) {
            // No hole available: append a fresh entry.
            entries.emplace_back(feefrac, SetType::Singleton(new_idx), SetType::Singleton(new_idx));
        } else {
            // Reuse the first hole, overwriting whatever stale data it held.
            entries[new_idx] = Entry(feefrac, SetType::Singleton(new_idx), SetType::Singleton(new_idx));
        }
        m_used.Set(new_idx);
        return new_idx;
    }

    /** Remove the specified positions from this DepGraph.
     *
     * The specified positions will no longer be part of Positions(), and dependencies with them are
     * removed. Note that due to DepGraph only tracking ancestors/descendants (and not direct
     * dependencies), if a parent is removed while a grandparent remains, the grandparent will
     * remain an ancestor.
     *
     * Complexity: O(N) where N=TxCount().
     */
    void RemoveTransactions(const SetType& del) noexcept
    {
        m_used -= del;
        // Remove now-unused trailing entries.
        while (!entries.empty() && !m_used[entries.size() - 1]) {
            entries.pop_back();
        }
        // Remove the deleted transactions from ancestors/descendants of other transactions. Note
        // that the deleted positions will retain old feerate and dependency information. This does
        // not matter as they will be overwritten by AddTransaction if they get used again.
        for (auto& entry : entries) {
            entry.ancestors &= m_used;
            entry.descendants &= m_used;
        }
    }

    /** Modify this transaction graph, adding multiple parents to a specified child.
     *
     * Complexity: O(N) where N=TxCount().
     */
    void AddDependencies(const SetType& parents, DepGraphIndex child) noexcept
    {
        Assume(m_used[child]);
        Assume(parents.IsSubsetOf(m_used));
        // Compute the ancestors of parents that are not already ancestors of child.
        SetType par_anc;
        for (auto par : parents - Ancestors(child)) {
            par_anc |= Ancestors(par);
        }
        par_anc -= Ancestors(child);
        // Bail out if there are no such ancestors.
        if (par_anc.None()) return;
        // To each such ancestor, add as descendants the descendants of the child.
        const auto& chl_des = entries[child].descendants;
        for (auto anc_of_par : par_anc) {
            entries[anc_of_par].descendants |= chl_des;
        }
        // To each descendant of the child, add those ancestors.
        for (auto dec_of_chl : Descendants(child)) {
            entries[dec_of_chl].ancestors |= par_anc;
        }
    }

    /** Compute the (reduced) set of parents of node i in this graph.
     *
     * This returns the minimal subset of the parents of i whose ancestors together equal all of
     * i's ancestors (unless i is part of a cycle of dependencies). Note that DepGraph does not
     * store the set of parents; this information is inferred from the ancestor sets.
     *
     * Complexity: O(N) where N=Ancestors(i).Count() (which is bounded by TxCount()).
     */
    SetType GetReducedParents(DepGraphIndex i) const noexcept
    {
        // Start from all strict ancestors, then strip out indirect ones.
        SetType parents = Ancestors(i);
        parents.Reset(i);
        for (auto parent : parents) {
            // Recheck membership: an earlier iteration may already have removed this candidate
            // (as an ancestor of another kept parent).
            if (parents[parent]) {
                // Drop all of parent's own ancestors from the candidate set, keeping parent.
                parents -= Ancestors(parent);
                parents.Set(parent);
            }
        }
        return parents;
    }

    /** Compute the (reduced) set of children of node i in this graph.
     *
     * This returns the minimal subset of the children of i whose descendants together equal all of
     * i's descendants (unless i is part of a cycle of dependencies). Note that DepGraph does not
     * store the set of children; this information is inferred from the descendant sets.
     *
     * Complexity: O(N) where N=Descendants(i).Count() (which is bounded by TxCount()).
     */
    SetType GetReducedChildren(DepGraphIndex i) const noexcept
    {
        // Mirror image of GetReducedParents, using descendant sets.
        SetType children = Descendants(i);
        children.Reset(i);
        for (auto child : children) {
            if (children[child]) {
                children -= Descendants(child);
                children.Set(child);
            }
        }
        return children;
    }

    /** Compute the aggregate feerate of a set of nodes in this graph.
     *
     * Complexity: O(N) where N=elems.Count().
     **/
    FeeFrac FeeRate(const SetType& elems) const noexcept
    {
        FeeFrac ret;
        for (auto pos : elems) ret += entries[pos].feerate;
        return ret;
    }

    /** Get the connected component within the subset "todo" that contains tx (which must be in
     *  todo).
     *
     * Two transactions are considered connected if they are both in `todo`, and one is an ancestor
     * of the other in the entire graph (so not just within `todo`), or transitively there is a
     * path of transactions connecting them. This does mean that if `todo` contains a transaction
     * and a grandparent, but misses the parent, they will still be part of the same component.
     *
     * Complexity: O(ret.Count()).
     */
    SetType GetConnectedComponent(const SetType& todo, DepGraphIndex tx) const noexcept
    {
        Assume(todo[tx]);
        Assume(todo.IsSubsetOf(m_used));
        auto to_add = SetType::Singleton(tx);
        SetType ret;
        // Breadth-first expansion: repeatedly add all relatives (within todo) of newly-added
        // transactions, until a fixed point is reached.
        do {
            SetType old = ret;
            for (auto add : to_add) {
                ret |= Descendants(add);
                ret |= Ancestors(add);
            }
            ret &= todo;
            to_add = ret - old;
        } while (to_add.Any());
        return ret;
    }

    /** Find some connected component within the subset "todo" of this graph.
     *
     * Specifically, this finds the connected component which contains the first transaction of
     * todo (if any).
     *
     * Complexity: O(ret.Count()).
     */
    SetType FindConnectedComponent(const SetType& todo) const noexcept
    {
        if (todo.None()) return todo;
        return GetConnectedComponent(todo, todo.First());
    }

    /** Determine if a subset is connected.
     *
     * Complexity: O(subset.Count()).
     */
    bool IsConnected(const SetType& subset) const noexcept
    {
        // Connected iff the component of the first element covers the whole subset.
        return FindConnectedComponent(subset) == subset;
    }

    /** Determine if this entire graph is connected.
     *
     * Complexity: O(TxCount()).
     */
    bool IsConnected() const noexcept { return IsConnected(m_used); }

    /** Append the entries of select to list in a topologically valid order.
     *
     * Complexity: O(select.Count() * log(select.Count())).
     */
    void AppendTopo(std::vector<DepGraphIndex>& list, const SetType& select) const noexcept
    {
        DepGraphIndex old_len = list.size();
        for (auto i : select) list.push_back(i);
        // Sorting by increasing ancestor-set size yields a topological order: a parent's
        // ancestor set is a strict subset of its child's, so it always has a smaller count.
        std::ranges::sort(std::span{list}.subspan(old_len), [&](DepGraphIndex a, DepGraphIndex b) noexcept {
            const auto a_anc_count = entries[a].ancestors.Count();
            const auto b_anc_count = entries[b].ancestors.Count();
            if (a_anc_count != b_anc_count) return a_anc_count < b_anc_count;
            return a < b;
        });
    }

    /** Check if this graph is acyclic. */
    bool IsAcyclic() const noexcept
    {
        // In an acyclic graph, the only transaction that is both ancestor and descendant of i
        // is i itself; any other overlap implies a dependency cycle through i.
        for (auto i : Positions()) {
            if ((Ancestors(i) & Descendants(i)) != SetType::Singleton(i)) {
                return false;
            }
        }
        return true;
    }

    /** Count the total number of (reduced) dependencies in this graph: the sum over all used
     *  positions of their reduced parent count. Complexity: O(N^2) where N=TxCount(). */
    unsigned CountDependencies() const noexcept
    {
        unsigned ret = 0;
        for (auto i : Positions()) {
            ret += GetReducedParents(i).Count();
        }
        return ret;
    }

    /** Reduce memory usage if possible. No observable effect. */
    void Compact() noexcept
    {
        entries.shrink_to_fit();
    }

    /** Dynamically allocated memory usage of this object (the entries vector). */
    size_t DynamicMemoryUsage() const noexcept
    {
        return memusage::DynamicUsage(entries);
    }
};
359 : :
360 : : /** A set of transactions together with their aggregate feerate. */
361 : : template<typename SetType>
362 : : struct SetInfo
363 : : {
364 : : /** The transactions in the set. */
365 : : SetType transactions;
366 : : /** Their combined fee and size. */
367 : : FeeFrac feerate;
368 : :
369 : : /** Construct a SetInfo for the empty set. */
370 : 5071714 : SetInfo() noexcept = default;
371 : :
372 : : /** Construct a SetInfo for a specified set and feerate. */
373 : : SetInfo(const SetType& txn, const FeeFrac& fr) noexcept : transactions(txn), feerate(fr) {}
374 : :
375 : : /** Construct a SetInfo for a given transaction in a depgraph. */
376 : 5369390 : explicit SetInfo(const DepGraph<SetType>& depgraph, DepGraphIndex pos) noexcept :
377 : 5369390 : transactions(SetType::Singleton(pos)), feerate(depgraph.FeeRate(pos)) {}
378 : :
379 : : /** Construct a SetInfo for a set of transactions in a depgraph. */
380 : : explicit SetInfo(const DepGraph<SetType>& depgraph, const SetType& txn) noexcept :
381 : : transactions(txn), feerate(depgraph.FeeRate(txn)) {}
382 : :
383 : : /** Add a transaction to this SetInfo (which must not yet be in it). */
384 : : void Set(const DepGraph<SetType>& depgraph, DepGraphIndex pos) noexcept
385 : : {
386 : : Assume(!transactions[pos]);
387 : : transactions.Set(pos);
388 : : feerate += depgraph.FeeRate(pos);
389 : : }
390 : :
391 : : /** Add the transactions of other to this SetInfo (no overlap allowed). */
392 : 35464338 : SetInfo& operator|=(const SetInfo& other) noexcept
393 : : {
394 : 58275722 : Assume(!transactions.Overlaps(other.transactions));
395 : 35464338 : transactions |= other.transactions;
396 : 35464338 : feerate += other.feerate;
397 : 35464338 : return *this;
398 : : }
399 : :
400 : : /** Remove the transactions of other from this SetInfo (which must be a subset). */
401 : 15865664 : SetInfo& operator-=(const SetInfo& other) noexcept
402 : : {
403 : 26065705 : Assume(other.transactions.IsSubsetOf(transactions));
404 : 15865664 : transactions -= other.transactions;
405 : 15865664 : feerate -= other.feerate;
406 : 15865664 : return *this;
407 : : }
408 : :
409 : : /** Compute the difference between this and other SetInfo (which must be a subset). */
410 : : SetInfo operator-(const SetInfo& other) const noexcept
411 : : {
412 : : Assume(other.transactions.IsSubsetOf(transactions));
413 : : return {transactions - other.transactions, feerate - other.feerate};
414 : : }
415 : :
416 : : /** Swap two SetInfo objects. */
417 : : friend void swap(SetInfo& a, SetInfo& b) noexcept
418 : : {
419 : : swap(a.transactions, b.transactions);
420 : : swap(a.feerate, b.feerate);
421 : : }
422 : :
423 : : /** Permit equality testing. */
424 : : friend bool operator==(const SetInfo&, const SetInfo&) noexcept = default;
425 : : };
426 : :
427 : : /** Compute the chunks of linearization as SetInfos. */
428 : : template<typename SetType>
429 : 58631 : std::vector<SetInfo<SetType>> ChunkLinearizationInfo(const DepGraph<SetType>& depgraph, std::span<const DepGraphIndex> linearization) noexcept
430 : : {
431 : 58631 : std::vector<SetInfo<SetType>> ret;
432 [ + + ]: 356307 : for (DepGraphIndex i : linearization) {
433 : : /** The new chunk to be added, initially a singleton. */
434 : 297676 : SetInfo<SetType> new_chunk(depgraph, i);
435 : : // As long as the new chunk has a higher feerate than the last chunk so far, absorb it.
436 [ + + + + ]: 324845 : while (!ret.empty() && ByRatio{new_chunk.feerate} > ByRatio{ret.back().feerate}) {
437 : 27169 : new_chunk |= ret.back();
438 : 27169 : ret.pop_back();
439 : : }
440 : : // Actually move that new chunk into the chunking.
441 : 297676 : ret.emplace_back(std::move(new_chunk));
442 : : }
443 : 58631 : return ret;
444 : : }
445 : :
446 : : /** Compute the feerates of the chunks of linearization. Identical to ChunkLinearizationInfo, but
447 : : * only returns the chunk feerates, not the corresponding transaction sets. */
448 : : template<typename SetType>
449 : 414 : std::vector<FeeFrac> ChunkLinearization(const DepGraph<SetType>& depgraph, std::span<const DepGraphIndex> linearization) noexcept
450 : : {
451 : 414 : std::vector<FeeFrac> ret;
452 [ + + ]: 2556 : for (DepGraphIndex i : linearization) {
453 : : /** The new chunk to be added, initially a singleton. */
454 : 2142 : auto new_chunk = depgraph.FeeRate(i);
455 : : // As long as the new chunk has a higher feerate than the last chunk so far, absorb it.
456 [ + + + + ]: 3021 : while (!ret.empty() && ByRatio{new_chunk} > ByRatio{ret.back()}) {
457 : 879 : new_chunk += ret.back();
458 : 879 : ret.pop_back();
459 : : }
460 : : // Actually move that new chunk into the chunking.
461 : 2142 : ret.push_back(std::move(new_chunk));
462 : : }
463 : 414 : return ret;
464 : : }
465 : :
/** Concept for function objects that return std::strong_ordering when invoked with two Args.
 *  Requires F to be regular-invocable with (Arg, Arg) and its result type to be exactly
 *  std::strong_ordering. */
template<typename F, typename Arg>
concept StrongComparator =
    std::regular_invocable<F, Arg, Arg> &&
    std::is_same_v<std::invoke_result_t<F, Arg, Arg>, std::strong_ordering>;

/** Simple default transaction ordering function for SpanningForestState::GetLinearization() and
 * Linearize(), which just sorts by DepGraphIndex (via the standard three-way comparison). */
using IndexTxOrder = std::compare_three_way;
475 : :
/** A default cost model for SFL for SetType=BitSet<64>, based on benchmarks.
 *
 * The coefficients below were derived in February 2026 as follows:
 * - On a variety of machines:
 *   - A fixed corpus of ~385000 clusters, found through random generation and fuzzing while
 *     optimizing for linearization difficulty, was linearized ~3000 times each with different
 *     random seeds — sometimes without an input linearization, sometimes with a bad one.
 *   - Cycle counts were gathered for every operation covered by this cost model, broken down
 *     by their parameters, and corrected by subtracting the cost of the measurement itself.
 *   - The top and bottom 5% of samples of each cycle-count dataset were dropped, and the
 *     remaining samples averaged.
 *   - For each operation, a least-squares linear function was fitted through the samples.
 * - The per-machine expressions were rescaled so their totals match (only the relative cost of
 *   operations matters), averaged across machines, and approximated with integer coefficients.
 *   Each cost unit corresponds to somewhere between 0.5 ns and 2.5 ns, depending on hardware.
 */
class SFLDefaultCostModel
{
    /** Running total of accumulated cost units. */
    uint64_t m_total{0};

public:
    void InitializeBegin() noexcept {}
    void InitializeEnd(int num_txns, int num_deps) noexcept
    {
        // Cost of the initialization work itself.
        m_total += 39 * num_txns;
        // The cost of producing the final linearization is charged up front here, because the
        // cost budget decision has to be made before GetLinearization is invoked.
        m_total += 48 * num_txns + 4 * num_deps;
    }
    void GetLinearizationBegin() noexcept {}
    void GetLinearizationEnd(int num_txns, int num_deps) noexcept
    {
        // Intentionally free: the final linearization was already accounted for in
        // InitializeEnd (see above). This hook exists so benchmarks can override it easily.
    }
    void MakeTopologicalBegin() noexcept {}
    void MakeTopologicalEnd(int num_chunks, int num_steps) noexcept
    {
        m_total += 20 * num_chunks + 28 * num_steps;
    }
    void StartOptimizingBegin() noexcept {}
    void StartOptimizingEnd(int num_chunks) noexcept { m_total += 13 * num_chunks; }
    void ActivateBegin() noexcept {}
    void ActivateEnd(int num_deps) noexcept { m_total += 10 * num_deps + 1; }
    void DeactivateBegin() noexcept {}
    void DeactivateEnd(int num_deps) noexcept { m_total += 11 * num_deps + 8; }
    void MergeChunksBegin() noexcept {}
    void MergeChunksMid(int num_txns) noexcept { m_total += 2 * num_txns; }
    void MergeChunksEnd(int num_steps) noexcept { m_total += 3 * num_steps + 5; }
    void PickMergeCandidateBegin() noexcept {}
    void PickMergeCandidateEnd(int num_steps) noexcept { m_total += 8 * num_steps; }
    void PickChunkToOptimizeBegin() noexcept {}
    void PickChunkToOptimizeEnd(int num_steps) noexcept { m_total += num_steps + 4; }
    void PickDependencyToSplitBegin() noexcept {}
    void PickDependencyToSplitEnd(int num_txns) noexcept { m_total += 8 * num_txns + 9; }
    void StartMinimizingBegin() noexcept {}
    void StartMinimizingEnd(int num_chunks) noexcept { m_total += 18 * num_chunks; }
    void MinimizeStepBegin() noexcept {}
    void MinimizeStepMid(int num_txns) noexcept { m_total += 11 * num_txns + 11; }
    void MinimizeStepEnd(bool split) noexcept { m_total += 17 * split + 7; }

    /** Retrieve the total accumulated cost so far. */
    uint64_t GetCost() const noexcept { return m_total; }
};
546 : :
547 : : /** Class to represent the internal state of the spanning-forest linearization (SFL) algorithm.
548 : : *
549 : : * At all times, each dependency is marked as either "active" or "inactive". The subset of active
550 : : * dependencies is the state of the SFL algorithm. The implementation maintains several other
551 : : * values to speed up operations, but everything is ultimately a function of what that subset of
552 : : * active dependencies is.
553 : : *
554 : : * Given such a subset, define a chunk as the set of transactions that are connected through active
555 : : * dependencies (ignoring their parent/child direction). Thus, every state implies a particular
556 : : * partitioning of the graph into chunks (including potential singletons). In the extreme, each
557 : : * transaction may be in its own chunk, or in the other extreme all transactions may form a single
558 : : * chunk. A chunk's feerate is its total fee divided by its total size.
559 : : *
560 : : * The algorithm consists of switching dependencies between active and inactive. The final
561 : : * linearization that is produced at the end consists of these chunks, sorted from high to low
562 : : * feerate, each individually sorted in an arbitrary but topological (= no child before parent)
563 : : * way.
564 : : *
565 : : * We define four quality properties the state can have:
566 : : *
567 : : * - acyclic: The state is acyclic whenever no cycle of active dependencies exists within the
568 : : * graph, ignoring the parent/child direction. This is equivalent to saying that within
569 : : * each chunk the set of active dependencies form a tree, and thus the overall set of
570 : : * active dependencies in the graph form a spanning forest, giving the algorithm its
571 : : * name. Being acyclic is also equivalent to every chunk of N transactions having
572 : : * exactly N-1 active dependencies.
573 : : *
574 : : * For example in a diamond graph, D->{B,C}->A, the 4 dependencies cannot be
575 : : * simultaneously active. If at least one is inactive, the state is acyclic.
576 : : *
577 : : * The algorithm maintains an acyclic state at *all* times as an invariant. This implies
578 : : * that activating a dependency always corresponds to merging two chunks, and that
579 : : * deactivating one always corresponds to splitting two chunks.
580 : : *
581 : : * - topological: We say the state is topological whenever it is acyclic and no inactive dependency
582 : : * exists between two distinct chunks such that the child chunk has higher or equal
583 : : * feerate than the parent chunk.
584 : : *
585 : : * The relevance is that whenever the state is topological, the produced output
586 : : * linearization will be topological too (i.e., not have children before parents).
587 : : * Note that the "or equal" part of the definition matters: if not, one can end up
588 : : * in a situation with mutually-dependent equal-feerate chunks that cannot be
589 : : * linearized. For example C->{A,B} and D->{A,B}, with C->A and D->B active. The AC
590 : : * chunk depends on DB through C->B, and the BD chunk depends on AC through D->A.
591 : : * Merging them into a single ABCD chunk fixes this.
592 : : *
593 : : * The algorithm attempts to keep the state topological as much as possible, so it
594 : : * can be interrupted to produce an output whenever, but will sometimes need to
595 : : * temporarily deviate from it when improving the state.
596 : : *
597 : : * - optimal: For every active dependency, define its top and bottom set as the set of transactions
598 : : * in the chunks that would result if the dependency were deactivated; the top being the
599 : : * one with the dependency's parent, and the bottom being the one with the child. Note
600 : : * that due to acyclicity, every deactivation splits a chunk exactly in two.
601 : : *
602 : : * We say the state is optimal whenever it is topological and it has no active
603 : : * dependency whose top feerate is strictly higher than its bottom feerate. The
604 : : * relevance is that it can be proven that whenever the state is optimal, the produced
605 : : * linearization will also be optimal (in the convexified feerate diagram sense). It can
606 : : * also be proven that for every graph at least one optimal state exists.
607 : : *
608 : : * Note that it is possible for the SFL state to not be optimal, but the produced
609 : : * linearization to still be optimal. This happens when the chunks of a state are
610 : : * identical to those of an optimal state, but the exact set of active dependencies
611 : : * within a chunk differ in such a way that the state optimality condition is not
 612 : :  *                satisfied. Thus, the state being optimal is more of a "the eventual output
 613 : :  *                is *known* to be optimal" guarantee.
614 : : *
615 : : * - minimal: We say the state is minimal when it is:
616 : : * - acyclic
617 : : * - topological, except that inactive dependencies between equal-feerate chunks are
618 : : * allowed as long as they do not form a loop.
619 : : * - like optimal, no active dependencies whose top feerate is strictly higher than
620 : : * the bottom feerate are allowed.
621 : : * - no chunk contains a proper non-empty subset which includes all its own in-chunk
622 : : * dependencies of the same feerate as the chunk itself.
623 : : *
624 : : * A minimal state effectively corresponds to an optimal state, where every chunk has
625 : : * been split into its minimal equal-feerate components.
626 : : *
627 : : * The algorithm terminates whenever a minimal state is reached.
628 : : *
629 : : *
630 : : * This leads to the following high-level algorithm:
631 : : * - Start with all dependencies inactive, and thus all transactions in their own chunk. This is
632 : : * definitely acyclic.
633 : : * - Activate dependencies (merging chunks) until the state is topological.
634 : : * - Loop until optimal (no dependencies with higher-feerate top than bottom), or time runs out:
635 : : * - Deactivate a violating dependency, potentially making the state non-topological.
636 : : * - Activate other dependencies to make the state topological again.
637 : : * - If there is time left and the state is optimal:
638 : : * - Attempt to split chunks into equal-feerate parts without mutual dependencies between them.
639 : : * When this succeeds, recurse into them.
640 : : * - If no such chunks can be found, the state is minimal.
641 : : * - Output the chunks from high to low feerate, each internally sorted topologically.
642 : : *
643 : : * When merging, we always either:
644 : : * - Merge upwards: merge a chunk with the lowest-feerate other chunk it depends on, among those
645 : : * with lower or equal feerate than itself.
646 : : * - Merge downwards: merge a chunk with the highest-feerate other chunk that depends on it, among
647 : : * those with higher or equal feerate than itself.
648 : : *
649 : : * Using these strategies in the improvement loop above guarantees that the output linearization
650 : : * after a deactivate + merge step is never worse or incomparable (in the convexified feerate
651 : : * diagram sense) than the output linearization that would be produced before the step. With that,
652 : : * we can refine the high-level algorithm to:
653 : : * - Start with all dependencies inactive.
654 : : * - Perform merges as described until none are possible anymore, making the state topological.
655 : : * - Loop until optimal or time runs out:
656 : : * - Pick a dependency D to deactivate among those with higher feerate top than bottom.
657 : : * - Deactivate D, causing the chunk it is in to split into top T and bottom B.
658 : : * - Do an upwards merge of T, if possible. If so, repeat the same with the merged result.
659 : : * - Do a downwards merge of B, if possible. If so, repeat the same with the merged result.
660 : : * - Split chunks further to obtain a minimal state, see below.
661 : : * - Output the chunks from high to low feerate, each internally sorted topologically.
662 : : *
663 : : * Instead of performing merges arbitrarily to make the initial state topological, it is possible
664 : : * to do so guided by an existing linearization. This has the advantage that the state's would-be
665 : : * output linearization is immediately as good as the existing linearization it was based on:
666 : : * - Start with all dependencies inactive.
667 : : * - For each transaction t in the existing linearization:
668 : : * - Find the chunk C that transaction is in (which will be singleton).
669 : : * - Do an upwards merge of C, if possible. If so, repeat the same with the merged result.
670 : : * No downwards merges are needed in this case.
671 : : *
672 : : * After reaching an optimal state, it can be transformed into a minimal state by attempting to
673 : : * split chunks further into equal-feerate parts. To do so, pick a specific transaction in each
674 : : * chunk (the pivot), and rerun the above split-then-merge procedure again:
675 : : * - first, while pretending the pivot transaction has an infinitesimally higher (or lower) fee
676 : : * than it really has. If a split exists with the pivot in the top part (or bottom part), this
677 : : * will find it.
678 : : * - if that fails to split, repeat while pretending the pivot transaction has an infinitesimally
679 : : * lower (or higher) fee. If a split exists with the pivot in the bottom part (or top part), this
680 : : * will find it.
681 : : * - if either succeeds, repeat the procedure for the newly found chunks to split them further.
682 : : * If not, the chunk is already minimal.
683 : : * If the chunk can be split into equal-feerate parts, then the pivot must exist in either the top
684 : : * or bottom part of that potential split. By trying both with the same pivot, if a split exists,
685 : : * it will be found.
686 : : *
687 : : * What remains to be specified are a number of heuristics:
688 : : *
689 : : * - How to decide which chunks to merge:
690 : : * - The merge upwards and downward rules specify that the lowest-feerate respectively
691 : : * highest-feerate candidate chunk is merged with, but if there are multiple equal-feerate
692 : : * candidates, a uniformly random one among them is picked.
693 : : *
694 : : * - How to decide what dependency to activate (when merging chunks):
695 : : * - After picking two chunks to be merged (see above), a uniformly random dependency between the
696 : : * two chunks is activated.
697 : : *
698 : : * - How to decide which chunk to find a dependency to split in:
699 : : * - A round-robin queue of chunks to improve is maintained. The initial ordering of this queue
700 : : * is uniformly randomly permuted.
701 : : *
702 : : * - How to decide what dependency to deactivate (when splitting chunks):
703 : : * - Inside the selected chunk (see above), among the dependencies whose top feerate is strictly
704 : : * higher than its bottom feerate in the selected chunk, if any, a uniformly random dependency
705 : : * is deactivated.
706 : : * - After every split, it is possible that the top and the bottom chunk merge with each other
707 : : * again in the merge sequence (through a top->bottom dependency, not through the deactivated
708 : : * one, which was bottom->top). Call this a self-merge. If a self-merge does not occur after
709 : : * a split, the resulting linearization is strictly improved (the area under the convexified
710 : : * feerate diagram increases by at least gain/2), while self-merges do not change it.
711 : : *
712 : : * - How to decide the exact output linearization:
713 : : * - When there are multiple equal-feerate chunks with no dependencies between them, pick the
714 : : * smallest one first. If there are multiple smallest ones, pick the one that contains the
715 : : * last transaction (according to the provided fallback order) last (note that this is not the
716 : : * same as picking the chunk with the first transaction first).
717 : : * - Within chunks, pick among all transactions without missing dependencies the one with the
718 : : * highest individual feerate. If there are multiple ones with the same individual feerate,
719 : : * pick the smallest first. If there are multiple with the same fee and size, pick the one
720 : : * that sorts first according to the fallback order first.
721 : : */
template<typename SetType, typename CostModel = SFLDefaultCostModel>
class SpanningForestState
{
private:
    /** Internal RNG, used for all random choices (merge candidates, dependency picks,
     * tiebreaks). Deterministic for a given seed. */
    InsecureRandomContext m_rng;

    /** Data type to represent indexing into m_tx_data. */
    using TxIdx = DepGraphIndex;
    /** Data type to represent indexing into m_set_info. Use the smallest type possible to improve
     * cache locality. */
    using SetIdx = std::conditional_t<(SetType::Size() <= 0xff),
                                      uint8_t,
                                      std::conditional_t<(SetType::Size() <= 0xffff),
                                                         uint16_t,
                                                         uint32_t>>;
    /** An invalid SetIdx (all-ones bit pattern of the chosen index type). */
    static constexpr SetIdx INVALID_SET_IDX = SetIdx(-1);
    /** Structure with information about a single transaction. */
    struct TxData {
        /** The top set for every active child dependency this transaction has, indexed by child
         * TxIdx. Only defined for indexes in active_children. */
        std::array<SetIdx, SetType::Size()> dep_top_idx;
        /** The set of parent transactions of this transaction. Immutable after construction. */
        SetType parents;
        /** The set of child transactions of this transaction. Immutable after construction. */
        SetType children;
        /** The set of child transactions reachable through an active dependency. Always a subset
         * of children (Activate() requires the dependency to exist before activating it). */
        SetType active_children;
        /** Which chunk this transaction belongs to (index into m_set_info, member of
         * m_chunk_idxs). Updated by Activate()/Deactivate() when chunks merge or split. */
        SetIdx chunk_idx;
    };
755 : :
    /** The set of all TxIdx's of transactions in the cluster indexing into m_tx_data. */
    SetType m_transaction_idxs;
    /** The set of all chunk SetIdx's. This excludes the SetIdxs that refer to active
     * dependencies' tops. */
    SetType m_chunk_idxs;
    /** The set of all SetIdx's that appear in m_suboptimal_chunks. Note that they do not need to
     * be chunks: some of these sets may have been converted to a dependency's top set since being
     * added to m_suboptimal_chunks. Used to avoid queuing the same index twice. */
    SetType m_suboptimal_idxs;
    /** Information about each transaction (and chunks). Keeps the "holes" from DepGraph during
     * construction. Indexed by TxIdx. */
    std::vector<TxData> m_tx_data;
    /** Information about each set (chunk, or active dependency top set). Indexed by SetIdx. */
    std::vector<SetInfo<SetType>> m_set_info;
    /** For each chunk, indexed by SetIdx, the set of out-of-chunk reachable transactions, in the
     * upwards (.first = parents) and downwards (.second = children) direction. Maintained
     * incrementally by Activate()/Deactivate(); verified against GetReachable() in SanityCheck. */
    std::vector<std::pair<SetType, SetType>> m_reachable;
    /** A FIFO of chunk SetIdxs for chunks that may be improved still. */
    VecDeque<SetIdx> m_suboptimal_chunks;
    /** A FIFO of chunk indexes with a pivot transaction in them, and a flag to indicate their
     * status:
     * - bit 1: currently attempting to move the pivot down, rather than up.
     * - bit 2: this is the second stage, so we have already tried moving the pivot in the other
     *   direction.
     */
    VecDeque<std::tuple<SetIdx, TxIdx, unsigned>> m_nonminimal_chunks;

    /** The DepGraph we are trying to linearize. */
    const DepGraph<SetType>& m_depgraph;

    /** Accounting for the cost of this computation. */
    CostModel m_cost;
788 : :
789 : : /** Pick a random transaction within a set (which must be non-empty). */
790 : 1791544 : TxIdx PickRandomTx(const SetType& tx_idxs) noexcept
791 : : {
792 : 2898944 : Assume(tx_idxs.Any());
793 : 1791544 : unsigned pos = m_rng.randrange<unsigned>(tx_idxs.Count());
794 [ + - + - ]: 4390604 : for (auto tx_idx : tx_idxs) {
795 [ + + ]: 3706460 : if (pos == 0) return tx_idx;
796 : 1914916 : --pos;
797 : : }
798 : 0 : Assume(false);
799 : 0 : return TxIdx(-1);
800 : : }
801 : :
802 : : /** Find the set of out-of-chunk transactions reachable from tx_idxs, both in upwards and
803 : : * downwards direction. Only used by SanityCheck to verify the precomputed reachable sets in
804 : : * m_reachable that are maintained by Activate/Deactivate. */
805 : : std::pair<SetType, SetType> GetReachable(const SetType& tx_idxs) const noexcept
806 : : {
807 : : SetType parents, children;
808 : : for (auto tx_idx : tx_idxs) {
809 : : const auto& tx_data = m_tx_data[tx_idx];
810 : : parents |= tx_data.parents;
811 : : children |= tx_data.children;
812 : : }
813 : : return {parents - tx_idxs, children - tx_idxs};
814 : : }
815 : :
    /** Make the inactive dependency from child to parent, which must not be in the same chunk
     * already, active. Returns the merged chunk idx. */
    SetIdx Activate(TxIdx parent_idx, TxIdx child_idx) noexcept
    {
        m_cost.ActivateBegin();
        // Gather and check information about the parent and child transactions.
        auto& parent_data = m_tx_data[parent_idx];
        auto& child_data = m_tx_data[child_idx];
        Assume(parent_data.children[child_idx]);       // dependency must exist...
        Assume(!parent_data.active_children[child_idx]); // ...and must not be active yet
        // Get the set index of the chunks the parent and child are currently in. The parent chunk
        // will become the top set of the newly activated dependency, while the child chunk will be
        // grown to become the merged chunk.
        auto parent_chunk_idx = parent_data.chunk_idx;
        auto child_chunk_idx = child_data.chunk_idx;
        Assume(parent_chunk_idx != child_chunk_idx);
        Assume(m_chunk_idxs[parent_chunk_idx]);
        Assume(m_chunk_idxs[child_chunk_idx]);
        auto& top_info = m_set_info[parent_chunk_idx];
        auto& bottom_info = m_set_info[child_chunk_idx];

        // Consider the following example:
        //
        //   A       A    There are two chunks, ABC and DEF, and the inactive E->C dependency
        //  / \     / \   is activated, resulting in a single chunk ABCDEF.
        // B   C   B   C
        //     : ==>   |  Dependency | top set before | top set after | change
        // D   E   D   E  B->A       | AC             | ACDEF         | +DEF
        //  \ /     \ /   C->A       | AB             | AB            |
        //   F       F    F->D       | D              | D             |
        //                F->E       | E              | ABCE          | +ABC
        //
        // The common pattern here is that any dependency which has the parent or child of the
        // dependency being activated (E->C here) in its top set, will have the opposite part added
        // to it. This is true for B->A and F->E, but not for C->A and F->D.
        //
        // Traverse the old parent chunk top_info (ABC in example), and add bottom_info (DEF) to
        // every dependency's top set which has the parent (C) in it. At the same time, change the
        // chunk_idx for each to be child_chunk_idx, which becomes the set for the merged chunk.
        for (auto tx_idx : top_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            tx_data.chunk_idx = child_chunk_idx;
            for (auto dep_child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
                if (dep_top_info.transactions[parent_idx]) dep_top_info |= bottom_info;
            }
        }
        // Traverse the old child chunk bottom_info (DEF in example), and add top_info (ABC) to
        // every dependency's top set which has the child (E) in it.
        for (auto tx_idx : bottom_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            for (auto dep_child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
                if (dep_top_info.transactions[child_idx]) dep_top_info |= top_info;
            }
        }
        // Merge top_info into bottom_info, which becomes the merged chunk.
        bottom_info |= top_info;
        // Compute merged sets of reachable transactions from the new chunk, based on the input
        // chunks' reachable sets: union the two, then remove anything now inside the merged chunk.
        m_reachable[child_chunk_idx].first |= m_reachable[parent_chunk_idx].first;
        m_reachable[child_chunk_idx].second |= m_reachable[parent_chunk_idx].second;
        m_reachable[child_chunk_idx].first -= bottom_info.transactions;
        m_reachable[child_chunk_idx].second -= bottom_info.transactions;
        // Make parent chunk the set for the new active dependency. Its SetIdx is repurposed as
        // the dependency's top set, so it is no longer a chunk index.
        parent_data.dep_top_idx[child_idx] = parent_chunk_idx;
        parent_data.active_children.Set(child_idx);
        m_chunk_idxs.Reset(parent_chunk_idx);
        // Return the newly merged chunk.
        m_cost.ActivateEnd(/*num_deps=*/bottom_info.transactions.Count() - 1);
        return child_chunk_idx;
    }
888 : :
    /** Make a specified active dependency inactive. Returns the created parent and child chunk
     * indexes. */
    std::pair<SetIdx, SetIdx> Deactivate(TxIdx parent_idx, TxIdx child_idx) noexcept
    {
        m_cost.DeactivateBegin();
        // Gather and check information about the parent transactions.
        auto& parent_data = m_tx_data[parent_idx];
        Assume(parent_data.children[child_idx]);
        Assume(parent_data.active_children[child_idx]);
        // Get the top set of the active dependency (which will become the parent chunk) and the
        // chunk set the transactions are currently in (which will become the bottom chunk).
        auto parent_chunk_idx = parent_data.dep_top_idx[child_idx];
        auto child_chunk_idx = parent_data.chunk_idx;
        Assume(parent_chunk_idx != child_chunk_idx);
        Assume(m_chunk_idxs[child_chunk_idx]);
        Assume(!m_chunk_idxs[parent_chunk_idx]); // top set, not a chunk
        auto& top_info = m_set_info[parent_chunk_idx];
        auto& bottom_info = m_set_info[child_chunk_idx];

        // Remove the active dependency. Its top set index becomes a chunk index again.
        parent_data.active_children.Reset(child_idx);
        m_chunk_idxs.Set(parent_chunk_idx);
        // Remember the pre-split transaction count for cost accounting below.
        auto ntx = bottom_info.transactions.Count();
        // Subtract the top_info from the bottom_info, as it will become the child chunk.
        bottom_info -= top_info;
        // See the comment above in Activate(). We perform the opposite operations here, removing
        // instead of adding. Simultaneously, aggregate the top/bottom's union of parents/children.
        SetType top_parents, top_children;
        for (auto tx_idx : top_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            tx_data.chunk_idx = parent_chunk_idx;
            top_parents |= tx_data.parents;
            top_children |= tx_data.children;
            for (auto dep_child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
                if (dep_top_info.transactions[parent_idx]) dep_top_info -= bottom_info;
            }
        }
        SetType bottom_parents, bottom_children;
        for (auto tx_idx : bottom_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            bottom_parents |= tx_data.parents;
            bottom_children |= tx_data.children;
            for (auto dep_child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
                if (dep_top_info.transactions[child_idx]) dep_top_info -= top_info;
            }
        }
        // Compute the new sets of reachable transactions for each new chunk, based on the
        // top/bottom parents and children computed above (excluding the chunk's own members).
        m_reachable[parent_chunk_idx].first = top_parents - top_info.transactions;
        m_reachable[parent_chunk_idx].second = top_children - top_info.transactions;
        m_reachable[child_chunk_idx].first = bottom_parents - bottom_info.transactions;
        m_reachable[child_chunk_idx].second = bottom_children - bottom_info.transactions;
        // Return the two new set idxs.
        m_cost.DeactivateEnd(/*num_deps=*/ntx - 1);
        return {parent_chunk_idx, child_chunk_idx};
    }
947 : :
    /** Activate a dependency from the bottom set to the top set, which must exist. Return the
     * index of the merged chunk. */
    SetIdx MergeChunks(SetIdx top_idx, SetIdx bottom_idx) noexcept
    {
        m_cost.MergeChunksBegin();
        Assume(m_chunk_idxs[top_idx]);
        Assume(m_chunk_idxs[bottom_idx]);
        auto& top_chunk_info = m_set_info[top_idx];
        auto& bottom_chunk_info = m_set_info[bottom_idx];
        // First pass: count the number of dependencies between bottom_chunk and top_chunk.
        unsigned num_deps{0};
        for (auto tx_idx : top_chunk_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            num_deps += (tx_data.children & bottom_chunk_info.transactions).Count();
        }
        m_cost.MergeChunksMid(/*num_txns=*/top_chunk_info.transactions.Count());
        Assume(num_deps > 0);
        // Uniformly randomly pick one of them and activate it.
        unsigned pick = m_rng.randrange(num_deps);
        unsigned num_steps = 0;
        // Second pass: walk the same dependencies in the same order, counting down until the
        // pick'th one is reached, then activate exactly that one.
        for (auto tx_idx : top_chunk_info.transactions) {
            ++num_steps;
            auto& tx_data = m_tx_data[tx_idx];
            auto intersect = tx_data.children & bottom_chunk_info.transactions;
            auto count = intersect.Count();
            if (pick < count) {
                // The picked dependency has tx_idx as parent; find the matching child.
                for (auto child_idx : intersect) {
                    if (pick == 0) {
                        m_cost.MergeChunksEnd(/*num_steps=*/num_steps);
                        return Activate(tx_idx, child_idx);
                    }
                    --pick;
                }
                Assume(false); // unreachable: pick < count guarantees a hit above
                break;
            }
            pick -= count;
        }
        Assume(false); // unreachable: pick < num_deps guarantees a hit in the loop
        return INVALID_SET_IDX;
    }
989 : :
990 : : /** Activate a dependency from chunk_idx to merge_chunk_idx (if !DownWard), or a dependency
991 : : * from merge_chunk_idx to chunk_idx (if DownWard). Return the index of the merged chunk. */
992 : : template<bool DownWard>
993 : 3784757 : SetIdx MergeChunksDirected(SetIdx chunk_idx, SetIdx merge_chunk_idx) noexcept
994 : : {
995 : : if constexpr (DownWard) {
996 : 488166 : return MergeChunks(chunk_idx, merge_chunk_idx);
997 : : } else {
998 : 3296591 : return MergeChunks(merge_chunk_idx, chunk_idx);
999 : : }
1000 : : }
1001 : :
    /** Determine which chunk to merge chunk_idx with, or INVALID_SET_IDX if none. */
    template<bool DownWard>
    SetIdx PickMergeCandidate(SetIdx chunk_idx) noexcept
    {
        m_cost.PickMergeCandidateBegin();
        Assume(m_chunk_idxs[chunk_idx]);
        /** Information about the chunk. */
        auto& chunk_info = m_set_info[chunk_idx];
        // Iterate over all chunks reachable from this one. For those depended-on chunks,
        // remember the highest-feerate (if DownWard) or lowest-feerate (if !DownWard) one.
        // If multiple equal-feerate candidate chunks to merge with exist, pick a random one
        // among them.

        /** The minimum feerate (if downward) or maximum feerate (if upward) to consider when
         * looking for candidate chunks to merge with. Initially, this is the original chunk's
         * feerate, but is updated to be the current best candidate whenever one is found. */
        FeeFrac best_other_chunk_feerate = chunk_info.feerate;
        /** The chunk index for the best candidate chunk to merge with. INVALID_SET_IDX if none. */
        SetIdx best_other_chunk_idx = INVALID_SET_IDX;
        /** We generate random tiebreak values to pick between equal-feerate candidate chunks.
         * This variable stores the tiebreak of the current best candidate. */
        uint64_t best_other_chunk_tiebreak{0};

        /** Which parent/child transactions we still need to process the chunks for. */
        auto todo = DownWard ? m_reachable[chunk_idx].second : m_reachable[chunk_idx].first;
        unsigned steps = 0;
        while (todo.Any()) {
            ++steps;
            // Find a chunk for a transaction in todo, and remove all its transactions from todo,
            // so every reachable chunk is considered exactly once.
            auto reached_chunk_idx = m_tx_data[todo.First()].chunk_idx;
            auto& reached_chunk_info = m_set_info[reached_chunk_idx];
            todo -= reached_chunk_info.transactions;
            // See if it has an acceptable feerate (cmp <= 0 means at least as good as the best
            // candidate so far, in the direction-appropriate ordering).
            auto cmp = DownWard ? ByRatio{best_other_chunk_feerate} <=> ByRatio{reached_chunk_info.feerate}
                                : ByRatio{reached_chunk_info.feerate} <=> ByRatio{best_other_chunk_feerate};
            if (cmp > 0) continue;
            // A fresh random tiebreak per equal-feerate candidate yields a uniform choice among
            // them; a strictly better feerate (cmp < 0) always wins regardless of tiebreak.
            uint64_t tiebreak = m_rng.rand64();
            if (cmp < 0 || tiebreak >= best_other_chunk_tiebreak) {
                best_other_chunk_feerate = reached_chunk_info.feerate;
                best_other_chunk_idx = reached_chunk_idx;
                best_other_chunk_tiebreak = tiebreak;
            }
        }
        Assume(steps <= m_set_info.size());

        m_cost.PickMergeCandidateEnd(/*num_steps=*/steps);
        return best_other_chunk_idx;
    }
1050 : :
1051 : : /** Perform an upward or downward merge step, on the specified chunk. Returns the merged chunk,
1052 : : * or INVALID_SET_IDX if no merge took place. */
1053 : : template<bool DownWard>
1054 : 9215362 : SetIdx MergeStep(SetIdx chunk_idx) noexcept
1055 : : {
1056 : 9215362 : auto merge_chunk_idx = PickMergeCandidate<DownWard>(chunk_idx);
1057 [ + + ]: 9215362 : if (merge_chunk_idx == INVALID_SET_IDX) return INVALID_SET_IDX;
1058 : 3784757 : chunk_idx = MergeChunksDirected<DownWard>(chunk_idx, merge_chunk_idx);
1059 : 3784757 : Assume(chunk_idx != INVALID_SET_IDX);
1060 : 3784757 : return chunk_idx;
1061 : : }
1062 : :
1063 : : /** Perform an upward or downward merge sequence on the specified chunk. */
1064 : : template<bool DownWard>
1065 : 437074 : void MergeSequence(SetIdx chunk_idx) noexcept
1066 : : {
1067 : 437074 : Assume(m_chunk_idxs[chunk_idx]);
1068 : 48896 : while (true) {
1069 : 485970 : auto merged_chunk_idx = MergeStep<DownWard>(chunk_idx);
1070 [ + + ]: 485970 : if (merged_chunk_idx == INVALID_SET_IDX) break;
1071 : 48896 : chunk_idx = merged_chunk_idx;
1072 : : }
1073 : : // Add the chunk to the queue of improvable chunks, if it wasn't already there.
1074 [ + + ]: 437074 : if (!m_suboptimal_idxs[chunk_idx]) {
1075 : 425140 : m_suboptimal_idxs.Set(chunk_idx);
1076 : 425140 : m_suboptimal_chunks.push_back(chunk_idx);
1077 : : }
1078 : 437074 : }
1079 : :
    /** Split a chunk, and then merge the resulting two chunks to make the graph topological
     * again. */
    void Improve(TxIdx parent_idx, TxIdx child_idx) noexcept
    {
        // Deactivate the specified dependency, splitting it into two new chunks: a top containing
        // the parent, and a bottom containing the child. The top should have a higher feerate.
        auto [parent_chunk_idx, child_chunk_idx] = Deactivate(parent_idx, child_idx);

        // At this point we have exactly two chunks which may violate topology constraints (the
        // parent chunk and child chunk that were produced by deactivation). We can fix
        // these using just merge sequences, one upwards and one downwards, avoiding the need for a
        // full MakeTopological.
        const auto& parent_reachable = m_reachable[parent_chunk_idx].first;
        const auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions;
        if (parent_reachable.Overlaps(child_chunk_txn)) {
            // The parent chunk has a dependency on a transaction in the child chunk. In this case,
            // the parent needs to merge back with the child chunk (a self-merge), and no other
            // merges are needed. Special-case this, so the overhead of PickMergeCandidate and
            // MergeSequence can be avoided.

            // In the self-merge, the roles reverse: the parent chunk (from the split) depends
            // on the child chunk, so child_chunk_idx is the "top" and parent_chunk_idx is the
            // "bottom" for MergeChunks.
            auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx);
            // Re-queue the merged chunk for further improvement attempts, if not queued already.
            if (!m_suboptimal_idxs[merged_chunk_idx]) {
                m_suboptimal_idxs.Set(merged_chunk_idx);
                m_suboptimal_chunks.push_back(merged_chunk_idx);
            }
        } else {
            // Merge the top chunk with lower-feerate chunks it depends on.
            MergeSequence<false>(parent_chunk_idx);
            // Merge the bottom chunk with higher-feerate chunks that depend on it.
            MergeSequence<true>(child_chunk_idx);
        }
    }
1115 : :
1116 : : /** Determine the next chunk to optimize, or INVALID_SET_IDX if none. */
1117 : 2558244 : SetIdx PickChunkToOptimize() noexcept
1118 : : {
1119 : : m_cost.PickChunkToOptimizeBegin();
1120 : 2558244 : unsigned steps{0};
1121 [ + - ]: 2565323 : while (!m_suboptimal_chunks.empty()) {
1122 : 2565323 : ++steps;
1123 : : // Pop an entry from the potentially-suboptimal chunk queue.
1124 : 2565323 : SetIdx chunk_idx = m_suboptimal_chunks.front();
1125 : 2565323 : Assume(m_suboptimal_idxs[chunk_idx]);
1126 : 2565323 : m_suboptimal_idxs.Reset(chunk_idx);
1127 : 2565323 : m_suboptimal_chunks.pop_front();
1128 [ + + ]: 2565323 : if (m_chunk_idxs[chunk_idx]) {
1129 : 2558244 : m_cost.PickChunkToOptimizeEnd(/*num_steps=*/steps);
1130 : 2558244 : return chunk_idx;
1131 : : }
1132 : : // If what was popped is not currently a chunk, continue. This may
1133 : : // happen when a split chunk merges in Improve() with one or more existing chunks that
1134 : : // are themselves on the suboptimal queue already.
1135 : : }
1136 : 0 : m_cost.PickChunkToOptimizeEnd(/*num_steps=*/steps);
1137 : 0 : return INVALID_SET_IDX;
1138 : : }
1139 : :
    /** Find a (parent, child) dependency to deactivate in chunk_idx, or (-1, -1) if none. */
    std::pair<TxIdx, TxIdx> PickDependencyToSplit(SetIdx chunk_idx) noexcept
    {
        m_cost.PickDependencyToSplitBegin();
        Assume(m_chunk_idxs[chunk_idx]);
        auto& chunk_info = m_set_info[chunk_idx];

        // Remember the best dependency {par, chl} seen so far.
        std::pair<TxIdx, TxIdx> candidate_dep = {TxIdx(-1), TxIdx(-1)};
        uint64_t candidate_tiebreak = 0;
        // Iterate over all transactions.
        for (auto tx_idx : chunk_info.transactions) {
            const auto& tx_data = m_tx_data[tx_idx];
            // Iterate over all active child dependencies of the transaction.
            for (auto child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]];
                // Skip if this dependency is ineligible (the top chunk that would be created
                // does not have higher feerate than the chunk it is currently part of).
                auto cmp = ByRatio{dep_top_info.feerate} <=> ByRatio{chunk_info.feerate};
                if (cmp <= 0) continue;
                // Generate a random tiebreak for this dependency, and reject it if its tiebreak
                // is worse than the best so far. This means that among all eligible
                // dependencies, a uniformly random one will be chosen.
                uint64_t tiebreak = m_rng.rand64();
                if (tiebreak < candidate_tiebreak) continue;
                // Remember this as our (new) candidate dependency.
                candidate_dep = {tx_idx, child_idx};
                candidate_tiebreak = tiebreak;
            }
        }
        m_cost.PickDependencyToSplitEnd(/*num_txns=*/chunk_info.transactions.Count());
        // Still {-1, -1} if no eligible dependency was found.
        return candidate_dep;
    }
1173 : :
1174 : : public:
    /** Construct a spanning forest for the given DepGraph, with every transaction in its own chunk
     * (not topological). */
    explicit SpanningForestState(const DepGraph<SetType>& depgraph LIFETIMEBOUND, uint64_t rng_seed, const CostModel& cost = CostModel{}) noexcept :
        m_rng(rng_seed), m_depgraph(depgraph), m_cost(cost)
    {
        m_cost.InitializeBegin();
        m_transaction_idxs = depgraph.Positions();
        auto num_transactions = m_transaction_idxs.Count();
        // m_tx_data is indexed by TxIdx, so it must span the full position range (including any
        // holes in the DepGraph); the per-set vectors only need one slot per actual transaction.
        m_tx_data.resize(depgraph.PositionRange());
        m_set_info.resize(num_transactions);
        m_reachable.resize(num_transactions);
        size_t num_chunks = 0;
        size_t num_deps = 0;
        for (auto tx_idx : m_transaction_idxs) {
            // Fill in transaction data.
            auto& tx_data = m_tx_data[tx_idx];
            tx_data.parents = depgraph.GetReducedParents(tx_idx);
            // Mirror each parent link as a child link on the parent's side.
            for (auto parent_idx : tx_data.parents) {
                m_tx_data[parent_idx].children.Set(tx_idx);
            }
            num_deps += tx_data.parents.Count();
            // Create a singleton chunk for it. Chunk SetIdxs are assigned densely (0..n-1).
            tx_data.chunk_idx = num_chunks;
            m_set_info[num_chunks++] = SetInfo(depgraph, tx_idx);
        }
        // Set the reachable transactions for each chunk to the transactions' parents and children.
        for (SetIdx chunk_idx = 0; chunk_idx < num_transactions; ++chunk_idx) {
            auto& tx_data = m_tx_data[m_set_info[chunk_idx].transactions.First()];
            m_reachable[chunk_idx].first = tx_data.parents;
            m_reachable[chunk_idx].second = tx_data.children;
        }
        Assume(num_chunks == num_transactions);
        // Mark all chunk sets as chunks.
        m_chunk_idxs = SetType::Fill(num_chunks);
        m_cost.InitializeEnd(/*num_txns=*/num_chunks, /*num_deps=*/num_deps);
    }
1211 : :
1212 : : /** Load an existing linearization. Must be called immediately after constructor. The result is
1213 : : * topological if the linearization is valid. Otherwise, MakeTopological still needs to be
1214 : : * called. */
1215 : 144187 : void LoadLinearization(std::span<const DepGraphIndex> old_linearization) noexcept
1216 : : {
1217 : : // Add transactions one by one, in order of existing linearization.
1218 [ + + ]: 3949070 : for (DepGraphIndex tx_idx : old_linearization) {
1219 : 3804883 : auto chunk_idx = m_tx_data[tx_idx].chunk_idx;
1220 : : // Merge the chunk upwards, as long as merging succeeds.
1221 : : while (true) {
1222 : 6579200 : chunk_idx = MergeStep<false>(chunk_idx);
1223 [ + + ]: 6579200 : if (chunk_idx == INVALID_SET_IDX) break;
1224 : : }
1225 : : }
1226 : 144187 : }
1227 : :
    /** Make state topological. Can be called after constructing, or after LoadLinearization.
     *
     * Repeatedly pops potentially-suboptimal chunks from a queue and attempts to merge them
     * with neighboring chunks until no further merges are possible. */
    void MakeTopological() noexcept
    {
        m_cost.MakeTopologicalBegin();
        Assume(m_suboptimal_chunks.empty());
        /** What direction to initially merge chunks in; one of the two directions is enough. This
         * is sufficient because if a non-topological inactive dependency exists between two
         * chunks, at least one of the two chunks will eventually be processed in a direction that
         * discovers it - either the lower chunk tries upward, or the upper chunk tries downward.
         * Chunks that are the result of the merging are always tried in both directions. */
        unsigned init_dir = m_rng.randbool();
        /** Which chunks are the result of merging, and thus need merge attempts in both
         * directions. */
        SetType merged_chunks;
        // Mark chunks as suboptimal.
        m_suboptimal_idxs = m_chunk_idxs;
        for (auto chunk_idx : m_chunk_idxs) {
            m_suboptimal_chunks.emplace_back(chunk_idx);
            // Randomize the initial order of suboptimal chunks in the queue.
            SetIdx j = m_rng.randrange<SetIdx>(m_suboptimal_chunks.size());
            if (j != m_suboptimal_chunks.size() - 1) {
                std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]);
            }
        }
        unsigned chunks = m_chunk_idxs.Count();
        unsigned steps = 0;
        while (!m_suboptimal_chunks.empty()) {
            ++steps;
            // Pop an entry from the potentially-suboptimal chunk queue.
            SetIdx chunk_idx = m_suboptimal_chunks.front();
            m_suboptimal_chunks.pop_front();
            Assume(m_suboptimal_idxs[chunk_idx]);
            m_suboptimal_idxs.Reset(chunk_idx);
            // If what was popped is not currently a chunk, continue. This may
            // happen when it was merged with something else since being added.
            if (!m_chunk_idxs[chunk_idx]) continue;
            /** What direction(s) to attempt merging in. 1=up, 2=down, 3=both. */
            unsigned direction = merged_chunks[chunk_idx] ? 3 : init_dir + 1;
            // Randomly pick which direction (up or down) is tried first.
            int flip = m_rng.randbool();
            for (int i = 0; i < 2; ++i) {
                if (i ^ flip) {
                    if (!(direction & 1)) continue;
                    // Attempt to merge the chunk upwards.
                    auto result_up = MergeStep<false>(chunk_idx);
                    if (result_up != INVALID_SET_IDX) {
                        // Queue the merge result (once), and record that it needs both directions.
                        if (!m_suboptimal_idxs[result_up]) {
                            m_suboptimal_idxs.Set(result_up);
                            m_suboptimal_chunks.push_back(result_up);
                        }
                        merged_chunks.Set(result_up);
                        break;
                    }
                } else {
                    if (!(direction & 2)) continue;
                    // Attempt to merge the chunk downwards.
                    auto result_down = MergeStep<true>(chunk_idx);
                    if (result_down != INVALID_SET_IDX) {
                        // Queue the merge result (once), and record that it needs both directions.
                        if (!m_suboptimal_idxs[result_down]) {
                            m_suboptimal_idxs.Set(result_down);
                            m_suboptimal_chunks.push_back(result_down);
                        }
                        merged_chunks.Set(result_down);
                        break;
                    }
                }
            }
        }
        m_cost.MakeTopologicalEnd(/*num_chunks=*/chunks, /*num_steps=*/steps);
    }
1297 : :
1298 : : /** Initialize the data structure for optimization. It must be topological already. */
1299 : 191345 : void StartOptimizing() noexcept
1300 : : {
1301 : : m_cost.StartOptimizingBegin();
1302 [ + - ]: 191345 : Assume(m_suboptimal_chunks.empty());
1303 : : // Mark chunks suboptimal.
1304 : 191345 : m_suboptimal_idxs = m_chunk_idxs;
1305 [ + + + + ]: 1602743 : for (auto chunk_idx : m_chunk_idxs) {
1306 : 1335853 : m_suboptimal_chunks.push_back(chunk_idx);
1307 : : // Randomize the initial order of suboptimal chunks in the queue.
1308 : 1335853 : SetIdx j = m_rng.randrange<SetIdx>(m_suboptimal_chunks.size());
1309 [ + + ]: 1335853 : if (j != m_suboptimal_chunks.size() - 1) {
1310 : 950442 : std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]);
1311 : : }
1312 : : }
1313 : 191345 : m_cost.StartOptimizingEnd(/*num_chunks=*/m_suboptimal_chunks.size());
1314 : 191345 : }
1315 : :
1316 : : /** Try to improve the forest. Returns false if it is optimal, true otherwise. */
1317 : 2558244 : bool OptimizeStep() noexcept
1318 : : {
1319 : 2558244 : auto chunk_idx = PickChunkToOptimize();
1320 [ + - ]: 2558244 : if (chunk_idx == INVALID_SET_IDX) {
1321 : : // No improvable chunk was found, we are done.
1322 : : return false;
1323 : : }
1324 [ + + ]: 2558244 : auto [parent_idx, child_idx] = PickDependencyToSplit(chunk_idx);
1325 [ + + ]: 2558244 : if (parent_idx == TxIdx(-1)) {
1326 : : // Nothing to improve in chunk_idx. Need to continue with other chunks, if any.
1327 : 1535377 : return !m_suboptimal_chunks.empty();
1328 : : }
1329 : : // Deactivate the found dependency and then make the state topological again with a
1330 : : // sequence of merges.
1331 : 1022867 : Improve(parent_idx, child_idx);
1332 : 1022867 : return true;
1333 : : }
1334 : :
1335 : : /** Initialize data structure for minimizing the chunks. Can only be called if state is known
1336 : : * to be optimal. OptimizeStep() cannot be called anymore afterwards. */
1337 : 191345 : void StartMinimizing() noexcept
1338 : : {
1339 : : m_cost.StartMinimizingBegin();
1340 [ + - ]: 191345 : m_nonminimal_chunks.clear();
1341 [ + - ]: 191345 : m_nonminimal_chunks.reserve(m_transaction_idxs.Count());
1342 : : // Gather all chunks, and for each, add it with a random pivot in it, and a random initial
1343 : : // direction, to m_nonminimal_chunks.
1344 [ + + + + ]: 1772384 : for (auto chunk_idx : m_chunk_idxs) {
1345 : 1505494 : TxIdx pivot_idx = PickRandomTx(m_set_info[chunk_idx].transactions);
1346 : 1505494 : m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, m_rng.randbits<1>());
1347 : : // Randomize the initial order of nonminimal chunks in the queue.
1348 : 1505494 : SetIdx j = m_rng.randrange<SetIdx>(m_nonminimal_chunks.size());
1349 [ + + ]: 1505494 : if (j != m_nonminimal_chunks.size() - 1) {
1350 : 1103779 : std::swap(m_nonminimal_chunks.back(), m_nonminimal_chunks[j]);
1351 : : }
1352 : : }
1353 : 191345 : m_cost.StartMinimizingEnd(/*num_chunks=*/m_nonminimal_chunks.size());
1354 : 191345 : }
1355 : :
    /** Try to reduce a chunk's size. Returns false if all chunks are minimal, true otherwise.
     *
     * Pops one chunk from the potentially-non-minimal queue and attempts to split it along a
     * dependency whose top and bottom sets have equal feerates, keeping a pivot transaction on
     * one chosen side. */
    bool MinimizeStep() noexcept
    {
        // If the queue of potentially-non-minimal chunks is empty, we are done.
        if (m_nonminimal_chunks.empty()) return false;
        m_cost.MinimizeStepBegin();
        // Pop an entry from the potentially-non-minimal chunk queue.
        auto [chunk_idx, pivot_idx, flags] = m_nonminimal_chunks.front();
        m_nonminimal_chunks.pop_front();
        auto& chunk_info = m_set_info[chunk_idx];
        /** Whether to move the pivot down rather than up. */
        bool move_pivot_down = flags & 1;
        /** Whether this is already the second stage. */
        bool second_stage = flags & 2;

        // Find a random dependency whose top and bottom set feerates are equal, and which has
        // pivot in bottom set (if move_pivot_down) or in top set (if !move_pivot_down).
        std::pair<TxIdx, TxIdx> candidate_dep;
        uint64_t candidate_tiebreak{0};
        bool have_any = false;
        // Iterate over all transactions.
        for (auto tx_idx : chunk_info.transactions) {
            const auto& tx_data = m_tx_data[tx_idx];
            // Iterate over all active child dependencies of the transaction.
            for (auto child_idx : tx_data.active_children) {
                const auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]];
                // Skip if this dependency does not have equal top and bottom set feerates. Note
                // that the top cannot have higher feerate than the bottom, or OptimizeSteps would
                // have dealt with it.
                if (ByRatio{dep_top_info.feerate} < ByRatio{chunk_info.feerate}) continue;
                have_any = true;
                // Skip if this dependency does not have pivot in the right place.
                if (move_pivot_down == dep_top_info.transactions[pivot_idx]) continue;
                // Remember this as our chosen dependency if it has a better tiebreak.
                // The "| 1" guarantees a nonzero tiebreak, so candidate_tiebreak == 0 below
                // reliably means "no eligible dependency was found".
                uint64_t tiebreak = m_rng.rand64() | 1;
                if (tiebreak > candidate_tiebreak) {
                    candidate_tiebreak = tiebreak;
                    candidate_dep = {tx_idx, child_idx};
                }
            }
        }
        m_cost.MinimizeStepMid(/*num_txns=*/chunk_info.transactions.Count());
        // If no dependencies have equal top and bottom set feerate, this chunk is minimal.
        if (!have_any) return true;
        // If all found dependencies have the pivot in the wrong place, try moving it in the other
        // direction. If this was the second stage already, we are done.
        if (candidate_tiebreak == 0) {
            // Switch to other direction, and to second phase.
            flags ^= 3;
            if (!second_stage) m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, flags);
            return true;
        }

        // Otherwise, deactivate the dependency that was found.
        auto [parent_chunk_idx, child_chunk_idx] = Deactivate(candidate_dep.first, candidate_dep.second);
        // Determine if there is a dependency from the new bottom to the new top (opposite from the
        // dependency that was just deactivated).
        auto& parent_reachable = m_reachable[parent_chunk_idx].first;
        auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions;
        if (parent_reachable.Overlaps(child_chunk_txn)) {
            // A self-merge is needed. Note that the child_chunk_idx is the top, and
            // parent_chunk_idx is the bottom, because we activate a dependency in the reverse
            // direction compared to the deactivation above.
            auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx);
            // Re-insert the chunk into the queue, in the same direction. Note that the chunk_idx
            // will have changed.
            m_nonminimal_chunks.emplace_back(merged_chunk_idx, pivot_idx, flags);
            m_cost.MinimizeStepEnd(/*split=*/false);
        } else {
            // No self-merge happens, and thus we have found a way to split the chunk. Create two
            // smaller chunks, and add them to the queue. The one that contains the current pivot
            // gets to continue with it in the same direction, to minimize the number of times we
            // alternate direction. If we were in the second phase already, the newly created chunk
            // inherits that too, because we know no split with the pivot on the other side is
            // possible already. The new chunk without the current pivot gets a new randomly-chosen
            // one.
            if (move_pivot_down) {
                auto parent_pivot_idx = PickRandomTx(m_set_info[parent_chunk_idx].transactions);
                m_nonminimal_chunks.emplace_back(parent_chunk_idx, parent_pivot_idx, m_rng.randbits<1>());
                m_nonminimal_chunks.emplace_back(child_chunk_idx, pivot_idx, flags);
            } else {
                auto child_pivot_idx = PickRandomTx(m_set_info[child_chunk_idx].transactions);
                m_nonminimal_chunks.emplace_back(parent_chunk_idx, pivot_idx, flags);
                m_nonminimal_chunks.emplace_back(child_chunk_idx, child_pivot_idx, m_rng.randbits<1>());
            }
            // Randomize which of the two new chunks is processed first.
            if (m_rng.randbool()) {
                std::swap(m_nonminimal_chunks.back(), m_nonminimal_chunks[m_nonminimal_chunks.size() - 2]);
            }
            m_cost.MinimizeStepEnd(/*split=*/true);
        }
        return true;
    }
1448 : :
    /** Construct a topologically-valid linearization from the current forest state. Must be
     * topological. fallback_order is a comparator that defines a strong order for DepGraphIndexes
     * in this cluster, used to order equal-feerate transactions and chunks.
     *
     * Specifically, the resulting order consists of:
     * - The chunks of the current SFL state, sorted by (in decreasing order of priority):
     *   - topology (parents before children)
     *   - highest chunk feerate first
     *   - smallest chunk size first
     *   - the chunk with the lowest maximum transaction, by fallback_order, first
     * - The transactions within a chunk, sorted by (in decreasing order of priority):
     *   - topology (parents before children)
     *   - highest tx feerate first
     *   - smallest tx size first
     *   - the lowest transaction, by fallback_order, first
     *
     * Implemented as a two-level Kahn-style topological sort: a max-heap of ready chunks,
     * and within the popped chunk, a max-heap of ready transactions.
     */
    std::vector<DepGraphIndex> GetLinearization(const StrongComparator<DepGraphIndex> auto& fallback_order) noexcept
    {
        m_cost.GetLinearizationBegin();
        /** The output linearization. */
        std::vector<DepGraphIndex> ret;
        ret.reserve(m_set_info.size());
        /** A heap with all chunks (by set index) that can currently be included, sorted by
         * chunk feerate (high to low), chunk size (small to large), and by least maximum element
         * according to the fallback order (which is the second pair element). */
        std::vector<std::pair<SetIdx, TxIdx>> ready_chunks;
        /** For every chunk, indexed by SetIdx, the number of unmet dependencies the chunk has on
         * other chunks (not including dependencies within the chunk itself). */
        std::vector<TxIdx> chunk_deps(m_set_info.size(), 0);
        /** For every transaction, indexed by TxIdx, the number of unmet dependencies the
         * transaction has. */
        std::vector<TxIdx> tx_deps(m_tx_data.size(), 0);
        /** A heap with all transactions within the current chunk that can be included, sorted by
         * tx feerate (high to low), tx size (small to large), and fallback order. */
        std::vector<TxIdx> ready_tx;
        // Populate chunk_deps and tx_deps.
        unsigned num_deps{0};
        for (TxIdx chl_idx : m_transaction_idxs) {
            const auto& chl_data = m_tx_data[chl_idx];
            tx_deps[chl_idx] = chl_data.parents.Count();
            num_deps += tx_deps[chl_idx];
            auto chl_chunk_idx = chl_data.chunk_idx;
            auto& chl_chunk_info = m_set_info[chl_chunk_idx];
            // Only parents outside the chunk count as chunk-level dependencies.
            chunk_deps[chl_chunk_idx] += (chl_data.parents - chl_chunk_info.transactions).Count();
        }
        /** Function to compute the highest element of a chunk, by fallback_order. */
        auto max_fallback_fn = [&](SetIdx chunk_idx) noexcept {
            auto& chunk = m_set_info[chunk_idx].transactions;
            auto it = chunk.begin();
            DepGraphIndex ret = *it;
            ++it;
            while (it != chunk.end()) {
                if (fallback_order(*it, ret) > 0) ret = *it;
                ++it;
            }
            return ret;
        };
        /** Comparison function for the transaction heap. Note that it is a max-heap, so
         * tx_cmp_fn(a, b) == true means "a appears after b in the linearization". */
        auto tx_cmp_fn = [&](const auto& a, const auto& b) noexcept {
            // Bail out for identical transactions.
            if (a == b) return false;
            // First sort by increasing transaction feerate.
            auto& a_feerate = m_depgraph.FeeRate(a);
            auto& b_feerate = m_depgraph.FeeRate(b);
            auto feerate_cmp = ByRatio{a_feerate} <=> ByRatio{b_feerate};
            if (feerate_cmp != 0) return feerate_cmp < 0;
            // Then by decreasing transaction size.
            if (a_feerate.size != b_feerate.size) {
                return a_feerate.size > b_feerate.size;
            }
            // Tie-break by decreasing fallback_order.
            auto fallback_cmp = fallback_order(a, b);
            if (fallback_cmp != 0) return fallback_cmp > 0;
            // This should not be hit, because fallback_order defines a strong ordering.
            Assume(false);
            return a < b;
        };
        /** Comparison function for the chunk heap. Note that it is a max-heap, so
         * chunk_cmp_fn(a, b) == true means "a appears after b in the linearization". */
        auto chunk_cmp_fn = [&](const auto& a, const auto& b) noexcept {
            // Bail out for identical chunks.
            if (a.first == b.first) return false;
            // First sort by increasing chunk feerate.
            auto& chunk_feerate_a = m_set_info[a.first].feerate;
            auto& chunk_feerate_b = m_set_info[b.first].feerate;
            auto feerate_cmp = ByRatio{chunk_feerate_a} <=> ByRatio{chunk_feerate_b};
            if (feerate_cmp != 0) return feerate_cmp < 0;
            // Then by decreasing chunk size.
            if (chunk_feerate_a.size != chunk_feerate_b.size) {
                return chunk_feerate_a.size > chunk_feerate_b.size;
            }
            // Tie-break by decreasing fallback_order (on the chunks' precomputed maximum
            // elements, stored as the pair's second member).
            auto fallback_cmp = fallback_order(a.second, b.second);
            if (fallback_cmp != 0) return fallback_cmp > 0;
            // This should not be hit, because fallback_order defines a strong ordering.
            Assume(false);
            return a.second < b.second;
        };
        // Construct a heap with all chunks that have no out-of-chunk dependencies.
        for (SetIdx chunk_idx : m_chunk_idxs) {
            if (chunk_deps[chunk_idx] == 0) {
                ready_chunks.emplace_back(chunk_idx, max_fallback_fn(chunk_idx));
            }
        }
        std::make_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
        // Pop chunks off the heap.
        while (!ready_chunks.empty()) {
            auto [chunk_idx, _rnd] = ready_chunks.front();
            std::pop_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
            ready_chunks.pop_back();
            Assume(chunk_deps[chunk_idx] == 0);
            const auto& chunk_txn = m_set_info[chunk_idx].transactions;
            // Build heap of all includable transactions in chunk.
            Assume(ready_tx.empty());
            for (TxIdx tx_idx : chunk_txn) {
                if (tx_deps[tx_idx] == 0) ready_tx.push_back(tx_idx);
            }
            Assume(!ready_tx.empty());
            std::make_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
            // Pick transactions from the ready heap, append them to linearization, and decrement
            // dependency counts.
            while (!ready_tx.empty()) {
                // Pop an element from the tx_ready heap.
                auto tx_idx = ready_tx.front();
                std::pop_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
                ready_tx.pop_back();
                // Append to linearization.
                ret.push_back(tx_idx);
                // Decrement dependency counts.
                auto& tx_data = m_tx_data[tx_idx];
                for (TxIdx chl_idx : tx_data.children) {
                    auto& chl_data = m_tx_data[chl_idx];
                    // Decrement tx dependency count.
                    Assume(tx_deps[chl_idx] > 0);
                    if (--tx_deps[chl_idx] == 0 && chunk_txn[chl_idx]) {
                        // Child tx has no dependencies left, and is in this chunk. Add it to the tx heap.
                        ready_tx.push_back(chl_idx);
                        std::push_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
                    }
                    // Decrement chunk dependency count if this is out-of-chunk dependency.
                    if (chl_data.chunk_idx != chunk_idx) {
                        Assume(chunk_deps[chl_data.chunk_idx] > 0);
                        if (--chunk_deps[chl_data.chunk_idx] == 0) {
                            // Child chunk has no dependencies left. Add it to the chunk heap.
                            ready_chunks.emplace_back(chl_data.chunk_idx, max_fallback_fn(chl_data.chunk_idx));
                            std::push_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
                        }
                    }
                }
            }
        }
        Assume(ret.size() == m_set_info.size());
        m_cost.GetLinearizationEnd(/*num_txns=*/m_set_info.size(), /*num_deps=*/num_deps);
        return ret;
    }
1606 : :
1607 : : /** Get the diagram for the current state, which must be topological. Test-only.
1608 : : *
1609 : : * The linearization produced by GetLinearization() is always at least as good (in the
1610 : : * CompareChunks() sense) as this diagram, but may be better.
1611 : : *
1612 : : * After an OptimizeStep(), the diagram will always be at least as good as before. Once
1613 : : * OptimizeStep() returns false, the diagram will be equivalent to that produced by
1614 : : * GetLinearization(), and optimal.
1615 : : *
1616 : : * After a MinimizeStep(), the diagram cannot change anymore (in the CompareChunks() sense),
1617 : : * but its number of segments can increase still. Once MinimizeStep() returns false, the number
1618 : : * of chunks of the produced linearization will match the number of segments in the diagram.
1619 : : */
1620 : : std::vector<FeeFrac> GetDiagram() const noexcept
1621 : : {
1622 : : std::vector<FeeFrac> ret;
1623 : : for (auto chunk_idx : m_chunk_idxs) {
1624 : : ret.push_back(m_set_info[chunk_idx].feerate);
1625 : : }
1626 : : std::ranges::sort(ret, std::greater<ByRatioNegSize<FeeFrac>>{});
1627 : : return ret;
1628 : : }
1629 : :
    /** Determine how much work was performed so far, as accounted by the CostModel across all
     * phases executed on this state. */
    uint64_t GetCost() const noexcept { return m_cost.GetCost(); }
1632 : :
    /** Verify internal consistency of the data structure. Test-only; uses assert() directly. */
    void SanityCheck() const
    {
        //
        // Verify dependency parent/child information, and build list of (active) dependencies.
        //
        std::vector<std::pair<TxIdx, TxIdx>> expected_dependencies;
        std::vector<std::pair<TxIdx, TxIdx>> all_dependencies;
        std::vector<std::pair<TxIdx, TxIdx>> active_dependencies;
        // Expected dependencies come straight from the DepGraph.
        for (auto parent_idx : m_depgraph.Positions()) {
            for (auto child_idx : m_depgraph.GetReducedChildren(parent_idx)) {
                expected_dependencies.emplace_back(parent_idx, child_idx);
            }
        }
        // Actual dependencies come from the per-transaction children sets.
        for (auto tx_idx : m_transaction_idxs) {
            for (auto child_idx : m_tx_data[tx_idx].children) {
                all_dependencies.emplace_back(tx_idx, child_idx);
                if (m_tx_data[tx_idx].active_children[child_idx]) {
                    active_dependencies.emplace_back(tx_idx, child_idx);
                }
            }
        }
        // Sort both lists so they can be compared regardless of discovery order.
        std::ranges::sort(expected_dependencies);
        std::ranges::sort(all_dependencies);
        assert(expected_dependencies == all_dependencies);

        //
        // Verify the chunks against the list of active dependencies
        //
        SetType chunk_cover;
        for (auto chunk_idx : m_chunk_idxs) {
            const auto& chunk_info = m_set_info[chunk_idx];
            // Verify that transactions in the chunk point back to it. This guarantees
            // that chunks are non-overlapping.
            for (auto tx_idx : chunk_info.transactions) {
                assert(m_tx_data[tx_idx].chunk_idx == chunk_idx);
            }
            assert(!chunk_cover.Overlaps(chunk_info.transactions));
            chunk_cover |= chunk_info.transactions;
            // Verify the chunk's transaction set: start from an arbitrary chunk transaction,
            // and for every active dependency, if it contains the parent or child, add the
            // other. It must have exactly N-1 active dependencies in it, guaranteeing it is
            // acyclic.
            assert(chunk_info.transactions.Any());
            SetType expected_chunk = SetType::Singleton(chunk_info.transactions.First());
            // Fixpoint iteration: repeat until the closure stops growing.
            while (true) {
                auto old = expected_chunk;
                size_t active_dep_count{0};
                for (const auto& [par, chl] : active_dependencies) {
                    if (expected_chunk[par] || expected_chunk[chl]) {
                        expected_chunk.Set(par);
                        expected_chunk.Set(chl);
                        ++active_dep_count;
                    }
                }
                if (old == expected_chunk) {
                    assert(expected_chunk.Count() == active_dep_count + 1);
                    break;
                }
            }
            assert(chunk_info.transactions == expected_chunk);
            // Verify the chunk's feerate.
            assert(chunk_info.feerate == m_depgraph.FeeRate(chunk_info.transactions));
            // Verify the chunk's reachable transactions.
            assert(m_reachable[chunk_idx] == GetReachable(expected_chunk));
            // Verify that the chunk's reachable transactions don't include its own transactions.
            assert(!m_reachable[chunk_idx].first.Overlaps(chunk_info.transactions));
            assert(!m_reachable[chunk_idx].second.Overlaps(chunk_info.transactions));
        }
        // Verify that together, the chunks cover all transactions.
        assert(chunk_cover == m_depgraph.Positions());

        //
        // Verify transaction data.
        //
        assert(m_transaction_idxs == m_depgraph.Positions());
        for (auto tx_idx : m_transaction_idxs) {
            const auto& tx_data = m_tx_data[tx_idx];
            // Verify it has a valid chunk index, and that chunk includes this transaction.
            assert(m_chunk_idxs[tx_data.chunk_idx]);
            assert(m_set_info[tx_data.chunk_idx].transactions[tx_idx]);
            // Verify parents/children.
            assert(tx_data.parents == m_depgraph.GetReducedParents(tx_idx));
            assert(tx_data.children == m_depgraph.GetReducedChildren(tx_idx));
            // Verify active_children is a subset of children.
            assert(tx_data.active_children.IsSubsetOf(tx_data.children));
            // Verify each active child's dep_top_idx points to a valid non-chunk set.
            for (auto child_idx : tx_data.active_children) {
                assert(tx_data.dep_top_idx[child_idx] < m_set_info.size());
                assert(!m_chunk_idxs[tx_data.dep_top_idx[child_idx]]);
            }
        }

        //
        // Verify active dependencies' top sets.
        //
        for (const auto& [par_idx, chl_idx] : active_dependencies) {
            // Verify the top set's transactions: it must contain the parent, and for every
            // active dependency, except the chl_idx->par_idx dependency itself, if it contains the
            // parent or child, it must contain both. It must have exactly N-1 active dependencies
            // in it, guaranteeing it is acyclic.
            SetType expected_top = SetType::Singleton(par_idx);
            // Fixpoint iteration, as above, but excluding the dependency under test.
            while (true) {
                auto old = expected_top;
                size_t active_dep_count{0};
                for (const auto& [par2_idx, chl2_idx] : active_dependencies) {
                    if (par_idx == par2_idx && chl_idx == chl2_idx) continue;
                    if (expected_top[par2_idx] || expected_top[chl2_idx]) {
                        expected_top.Set(par2_idx);
                        expected_top.Set(chl2_idx);
                        ++active_dep_count;
                    }
                }
                if (old == expected_top) {
                    assert(expected_top.Count() == active_dep_count + 1);
                    break;
                }
            }
            assert(!expected_top[chl_idx]);
            auto& dep_top_info = m_set_info[m_tx_data[par_idx].dep_top_idx[chl_idx]];
            assert(dep_top_info.transactions == expected_top);
            // Verify the top set's feerate.
            assert(dep_top_info.feerate == m_depgraph.FeeRate(dep_top_info.transactions));
        }

        //
        // Verify m_suboptimal_chunks.
        //
        SetType suboptimal_idxs;
        for (size_t i = 0; i < m_suboptimal_chunks.size(); ++i) {
            auto chunk_idx = m_suboptimal_chunks[i];
            // Each chunk may appear at most once in the queue.
            assert(!suboptimal_idxs[chunk_idx]);
            suboptimal_idxs.Set(chunk_idx);
        }
        assert(m_suboptimal_idxs == suboptimal_idxs);

        //
        // Verify m_nonminimal_chunks.
        //
        SetType nonminimal_idxs;
        for (size_t i = 0; i < m_nonminimal_chunks.size(); ++i) {
            auto [chunk_idx, pivot, flags] = m_nonminimal_chunks[i];
            // The pivot must live inside the queued chunk.
            assert(m_tx_data[pivot].chunk_idx == chunk_idx);
            assert(!nonminimal_idxs[chunk_idx]);
            nonminimal_idxs.Set(chunk_idx);
        }
        assert(nonminimal_idxs.IsSubsetOf(m_chunk_idxs));
    }
1781 : : };
1782 : :
1783 : : /** Find or improve a linearization for a cluster.
1784 : : *
1785 : : * @param[in] depgraph Dependency graph of the cluster to be linearized.
1786 : : * @param[in] max_cost Upper bound on the amount of work that will be done.
1787 : : * @param[in] rng_seed A random number seed to control search order. This prevents peers
1788 : : * from predicting exactly which clusters would be hard for us to
1789 : : * linearize.
1790 : : * @param[in] fallback_order A comparator to order transactions, used to sort equal-feerate
1791 : : * chunks and transactions. See SpanningForestState::GetLinearization
1792 : : * for details.
1793 : : * @param[in] old_linearization An existing linearization for the cluster, or empty.
1794 : : * @param[in] is_topological (Only relevant if old_linearization is not empty) Whether
1795 : : * old_linearization is topologically valid.
1796 : : * @return A tuple of:
1797 : : * - The resulting linearization. It is guaranteed to be at least as
1798 : : * good (in the feerate diagram sense) as old_linearization.
1799 : : * - A boolean indicating whether the result is guaranteed to be
1800 : : * optimal with minimal chunks.
1801 : : * - How many optimization steps were actually performed.
1802 : : */
1803 : : template<typename SetType>
1804 : 191345 : std::tuple<std::vector<DepGraphIndex>, bool, uint64_t> Linearize(
1805 : : const DepGraph<SetType>& depgraph,
1806 : : uint64_t max_cost,
1807 : : uint64_t rng_seed,
1808 : : const StrongComparator<DepGraphIndex> auto& fallback_order,
1809 : : std::span<const DepGraphIndex> old_linearization = {},
1810 : : bool is_topological = true) noexcept
1811 : : {
1812 : : /** Initialize a spanning forest data structure for this cluster. */
1813 [ + + ]: 191345 : SpanningForestState forest(depgraph, rng_seed);
1814 [ + + ]: 191345 : if (!old_linearization.empty()) {
1815 : 144187 : forest.LoadLinearization(old_linearization);
1816 [ + + ]: 144187 : if (!is_topological) forest.MakeTopological();
1817 : : } else {
1818 : 47158 : forest.MakeTopological();
1819 : : }
1820 : : // Make improvement steps to it until we hit the max_iterations limit, or an optimal result
1821 : : // is found.
1822 [ + - ]: 191345 : if (forest.GetCost() < max_cost) {
1823 : 191345 : forest.StartOptimizing();
1824 : : do {
1825 [ + + ]: 2558244 : if (!forest.OptimizeStep()) break;
1826 [ + - ]: 2366899 : } while (forest.GetCost() < max_cost);
1827 : : }
1828 : : // Make chunk minimization steps until we hit the max_iterations limit, or all chunks are
1829 : : // minimal.
1830 : 191345 : bool optimal = false;
1831 [ + - ]: 191345 : if (forest.GetCost() < max_cost) {
1832 : 191345 : forest.StartMinimizing();
1833 : : do {
1834 [ + + ]: 2320492 : if (!forest.MinimizeStep()) {
1835 : : optimal = true;
1836 : : break;
1837 : : }
1838 [ + - ]: 2129147 : } while (forest.GetCost() < max_cost);
1839 : : }
1840 : 191345 : return {forest.GetLinearization(fallback_order), optimal, forest.GetCost()};
1841 : 191345 : }
1842 : :
/** Improve a given linearization.
 *
 * @param[in]     depgraph       Dependency graph of the cluster being linearized.
 * @param[in,out] linearization  On input, an existing linearization for depgraph. On output, a
 *                               potentially better linearization for the same graph.
 *
 * Postlinearization guarantees:
 * - The resulting chunks are connected.
 * - If the input has a tree shape (either all transactions have at most one child, or all
 *   transactions have at most one parent), the result is optimal.
 * - Given a linearization L1 and a leaf transaction T in it. Let L2 be L1 with T moved to the end,
 *   optionally with its fee increased. Let L3 be the postlinearization of L2. L3 will be at least
 *   as good as L1. This means that replacing transactions with same-size higher-fee transactions
 *   will not worsen linearizations through a "drop conflicts, append new transactions,
 *   postlinearize" process.
 */
template<typename SetType>
void PostLinearize(const DepGraph<SetType>& depgraph, std::span<DepGraphIndex> linearization)
{
    // This algorithm performs a number of passes (currently 2); the even ones operate from back to
    // front, the odd ones from front to back. Each results in an equal-or-better linearization
    // than the one started from.
    // - One pass in either direction guarantees that the resulting chunks are connected.
    // - Each direction corresponds to one shape of tree being linearized optimally (forward passes
    //   guarantee this for graphs where each transaction has at most one child; backward passes
    //   guarantee this for graphs where each transaction has at most one parent).
    // - Starting with a backward pass guarantees the moved-tree property.
    //
    // During an odd (forward) pass, the high-level operation is:
    // - Start with an empty list of groups L=[].
    // - For every transaction i in the old linearization, from front to back:
    //   - Append a new group C=[i], containing just i, to the back of L.
    //   - While L has at least one group before C, and the group immediately before C has feerate
    //     lower than C:
    //     - If C depends on P:
    //       - Merge P into C, making C the concatenation of P+C, continuing with the combined C.
    //     - Otherwise:
    //       - Swap P with C, continuing with the now-moved C.
    // - The output linearization is the concatenation of the groups in L.
    //
    // During even (backward) passes, i iterates from the back to the front of the existing
    // linearization, and new groups are prepended instead of appended to the list L. To enable
    // more code reuse, both passes append groups, but during even passes the meanings of
    // parent/child, and of high/low feerate are reversed, and the final concatenation is reversed
    // on output.
    //
    // In the implementation below, the groups are represented by singly-linked lists (pointing
    // from the back to the front), which are themselves organized in a singly-linked circular
    // list (each group pointing to its predecessor, with a special sentinel group at the front
    // that points back to the last group).
    //
    // Information about transaction t is stored in entries[t + 1], while the sentinel is in
    // entries[0].

    /** Index of the sentinel in the entries array below. */
    static constexpr DepGraphIndex SENTINEL{0};
    /** Indicator that a group has no previous transaction. */
    static constexpr DepGraphIndex NO_PREV_TX{0};

    /** Data structure per transaction entry. */
    struct TxEntry
    {
        /** The index of the previous transaction in this group; NO_PREV_TX if this is the first
         *  entry of a group. */
        DepGraphIndex prev_tx;

        // The fields below are only used for transactions that are the last one in a group
        // (referred to as tail transactions below).

        /** Index of the first transaction in this group, possibly itself. */
        DepGraphIndex first_tx;
        /** Index of the last transaction in the previous group. The first group (the sentinel)
         *  points back to the last group here, making it a singly-linked circular list. */
        DepGraphIndex prev_group;
        /** All transactions in the group. Empty for the sentinel. */
        SetType group;
        /** All dependencies of the group (descendants in even passes; ancestors in odd ones). */
        SetType deps;
        /** The combined fee/size of transactions in the group. Fee is negated in even passes. */
        FeeFrac feerate;
    };

    // As an example, consider the state corresponding to the linearization [1,0,3,2], with
    // groups [1,0,3] and [2], in an odd pass. The linked lists would be:
    //
    //                                        +-----+
    //                                 0<-P-- | 0 S | ---\     Legend:
    //                                        +-----+    |
    //                                           ^       |     - digit in box: entries index
    //             /--------------F---------+    G       |       (note: one more than tx value)
    //             v                        \    |       |     - S: sentinel group
    //          +-----+        +-----+      +-----+      |       (empty feerate)
    //   0<-P-- | 2   | <--P-- | 1   | <--P-- | 4 T |    |     - T: tail transaction, contains
    //          +-----+        +-----+      +-----+      |       fields beyond prev_tx.
    //                                           ^       |     - P: prev_tx reference
    //                                           G       G     - F: first_tx reference
    //                                           |       |     - G: prev_group reference
    //                                        +-----+    |
    //                                 0<-P-- | 3 T | <--/
    //                                        +-----+
    //                                         ^  |
    //                                         \-F-/
    //
    // During an even pass, the diagram above would correspond to linearization [2,3,0,1], with
    // groups [2] and [3,0,1].

    // One entry per position in the graph, plus one for the sentinel at index 0.
    std::vector<TxEntry> entries(depgraph.PositionRange() + 1);

    // Perform two passes over the linearization.
    for (int pass = 0; pass < 2; ++pass) {
        // rev=1 on the first (backward/"even") pass, rev=0 on the second (forward/"odd") one.
        int rev = !(pass & 1);
        // Construct a sentinel group, identifying the start of the list.
        entries[SENTINEL].prev_group = SENTINEL;
        Assume(entries[SENTINEL].feerate.IsEmpty());

        // Iterate over all elements in the existing linearization.
        for (DepGraphIndex i = 0; i < linearization.size(); ++i) {
            // Even passes are from back to front; odd passes from front to back.
            DepGraphIndex idx = linearization[rev ? linearization.size() - 1 - i : i];
            // Construct a new group containing just idx. In even passes, the meaning of
            // parent/child and high/low feerate are swapped.
            DepGraphIndex cur_group = idx + 1;
            entries[cur_group].group = SetType::Singleton(idx);
            entries[cur_group].deps = rev ? depgraph.Descendants(idx): depgraph.Ancestors(idx);
            entries[cur_group].feerate = depgraph.FeeRate(idx);
            // Negate the fee in even passes, so that "higher feerate" comparisons below pick
            // what is effectively the lowest-feerate group when iterating backwards.
            if (rev) entries[cur_group].feerate.fee = -entries[cur_group].feerate.fee;
            entries[cur_group].prev_tx = NO_PREV_TX; // No previous transaction in group.
            entries[cur_group].first_tx = cur_group; // Transaction itself is first of group.
            // Insert the new group at the back of the groups linked list.
            entries[cur_group].prev_group = entries[SENTINEL].prev_group;
            entries[SENTINEL].prev_group = cur_group;

            // Start merge/swap cycle.
            DepGraphIndex next_group = SENTINEL; // We inserted at the end, so next group is sentinel.
            DepGraphIndex prev_group = entries[cur_group].prev_group;
            // Continue as long as the current group has higher feerate than the previous one.
            while (ByRatio{entries[cur_group].feerate} > ByRatio{entries[prev_group].feerate}) {
                // prev_group/cur_group/next_group refer to (the last transactions of) 3
                // consecutive entries in groups list.
                Assume(cur_group == entries[next_group].prev_group);
                Assume(prev_group == entries[cur_group].prev_group);
                // The sentinel has empty feerate, which is neither higher or lower than other
                // feerates. Thus, the while loop we are in here guarantees that cur_group and
                // prev_group are not the sentinel.
                Assume(cur_group != SENTINEL);
                Assume(prev_group != SENTINEL);
                if (entries[cur_group].deps.Overlaps(entries[prev_group].group)) {
                    // There is a dependency between cur_group and prev_group; merge prev_group
                    // into cur_group. The group/deps/feerate fields of prev_group remain unchanged
                    // but become unused.
                    entries[cur_group].group |= entries[prev_group].group;
                    entries[cur_group].deps |= entries[prev_group].deps;
                    entries[cur_group].feerate += entries[prev_group].feerate;
                    // Make the first of the current group point to the tail of the previous group.
                    entries[entries[cur_group].first_tx].prev_tx = prev_group;
                    // The first of the previous group becomes the first of the newly-merged group.
                    entries[cur_group].first_tx = entries[prev_group].first_tx;
                    // The previous group becomes whatever group was before the former one.
                    prev_group = entries[prev_group].prev_group;
                    entries[cur_group].prev_group = prev_group;
                } else {
                    // There is no dependency between cur_group and prev_group; swap them.
                    DepGraphIndex preprev_group = entries[prev_group].prev_group;
                    // If PP, P, C, N were the old preprev, prev, cur, next groups, then the new
                    // layout becomes [PP, C, P, N]. Update prev_groups to reflect that order.
                    entries[next_group].prev_group = prev_group;
                    entries[prev_group].prev_group = cur_group;
                    entries[cur_group].prev_group = preprev_group;
                    // The current group remains the same, but the groups before/after it have
                    // changed.
                    next_group = prev_group;
                    prev_group = preprev_group;
                }
            }
        }

        // Convert the entries back to linearization (overwriting the existing one).
        DepGraphIndex cur_group = entries[0].prev_group;
        DepGraphIndex done = 0;
        while (cur_group != SENTINEL) {
            DepGraphIndex cur_tx = cur_group;
            // Traverse the transactions of cur_group (from back to front), and write them in the
            // same order during odd passes, and reversed (front to back) in even passes.
            if (rev) {
                do {
                    // Note the -1: entries index is one more than the transaction index.
                    *(linearization.begin() + (done++)) = cur_tx - 1;
                    cur_tx = entries[cur_tx].prev_tx;
                } while (cur_tx != NO_PREV_TX);
            } else {
                do {
                    *(linearization.end() - (++done)) = cur_tx - 1;
                    cur_tx = entries[cur_tx].prev_tx;
                } while (cur_tx != NO_PREV_TX);
            }
            cur_group = entries[cur_group].prev_group;
        }
        Assume(done == linearization.size());
    }
}
2043 : :
2044 : : } // namespace cluster_linearize
2045 : :
2046 : : #endif // BITCOIN_CLUSTER_LINEARIZE_H
|