LCOV - code coverage report
Current view: top level - src/node - txorphanage.cpp (source / functions) Coverage Total Hit
Test: total_coverage.info Lines: 86.8 % 302 262
Test Date: 2025-08-01 05:08:13 Functions: 97.6 % 41 40
Branches: 63.9 % 330 211

             Branch data     Line data    Source code
       1                 :             : // Copyright (c) 2021-2022 The Bitcoin Core developers
       2                 :             : // Distributed under the MIT software license, see the accompanying
       3                 :             : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
       4                 :             : 
       5                 :             : #include <node/txorphanage.h>
       6                 :             : 
       7                 :             : #include <consensus/validation.h>
       8                 :             : #include <logging.h>
       9                 :             : #include <policy/policy.h>
      10                 :             : #include <primitives/transaction.h>
      11                 :             : #include <util/feefrac.h>
      12                 :             : #include <util/time.h>
      13                 :             : #include <util/hasher.h>
      14                 :             : 
      15                 :             : #include <boost/multi_index/indexed_by.hpp>
      16                 :             : #include <boost/multi_index/ordered_index.hpp>
      17                 :             : #include <boost/multi_index/tag.hpp>
      18                 :             : #include <boost/multi_index_container.hpp>
      19                 :             : 
      20                 :             : #include <cassert>
      21                 :             : #include <cmath>
      22                 :             : #include <unordered_map>
      23                 :             : 
      24                 :             : namespace node {
      25                 :             : /** Minimum NodeId for lower_bound lookups (in practice, NodeIds start at 0). */
      26                 :             : static constexpr NodeId MIN_PEER{std::numeric_limits<NodeId>::min()};
      27                 :             : /** Maximum NodeId for upper_bound lookups. */
      28                 :             : static constexpr NodeId MAX_PEER{std::numeric_limits<NodeId>::max()};
      29                 :             : class TxOrphanageImpl final : public TxOrphanage {
      30                 :             :     // Type alias for sequence numbers
      31                 :             :     using SequenceNumber = uint64_t;
      32                 :             :     /** Global sequence number, increment each time an announcement is added. */
      33                 :             :     SequenceNumber m_current_sequence{0};
      34                 :             : 
      35                 :             :     /** One orphan announcement. Each announcement (i.e. combination of wtxid, nodeid) is unique. There may be multiple
      36                 :             :      * announcements for the same tx, and multiple transactions with the same txid but different wtxid are possible. */
      37         [ +  - ]:        5840 :     struct Announcement
      38                 :             :     {
      39                 :             :         const CTransactionRef m_tx;
      40                 :             :         /** Which peer announced this tx */
      41                 :             :         const NodeId m_announcer;
      42                 :             :         /** What order this transaction entered the orphanage. */
      43                 :             :         const SequenceNumber m_entry_sequence;
      44                 :             :         /** Whether this tx should be reconsidered. Always starts out false. A peer's workset is the collection of all
      45                 :             :          * announcements with m_reconsider=true. */
      46                 :             :         bool m_reconsider{false};
      47                 :             : 
      48                 :        2920 :         Announcement(const CTransactionRef& tx, NodeId peer, SequenceNumber seq) :
      49         [ +  - ]:        2920 :             m_tx{tx}, m_announcer{peer}, m_entry_sequence{seq}
      50                 :             :         { }
      51                 :             : 
      52                 :             :         /** Get an approximation for "memory usage". The total memory is a function of the memory used to store the
      53                 :             :          * transaction itself, each entry in m_orphans, and each entry in m_outpoint_to_orphan_it. We use weight because
       54                 :             :          * it is often higher than the actual memory usage of the transaction. This metric conveniently encompasses
      55                 :             :          * m_outpoint_to_orphan_it usage since input data does not get the witness discount, and makes it easier to
      56                 :             :          * reason about each peer's limits using well-understood transaction attributes. */
      57                 :       13726 :         TxOrphanage::Usage GetMemUsage()  const {
      58                 :        2858 :             return GetTransactionWeight(*m_tx);
      59                 :             :         }
      60                 :             : 
      61                 :             :         /** Get an approximation of how much this transaction contributes to latency in EraseForBlock and EraseForPeer.
      62                 :             :          * The computation time is a function of the number of entries in m_orphans (thus 1 per announcement) and the
      63                 :             :          * number of entries in m_outpoint_to_orphan_it (thus an additional 1 for every 10 inputs). Transactions with a
      64                 :             :          * small number of inputs (9 or fewer) are counted as 1 to make it easier to reason about each peer's limits in
      65                 :             :          * terms of "normal" transactions. */
      66                 :       11062 :         TxOrphanage::Count GetLatencyScore() const {
      67                 :        2858 :             return 1 + (m_tx->vin.size() / 10);
      68                 :             :         }
      69                 :             :     };
      70                 :             : 
      71                 :             :     // Index by wtxid, then peer
      72                 :             :     struct ByWtxid {};
      73                 :             :     using ByWtxidView = std::tuple<Wtxid, NodeId>;
      74                 :             :     struct WtxidExtractor
      75                 :             :     {
      76                 :             :         using result_type = ByWtxidView;
      77                 :      170966 :         result_type operator()(const Announcement& ann) const
      78                 :             :         {
      79   [ +  +  +  +  :      159748 :             return ByWtxidView{ann.m_tx->GetWitnessHash(), ann.m_announcer};
          +  +  +  +  +  
          -  +  -  +  +  
                   +  + ]
      80                 :             :         }
      81                 :             :     };
      82                 :             : 
      83                 :             :     // Sort by peer, then by whether it is ready to reconsider, then by recency.
      84                 :             :     struct ByPeer {};
      85                 :             :     using ByPeerView = std::tuple<NodeId, bool, SequenceNumber>;
      86                 :             :     struct ByPeerViewExtractor {
      87                 :             :         using result_type = ByPeerView;
      88                 :     2090419 :         result_type operator()(const Announcement& ann) const
      89                 :             :         {
      90   [ +  +  +  -  :     2090419 :             return ByPeerView{ann.m_announcer, ann.m_reconsider, ann.m_entry_sequence};
          +  +  +  +  +  
                +  +  + ]
      91                 :             :         }
      92                 :             :     };
      93                 :             : 
      94                 :             :     struct OrphanIndices final : boost::multi_index::indexed_by<
      95                 :             :         boost::multi_index::ordered_unique<boost::multi_index::tag<ByWtxid>, WtxidExtractor>,
      96                 :             :         boost::multi_index::ordered_unique<boost::multi_index::tag<ByPeer>, ByPeerViewExtractor>
      97                 :             :     >{};
      98                 :             : 
      99                 :             :     using AnnouncementMap = boost::multi_index::multi_index_container<Announcement, OrphanIndices>;
     100                 :             :     template<typename Tag>
     101                 :             :     using Iter = typename AnnouncementMap::index<Tag>::type::iterator;
     102                 :             :     AnnouncementMap m_orphans;
     103                 :             : 
     104                 :             :     const TxOrphanage::Count m_max_global_latency_score{DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE};
     105                 :             :     const TxOrphanage::Usage m_reserved_usage_per_peer{DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER};
     106                 :             : 
     107                 :             :     /** Number of unique orphans by wtxid. Less than or equal to the number of entries in m_orphans. */
     108                 :             :     TxOrphanage::Count m_unique_orphans{0};
     109                 :             : 
     110                 :             :     /** Memory used by orphans (see Announcement::GetMemUsage()), deduplicated by wtxid. */
     111                 :             :     TxOrphanage::Usage m_unique_orphan_usage{0};
     112                 :             : 
     113                 :             :     /** The sum of each unique transaction's latency scores including the inputs only (see Announcement::GetLatencyScore
     114                 :             :      * but subtract 1 for the announcements themselves). The total orphanage's latency score is given by this value +
     115                 :             :      * the number of entries in m_orphans. */
     116                 :             :     TxOrphanage::Count m_unique_rounded_input_scores{0};
     117                 :             : 
     118                 :             :     /** Index from the parents' outputs to wtxids that exist in m_orphans. Used to find children of
     119                 :             :      * a transaction that can be reconsidered and to remove entries that conflict with a block.*/
     120                 :             :     std::unordered_map<COutPoint, std::set<Wtxid>, SaltedOutpointHasher> m_outpoint_to_orphan_it;
     121                 :             : 
     122                 :         280 :     struct PeerDoSInfo {
     123                 :             :         TxOrphanage::Usage m_total_usage{0};
     124                 :             :         TxOrphanage::Count m_count_announcements{0};
     125                 :             :         TxOrphanage::Count m_total_latency_score{0};
     126                 :             :         bool operator==(const PeerDoSInfo& other) const
     127                 :             :         {
     128                 :             :             return m_total_usage == other.m_total_usage &&
     129                 :             :                    m_count_announcements == other.m_count_announcements &&
     130                 :             :                    m_total_latency_score == other.m_total_latency_score;
     131                 :             :         }
     132                 :        2917 :         void Add(const Announcement& ann)
     133                 :             :         {
     134                 :        2917 :             m_total_usage += ann.GetMemUsage();
     135                 :        2917 :             m_total_latency_score += ann.GetLatencyScore();
     136                 :        2917 :             m_count_announcements += 1;
     137                 :        2917 :         }
     138                 :        2664 :         bool Subtract(const Announcement& ann)
     139                 :             :         {
     140                 :        2664 :             Assume(m_total_usage >= ann.GetMemUsage());
     141                 :        2664 :             Assume(m_total_latency_score >= ann.GetLatencyScore());
     142                 :        2664 :             Assume(m_count_announcements >= 1);
     143                 :             : 
     144                 :        2664 :             m_total_usage -= ann.GetMemUsage();
     145                 :        2664 :             m_total_latency_score -= ann.GetLatencyScore();
     146                 :        2664 :             m_count_announcements -= 1;
     147                 :        2664 :             return m_count_announcements == 0;
     148                 :             :         }
     149                 :             :         /** There are 2 DoS scores:
     150                 :             :         * - Latency score (ratio of total latency score / max allowed latency score)
     151                 :             :         * - Memory score (ratio of total memory usage / max allowed memory usage).
     152                 :             :         *
     153                 :             :         * If the peer is using more than the allowed for either resource, its DoS score is > 1.
     154                 :             :         * A peer having a DoS score > 1 does not necessarily mean that something is wrong, since we
     155                 :             :         * do not trim unless the orphanage exceeds global limits, but it means that this peer will
     156                 :             :         * be selected for trimming sooner. If the global latency score or global memory usage
     157                 :             :         * limits are exceeded, it must be that there is a peer whose DoS score > 1. */
     158                 :        3417 :         FeeFrac GetDosScore(TxOrphanage::Count max_peer_latency_score, TxOrphanage::Usage max_peer_bytes) const
     159                 :             :         {
     160         [ -  + ]:        3417 :             assert(max_peer_latency_score > 0);
     161         [ -  + ]:        3417 :             assert(max_peer_bytes > 0);
     162                 :        3417 :             const FeeFrac cpu_score(m_total_latency_score, max_peer_latency_score);
     163                 :        3417 :             const FeeFrac mem_score(m_total_usage, max_peer_bytes);
     164                 :        3417 :             return std::max<FeeFrac>(cpu_score, mem_score);
     165                 :             :         }
     166                 :             :     };
     167                 :             :     /** Store per-peer statistics. Used to determine each peer's DoS score. The size of this map is used to determine the
     168                 :             :      * number of peers and thus global {latency score, memory} limits. */
     169                 :             :     std::unordered_map<NodeId, PeerDoSInfo> m_peer_orphanage_info;
     170                 :             : 
     171                 :             :     /** Erase from m_orphans and update m_peer_orphanage_info. */
     172                 :             :     template<typename Tag>
     173                 :             :     void Erase(Iter<Tag> it);
     174                 :             : 
     175                 :             :     /** Check if there is exactly one announcement with the same wtxid as it. */
     176                 :             :     bool IsUnique(Iter<ByWtxid> it) const;
     177                 :             : 
     178                 :             :     /** Check if the orphanage needs trimming. */
     179                 :             :     bool NeedsTrim() const;
     180                 :             : public:
     181   [ +  -  +  - ]:        1220 :     TxOrphanageImpl() = default;
     182                 :           6 :     TxOrphanageImpl(Count max_global_ann, Usage reserved_peer_usage) :
     183                 :           6 :         m_max_global_latency_score{max_global_ann},
     184   [ +  -  +  - ]:           6 :         m_reserved_usage_per_peer{reserved_peer_usage}
     185                 :           6 :     {}
     186                 :        2452 :     ~TxOrphanageImpl() noexcept override = default;
     187                 :             : 
     188                 :             :     TxOrphanage::Count CountAnnouncements() const override;
     189                 :             :     TxOrphanage::Count CountUniqueOrphans() const override;
     190                 :             :     TxOrphanage::Count AnnouncementsFromPeer(NodeId peer) const override;
     191                 :             :     TxOrphanage::Count LatencyScoreFromPeer(NodeId peer) const override;
     192                 :             :     TxOrphanage::Usage UsageByPeer(NodeId peer) const override;
     193                 :             : 
     194                 :             :     TxOrphanage::Count MaxGlobalLatencyScore() const override;
     195                 :             :     TxOrphanage::Count TotalLatencyScore() const override;
     196                 :             :     TxOrphanage::Usage ReservedPeerUsage() const override;
     197                 :             : 
      210                 :             :     /** Maximum allowed (deduplicated) latency score for all transactions (see Announcement::GetLatencyScore()). Dynamic
     199                 :             :      * based on number of peers. Each peer has an equal amount, but the global maximum latency score stays constant. The
     200                 :             :      * number of peers times MaxPeerLatencyScore() (rounded) adds up to MaxGlobalLatencyScore().  As long as every peer's
     201                 :             :      * m_total_latency_score / MaxPeerLatencyScore() < 1, MaxGlobalLatencyScore() is not exceeded. */
     202                 :             :     TxOrphanage::Count MaxPeerLatencyScore() const override;
     203                 :             : 
     204                 :             :     /** Maximum allowed (deduplicated) memory usage for all transactions (see Announcement::GetMemUsage()). Dynamic based
     205                 :             :      * on number of peers. More peers means more allowed memory usage. The number of peers times ReservedPeerUsage()
     206                 :             :      * adds up to MaxGlobalUsage(). As long as every peer's m_total_usage / ReservedPeerUsage() < 1, MaxGlobalUsage() is
     207                 :             :      * not exceeded. */
     208                 :             :     TxOrphanage::Usage MaxGlobalUsage() const override;
     209                 :             : 
     210                 :             :     bool AddTx(const CTransactionRef& tx, NodeId peer) override;
     211                 :             :     bool AddAnnouncer(const Wtxid& wtxid, NodeId peer) override;
     212                 :             :     CTransactionRef GetTx(const Wtxid& wtxid) const override;
     213                 :             :     bool HaveTx(const Wtxid& wtxid) const override;
     214                 :             :     bool HaveTxFromPeer(const Wtxid& wtxid, NodeId peer) const override;
     215                 :             :     CTransactionRef GetTxToReconsider(NodeId peer) override;
     216                 :             :     bool EraseTx(const Wtxid& wtxid) override;
     217                 :             :     void EraseForPeer(NodeId peer) override;
     218                 :             :     void EraseForBlock(const CBlock& block) override;
     219                 :             :     void LimitOrphans() override;
     220                 :             :     std::vector<std::pair<Wtxid, NodeId>> AddChildrenToWorkSet(const CTransaction& tx, FastRandomContext& rng) override;
     221                 :             :     bool HaveTxToReconsider(NodeId peer) override;
     222                 :             :     std::vector<CTransactionRef> GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId nodeid) const override;
     223                 :         919 :     size_t Size() const override { return m_unique_orphans; }
     224                 :             :     std::vector<OrphanTxBase> GetOrphanTransactions() const override;
     225                 :             :     TxOrphanage::Usage TotalOrphanUsage() const override;
     226                 :             :     void SanityCheck() const override;
     227                 :             : };
     228                 :             : 
     229                 :             : template<typename Tag>
     230                 :        2664 : void TxOrphanageImpl::Erase(Iter<Tag> it)
     231                 :             : {
     232                 :             :     // Update m_peer_orphanage_info and clean up entries if they point to an empty struct.
     233                 :             :     // This means peers that are not storing any orphans do not have an entry in
     234                 :             :     // m_peer_orphanage_info (they can be added back later if they announce another orphan) and
     235                 :             :     // ensures disconnected peers are not tracked forever.
     236                 :        2664 :     auto peer_it = m_peer_orphanage_info.find(it->m_announcer);
     237                 :        2664 :     Assume(peer_it != m_peer_orphanage_info.end());
     238         [ +  + ]:        2664 :     if (peer_it->second.Subtract(*it)) m_peer_orphanage_info.erase(peer_it);
     239                 :             : 
     240         [ +  + ]:        2664 :     if (IsUnique(m_orphans.project<ByWtxid>(it))) {
     241                 :        2623 :         m_unique_orphans -= 1;
     242                 :        2623 :         m_unique_rounded_input_scores -= it->GetLatencyScore() - 1;
     243                 :        2623 :         m_unique_orphan_usage -= it->GetMemUsage();
     244                 :             : 
     245                 :             :         // Remove references in m_outpoint_to_orphan_it
     246                 :        2623 :         const auto& wtxid{it->m_tx->GetWitnessHash()};
     247         [ +  + ]:        5271 :         for (const auto& input : it->m_tx->vin) {
     248                 :        2648 :             auto it_prev = m_outpoint_to_orphan_it.find(input.prevout);
     249         [ +  - ]:        5296 :             if (it_prev != m_outpoint_to_orphan_it.end()) {
     250         [ +  + ]:        2648 :                 it_prev->second.erase(wtxid);
     251                 :             :                 // Clean up keys if they point to an empty set.
     252         [ +  + ]:        2648 :                 if (it_prev->second.empty()) {
     253                 :        2641 :                     m_outpoint_to_orphan_it.erase(it_prev);
     254                 :             :                 }
     255                 :             :             }
     256                 :             :         }
     257                 :             :     }
     258                 :        2664 :     m_orphans.get<Tag>().erase(it);
     259                 :        2664 : }
     260                 :             : 
     261                 :        5581 : bool TxOrphanageImpl::IsUnique(Iter<ByWtxid> it) const
     262                 :             : {
     263                 :             :     // Iterators ByWtxid are sorted by wtxid, so check if neighboring elements have the same wtxid.
     264                 :        5581 :     auto& index = m_orphans.get<ByWtxid>();
     265         [ +  - ]:        5581 :     if (it == index.end()) return false;
     266   [ +  +  +  + ]:       16448 :     if (std::next(it) != index.end() && std::next(it)->m_tx->GetWitnessHash() == it->m_tx->GetWitnessHash()) return false;
     267   [ +  +  +  + ]:        5538 :     if (it != index.begin() && std::prev(it)->m_tx->GetWitnessHash() == it->m_tx->GetWitnessHash()) return false;
     268                 :             :     return true;
     269                 :             : }
     270                 :             : 
     271                 :        1748 : TxOrphanage::Usage TxOrphanageImpl::UsageByPeer(NodeId peer) const
     272                 :             : {
     273                 :        1748 :     auto it = m_peer_orphanage_info.find(peer);
     274         [ -  + ]:        1748 :     return it == m_peer_orphanage_info.end() ? 0 : it->second.m_total_usage;
     275                 :             : }
     276                 :             : 
     277                 :         247 : TxOrphanage::Count TxOrphanageImpl::CountAnnouncements() const { return m_orphans.size(); }
     278                 :             : 
     279                 :        4105 : TxOrphanage::Usage TxOrphanageImpl::TotalOrphanUsage() const { return m_unique_orphan_usage; }
     280                 :             : 
     281                 :         330 : TxOrphanage::Count TxOrphanageImpl::CountUniqueOrphans() const { return m_unique_orphans; }
     282                 :             : 
     283                 :          73 : TxOrphanage::Count TxOrphanageImpl::AnnouncementsFromPeer(NodeId peer) const {
     284                 :          73 :     auto it = m_peer_orphanage_info.find(peer);
     285         [ +  + ]:          73 :     return it == m_peer_orphanage_info.end() ? 0 : it->second.m_count_announcements;
     286                 :             : }
     287                 :             : 
     288                 :           3 : TxOrphanage::Count TxOrphanageImpl::LatencyScoreFromPeer(NodeId peer) const {
     289                 :           3 :     auto it = m_peer_orphanage_info.find(peer);
     290         [ +  - ]:           3 :     return it == m_peer_orphanage_info.end() ? 0 : it->second.m_total_latency_score;
     291                 :             : }
     292                 :             : 
     293                 :        2925 : bool TxOrphanageImpl::AddTx(const CTransactionRef& tx, NodeId peer)
     294                 :             : {
     295                 :        2925 :     const auto& wtxid{tx->GetWitnessHash()};
     296                 :        2925 :     const auto& txid{tx->GetHash()};
     297                 :             : 
     298                 :             :     // Ignore transactions above max standard size to avoid a send-big-orphans memory exhaustion attack.
     299                 :        2925 :     TxOrphanage::Usage sz = GetTransactionWeight(*tx);
     300         [ +  + ]:        2925 :     if (sz > MAX_STANDARD_TX_WEIGHT) {
     301   [ +  -  +  -  :          22 :         LogDebug(BCLog::TXPACKAGES, "ignoring large orphan tx (size: %u, txid: %s, wtxid: %s)\n", sz, txid.ToString(), wtxid.ToString());
                   +  - ]
     302                 :          11 :         return false;
     303                 :             :     }
     304                 :             : 
     305                 :             :     // We will return false if the tx already exists under a different peer.
     306                 :        2914 :     const bool brand_new{!HaveTx(wtxid)};
     307                 :             : 
     308         [ +  + ]:        2914 :     auto [iter, inserted] = m_orphans.get<ByWtxid>().emplace(tx, peer, m_current_sequence);
     309                 :             :     // If the announcement (same wtxid, same peer) already exists, emplacement fails. Return false.
     310         [ +  + ]:        2914 :     if (!inserted) return false;
     311                 :             : 
     312                 :        2912 :     ++m_current_sequence;
     313                 :        2912 :     auto& peer_info = m_peer_orphanage_info.try_emplace(peer).first->second;
     314                 :        2912 :     peer_info.Add(*iter);
     315                 :             : 
     316                 :             :     // Add links in m_outpoint_to_orphan_it
     317         [ +  + ]:        2912 :     if (brand_new) {
     318         [ +  + ]:        6406 :         for (const auto& input : tx->vin) {
     319                 :        3548 :             auto& wtxids_for_prevout = m_outpoint_to_orphan_it.try_emplace(input.prevout).first->second;
     320                 :        3548 :             wtxids_for_prevout.emplace(wtxid);
     321                 :             :         }
     322                 :             : 
     323                 :        2858 :         m_unique_orphans += 1;
     324                 :        2858 :         m_unique_orphan_usage += iter->GetMemUsage();
     325                 :        2858 :         m_unique_rounded_input_scores += iter->GetLatencyScore() - 1;
     326                 :             : 
     327   [ +  -  +  -  :        5716 :         LogDebug(BCLog::TXPACKAGES, "stored orphan tx %s (wtxid=%s), weight: %u (mapsz %u outsz %u)\n",
                   +  - ]
     328                 :             :                     txid.ToString(), wtxid.ToString(), sz, m_orphans.size(), m_outpoint_to_orphan_it.size());
     329                 :        2858 :         Assume(IsUnique(iter));
     330                 :             :     } else {
     331   [ +  -  +  -  :         108 :         LogDebug(BCLog::TXPACKAGES, "added peer=%d as announcer of orphan tx %s (wtxid=%s)\n",
                   +  - ]
     332                 :             :                     peer, txid.ToString(), wtxid.ToString());
     333                 :          54 :         Assume(!IsUnique(iter));
     334                 :             :     }
     335                 :             :     return brand_new;
     336                 :             : }
     337                 :             : 
     338                 :           6 : bool TxOrphanageImpl::AddAnnouncer(const Wtxid& wtxid, NodeId peer)
     339                 :             : {
     340                 :           6 :     auto& index_by_wtxid = m_orphans.get<ByWtxid>();
     341                 :           6 :     auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER});
     342                 :             : 
     343                 :             :     // Do nothing if this transaction isn't already present. We can't create an entry if we don't
     344                 :             :     // have the tx data.
     345         [ +  - ]:           6 :     if (it == index_by_wtxid.end()) return false;
     346         [ +  - ]:           6 :     if (it->m_tx->GetWitnessHash() != wtxid) return false;
     347                 :             : 
     348                 :             :     // Add another announcement, copying the CTransactionRef from one that already exists.
     349                 :           6 :     const auto& ptx = it->m_tx;
     350         [ +  + ]:           6 :     auto [iter, inserted] = index_by_wtxid.emplace(ptx, peer, m_current_sequence);
     351                 :             :     // If the announcement (same wtxid, same peer) already exists, emplacement fails. Return false.
     352         [ +  + ]:           6 :     if (!inserted) return false;
     353                 :             : 
     354                 :           5 :     ++m_current_sequence;
     355                 :           5 :     auto& peer_info = m_peer_orphanage_info.try_emplace(peer).first->second;
     356                 :           5 :     peer_info.Add(*iter);
     357                 :             : 
     358                 :           5 :     const auto& txid = ptx->GetHash();
     359   [ +  -  +  -  :          10 :     LogDebug(BCLog::TXPACKAGES, "added peer=%d as announcer of orphan tx %s (wtxid=%s)\n",
                   +  - ]
     360                 :             :                 peer, txid.ToString(), wtxid.ToString());
     361                 :             : 
     362                 :           5 :     Assume(!IsUnique(iter));
     363                 :           5 :     return true;
     364                 :             : }
     365                 :             : 
     366                 :       11954 : bool TxOrphanageImpl::EraseTx(const Wtxid& wtxid)
     367                 :             : {
     368                 :       11954 :     auto& index_by_wtxid = m_orphans.get<ByWtxid>();
     369                 :             : 
     370                 :       11954 :     auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER});
     371   [ +  +  +  + ]:       11954 :     if (it == index_by_wtxid.end() || it->m_tx->GetWitnessHash() != wtxid) return false;
     372                 :             : 
     373                 :         104 :     auto it_end = index_by_wtxid.upper_bound(ByWtxidView{wtxid, MAX_PEER});
     374                 :         104 :     unsigned int num_ann{0};
     375                 :         104 :     const auto txid = it->m_tx->GetHash();
     376         [ +  + ]:         210 :     while (it != it_end) {
     377                 :         106 :         Assume(it->m_tx->GetWitnessHash() == wtxid);
     378                 :         106 :         Erase<ByWtxid>(it++);
     379                 :         106 :         num_ann += 1;
     380                 :             :     }
     381                 :             : 
     382   [ +  -  +  -  :         208 :     LogDebug(BCLog::TXPACKAGES, "removed orphan tx %s (wtxid=%s) (%u announcements)\n", txid.ToString(), wtxid.ToString(), num_ann);
                   +  - ]
     383                 :             : 
     384                 :             :     return true;
     385                 :             : }
     386                 :             : 
     387                 :             : /** Erase all entries by this peer. */
     388                 :        1756 : void TxOrphanageImpl::EraseForPeer(NodeId peer)
     389                 :             : {
     390                 :        1756 :     auto& index_by_peer = m_orphans.get<ByPeer>();
     391                 :        1756 :     auto it = index_by_peer.lower_bound(ByPeerView{peer, false, 0});
     392   [ +  +  +  + ]:        1756 :     if (it == index_by_peer.end() || it->m_announcer != peer) return;
     393                 :             : 
     394                 :         125 :     unsigned int num_ann{0};
     395   [ +  +  +  + ]:        2509 :     while (it != index_by_peer.end() && it->m_announcer == peer) {
     396                 :             :         // Delete item, cleaning up m_outpoint_to_orphan_it iff this entry is unique by wtxid.
     397                 :        2384 :         Erase<ByPeer>(it++);
     398                 :        2384 :         num_ann += 1;
     399                 :             :     }
     400                 :         125 :     Assume(!m_peer_orphanage_info.contains(peer));
     401                 :             : 
     402   [ +  -  +  - ]:         125 :     if (num_ann > 0) LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) from peer=%d\n", num_ann, peer);
     403                 :             : }
     404                 :             : 
     405                 :             : /** If the data structure needs trimming, evicts announcements by selecting the DoSiest peer and evicting its oldest
     406                 :             :  * announcement (sorting non-reconsiderable orphans first, to give reconsiderable orphans a greater chance of being
     407                 :             :  * processed). Does nothing if no global limits are exceeded.  This eviction strategy effectively "reserves" an
     408                 :             :  * amount of announcements and space for each peer. The reserved amount is protected from eviction even if there
     409                 :             :  * are peers spamming the orphanage.
     410                 :             :  */
     411                 :        2735 : void TxOrphanageImpl::LimitOrphans()
     412                 :             : {
     413         [ +  + ]:        2735 :     if (!NeedsTrim()) return;
     414                 :             : 
     415                 :         165 :     const auto original_unique_txns{CountUniqueOrphans()};
     416                 :             : 
     417                 :             :     // Even though it's possible for MaxPeerLatencyScore to increase within this call to LimitOrphans
     418                 :             :     // (e.g. if a peer's orphans are removed entirely, changing the number of peers), use consistent limits throughout.
     419                 :         165 :     const auto max_ann{MaxPeerLatencyScore()};
     420                 :         165 :     const auto max_mem{ReservedPeerUsage()};
     421                 :             : 
     422                 :             :     // We have exceeded the global limit(s). Now, identify who is using too much and evict their orphans.
     423                 :             :     // Create a heap of pairs (NodeId, DoS score), sorted by descending DoS score.
     424                 :         165 :     std::vector<std::pair<NodeId, FeeFrac>> heap_peer_dos;
     425         [ +  - ]:         165 :     heap_peer_dos.reserve(m_peer_orphanage_info.size());
     426         [ +  + ]:        3408 :     for (const auto& [nodeid, entry] : m_peer_orphanage_info) {
     427                 :             :         // Performance optimization: only consider peers with a DoS score > 1.
     428                 :        3243 :         const auto dos_score = entry.GetDosScore(max_ann, max_mem);
     429         [ +  + ]:        3243 :         if (dos_score >> FeeFrac{1, 1}) {
     430         [ +  - ]:         165 :             heap_peer_dos.emplace_back(nodeid, dos_score);
     431                 :             :         }
     432                 :             :     }
     433                 :         165 :     static constexpr auto compare_score = [](const auto& left, const auto& right) {
     434         [ #  # ]:           0 :         if (left.second != right.second) return left.second < right.second;
     435                 :             :         // Tiebreak by considering the more recent peer (higher NodeId) to be worse.
     436                 :           0 :         return left.first < right.first;
     437                 :             :     };
     438                 :         165 :     std::make_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score);
     439                 :             : 
     440                 :         165 :     unsigned int num_erased{0};
     441                 :             :     // This outer loop finds the peer with the highest DoS score, which is a fraction of {usage, announcements} used
     442                 :             :     // over the respective allowances. We continue until the orphanage is within global limits. That means some peers
     443                 :             :     // might still have a DoS score > 1 at the end.
     444                 :             :     // Note: if ratios are the same, FeeFrac tiebreaks by denominator. In practice, since the CPU denominator (number of
     445                 :             :     // announcements) is always lower, this means that a peer with only high number of announcements will be targeted
     446                 :             :     // before a peer using a lot of memory, even if they have the same ratios.
     447                 :         165 :     do {
     448                 :         165 :         Assume(!heap_peer_dos.empty());
     449                 :             :         // This is a max-heap, so the worst peer is at the front. pop_heap()
     450                 :             :         // moves it to the back, and the next worst peer is moved to the front.
     451                 :         165 :         std::pop_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score);
     452                 :         165 :         const auto [worst_peer, dos_score] = std::move(heap_peer_dos.back());
     453                 :         165 :         heap_peer_dos.pop_back();
     454                 :             : 
     455                 :             :         // If needs trim, then at least one peer has a DoS score higher than 1.
     456                 :         165 :         Assume(dos_score >> (FeeFrac{1, 1}));
     457                 :             : 
     458                 :         165 :         auto it_worst_peer = m_peer_orphanage_info.find(worst_peer);
     459                 :             : 
     460                 :             :         // This inner loop trims until this peer is no longer the DoSiest one or has a score within 1. The score 1 is
     461                 :             :         // just a conservative fallback: once the last peer goes below ratio 1, NeedsTrim() will return false anyway.
     462                 :             :         // We evict the oldest announcement(s) from this peer, sorting non-reconsiderable before reconsiderable.
     463                 :             :         // The number of inner loop iterations is bounded by the total number of announcements.
     464         [ +  - ]:         165 :         const auto& dos_threshold = heap_peer_dos.empty() ? FeeFrac{1, 1} : heap_peer_dos.front().second;
     465                 :         165 :         auto it_ann = m_orphans.get<ByPeer>().lower_bound(ByPeerView{worst_peer, false, 0});
     466   [ +  -  +  + ]:         291 :         while (NeedsTrim()) {
     467         [ +  - ]:         174 :             if (!Assume(it_ann->m_announcer == worst_peer)) break;
     468         [ +  - ]:         174 :             if (!Assume(it_ann != m_orphans.get<ByPeer>().end())) break;
     469                 :             : 
     470         [ +  - ]:         174 :             Erase<ByPeer>(it_ann++);
     471                 :         174 :             num_erased += 1;
     472                 :             : 
     473                 :             :             // If we erased the last orphan from this peer, it_worst_peer will be invalidated.
     474                 :         174 :             it_worst_peer = m_peer_orphanage_info.find(worst_peer);
     475   [ +  -  +  +  :         222 :             if (it_worst_peer == m_peer_orphanage_info.end() || it_worst_peer->second.GetDosScore(max_ann, max_mem) <= dos_threshold) break;
                   +  + ]
     476                 :             :         }
     477                 :             : 
     478   [ +  -  -  + ]:         165 :         if (!NeedsTrim()) break;
     479                 :             : 
     480                 :             :         // Unless this peer is empty, put it back in the heap so we continue to consider evicting its orphans.
     481                 :             :         // We may select this peer for evictions again if there are multiple DoSy peers.
     482   [ -  -  -  - ]:         165 :         if (it_worst_peer != m_peer_orphanage_info.end() && it_worst_peer->second.m_count_announcements > 0) {
     483         [ #  # ]:           0 :             heap_peer_dos.emplace_back(worst_peer, it_worst_peer->second.GetDosScore(max_ann, max_mem));
     484                 :           0 :             std::push_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score);
     485                 :             :         }
     486                 :             :     } while (true);
     487                 :             : 
     488         [ +  - ]:         165 :     const auto remaining_unique_orphans{CountUniqueOrphans()};
     489   [ +  -  +  -  :         165 :     LogDebug(BCLog::TXPACKAGES, "orphanage overflow, removed %u tx (%u announcements)\n", original_unique_txns - remaining_unique_orphans, num_erased);
                   +  - ]
     490                 :         165 : }
     491                 :             : 
/** For each output of tx, mark one announcement of each orphan spending that output as
 * ready for reconsideration. Returns the (wtxid, peer) pairs newly flagged. The peer is
 * chosen at random among the announcers of each orphan. */
std::vector<std::pair<Wtxid, NodeId>> TxOrphanageImpl::AddChildrenToWorkSet(const CTransaction& tx, FastRandomContext& rng)
{
    std::vector<std::pair<Wtxid, NodeId>> ret;
    auto& index_by_wtxid = m_orphans.get<ByWtxid>();
    // Scan every output of tx for orphans that spend it.
    for (unsigned int i = 0; i < tx.vout.size(); i++) {
        const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(tx.GetHash(), i));
        if (it_by_prev != m_outpoint_to_orphan_it.end()) {
            for (const auto& wtxid : it_by_prev->second) {
                // Belt and suspenders, each entry in m_outpoint_to_orphan_it should always have at least 1 announcement.
                auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER});
                if (!Assume(it != index_by_wtxid.end())) continue;

                // Select a random peer to assign orphan processing, reducing wasted work if the orphan is still missing
                // inputs. However, we don't want to create an issue in which the assigned peer can purposefully stop us
                // from processing the orphan by disconnecting.
                auto it_end = index_by_wtxid.upper_bound(ByWtxidView{wtxid, MAX_PEER});
                const auto num_announcers{std::distance(it, it_end)};
                if (!Assume(num_announcers > 0)) continue;
                // Advance to a uniformly random announcement within [it, it_end).
                std::advance(it, rng.randrange(num_announcers));

                if (!Assume(it->m_tx->GetWitnessHash() == wtxid)) break;

                // Mark this orphan as ready to be reconsidered. Only report it in the return
                // value if it was not already flagged for reconsideration.
                static constexpr auto mark_reconsidered_modifier = [](auto& ann) { ann.m_reconsider = true; };
                if (!it->m_reconsider) {
                    index_by_wtxid.modify(it, mark_reconsidered_modifier);
                    ret.emplace_back(wtxid, it->m_announcer);
                }

                LogDebug(BCLog::TXPACKAGES, "added %s (wtxid=%s) to peer %d workset\n",
                            it->m_tx->GetHash().ToString(), it->m_tx->GetWitnessHash().ToString(), it->m_announcer);
            }
        }
    }
    return ret;
}
     528                 :             : 
     529                 :       75770 : bool TxOrphanageImpl::HaveTx(const Wtxid& wtxid) const
     530                 :             : {
     531                 :       75770 :     auto it_lower = m_orphans.get<ByWtxid>().lower_bound(ByWtxidView{wtxid, MIN_PEER});
     532   [ +  +  +  + ]:       75770 :     return it_lower != m_orphans.get<ByWtxid>().end() && it_lower->m_tx->GetWitnessHash() == wtxid;
     533                 :             : }
     534                 :             : 
     535                 :       27301 : CTransactionRef TxOrphanageImpl::GetTx(const Wtxid& wtxid) const
     536                 :             : {
     537                 :       27301 :     auto it_lower = m_orphans.get<ByWtxid>().lower_bound(ByWtxidView{wtxid, MIN_PEER});
     538   [ +  +  +  +  :       27301 :     if (it_lower != m_orphans.get<ByWtxid>().end() && it_lower->m_tx->GetWitnessHash() == wtxid) return it_lower->m_tx;
                   +  - ]
     539                 :       27293 :     return nullptr;
     540                 :             : }
     541                 :             : 
     542                 :        2653 : bool TxOrphanageImpl::HaveTxFromPeer(const Wtxid& wtxid, NodeId peer) const
     543                 :             : {
     544                 :        2653 :     return m_orphans.get<ByWtxid>().count(ByWtxidView{wtxid, peer}) > 0;
     545                 :             : }
     546                 :             : 
     547                 :             : /** If there is a tx that can be reconsidered, return it and set it back to
     548                 :             :  * non-reconsiderable. Otherwise, return a nullptr. */
     549                 :      624068 : CTransactionRef TxOrphanageImpl::GetTxToReconsider(NodeId peer)
     550                 :             : {
     551                 :      624068 :     auto it = m_orphans.get<ByPeer>().lower_bound(ByPeerView{peer, true, 0});
     552   [ +  +  +  +  :      624068 :     if (it != m_orphans.get<ByPeer>().end() && it->m_announcer == peer && it->m_reconsider) {
                   +  - ]
     553                 :             :         // Flip m_reconsider. Even if this transaction stays in orphanage, it shouldn't be
     554                 :             :         // reconsidered again until there is a new reason to do so.
     555                 :          50 :         static constexpr auto mark_reconsidered_modifier = [](auto& ann) { ann.m_reconsider = false; };
     556                 :          50 :         m_orphans.get<ByPeer>().modify(it, mark_reconsidered_modifier);
     557         [ +  - ]:          50 :         return it->m_tx;
     558                 :             :     }
     559                 :      624018 :     return nullptr;
     560                 :             : }
     561                 :             : 
     562                 :             : /** Return whether there is a tx that can be reconsidered. */
     563                 :      167023 : bool TxOrphanageImpl::HaveTxToReconsider(NodeId peer)
     564                 :             : {
     565                 :      167023 :     auto it = m_orphans.get<ByPeer>().lower_bound(ByPeerView{peer, true, 0});
     566   [ +  +  +  +  :      167023 :     return it != m_orphans.get<ByPeer>().end() && it->m_announcer == peer && it->m_reconsider;
                   -  + ]
     567                 :             : }
     568                 :      121396 : void TxOrphanageImpl::EraseForBlock(const CBlock& block)
     569                 :             : {
     570         [ +  + ]:      121396 :     if (m_orphans.empty()) return;
     571                 :             : 
     572                 :          14 :     std::set<Wtxid> wtxids_to_erase;
     573         [ +  + ]:          94 :     for (const CTransactionRef& ptx : block.vtx) {
     574                 :          80 :         const CTransaction& block_tx = *ptx;
     575                 :             : 
     576                 :             :         // Which orphan pool entries must we evict?
     577         [ +  + ]:         161 :         for (const auto& input : block_tx.vin) {
     578                 :          81 :             auto it_prev = m_outpoint_to_orphan_it.find(input.prevout);
     579         [ +  + ]:         109 :             if (it_prev != m_outpoint_to_orphan_it.end()) {
     580                 :             :                 // Copy all wtxids to wtxids_to_erase.
     581         [ +  - ]:          28 :                 std::copy(it_prev->second.cbegin(), it_prev->second.cend(), std::inserter(wtxids_to_erase, wtxids_to_erase.end()));
     582                 :             :             }
     583                 :             :         }
     584                 :             :     }
     585                 :             : 
     586                 :          14 :     unsigned int num_erased{0};
     587         [ +  + ]:          42 :     for (const auto& wtxid : wtxids_to_erase) {
     588   [ +  -  -  + ]:          28 :         num_erased += EraseTx(wtxid) ? 1 : 0;
     589                 :             :     }
     590                 :             : 
     591         [ +  + ]:          14 :     if (num_erased != 0) {
     592   [ +  -  +  -  :           7 :         LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) included or conflicted by block\n", num_erased);
                   +  - ]
     593                 :             :     }
     594                 :          14 :     Assume(wtxids_to_erase.size() == num_erased);
     595                 :          14 : }
     596                 :             : 
/** Get all children that spend from this tx and were received from nodeid. Sorted from most
 * recent to least recent. */
std::vector<CTransactionRef> TxOrphanageImpl::GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId peer) const
{
    std::vector<CTransactionRef> children_found;
    const auto& parent_txid{parent->GetHash()};

    // Iterate through all orphans from this peer, in reverse order, so that more recent
    // transactions are added first. Doing so helps avoid work when one of the orphans replaced
    // an earlier one. Since we require the NodeId to match, one peer's announcement order does
    // not bias how we process other peer's orphans.
    auto& index_by_peer = m_orphans.get<ByPeer>();
    // These bounds span the peer's entire range: both reconsider flags and all sequences.
    auto it_upper = index_by_peer.upper_bound(ByPeerView{peer, true, std::numeric_limits<uint64_t>::max()});
    auto it_lower = index_by_peer.lower_bound(ByPeerView{peer, false, 0});

    // Walk [it_lower, it_upper) backwards; decrement before dereferencing since
    // it_upper starts one past the last entry.
    while (it_upper != it_lower) {
        --it_upper;
        if (!Assume(it_upper->m_announcer == peer)) break;
        // Check if this tx spends from parent (any input whose prevout txid matches).
        for (const auto& input : it_upper->m_tx->vin) {
            if (input.prevout.hash == parent_txid) {
                children_found.emplace_back(it_upper->m_tx);
                // One matching input is enough; move on to the next orphan.
                break;
            }
        }
    }
    return children_found;
}
     625                 :             : 
/** Build a list of all unique orphans, each paired with the set of peers that announced it. */
std::vector<TxOrphanage::OrphanTxBase> TxOrphanageImpl::GetOrphanTransactions() const
{
    std::vector<TxOrphanage::OrphanTxBase> result;
    result.reserve(m_unique_orphans);

    // The ByWtxid index keeps announcements of the same transaction adjacent, so a
    // single pass can aggregate all announcers per unique wtxid.
    auto& index_by_wtxid = m_orphans.get<ByWtxid>();
    auto it = index_by_wtxid.begin();
    std::set<NodeId> this_orphan_announcers;
    while (it != index_by_wtxid.end()) {
        this_orphan_announcers.insert(it->m_announcer);
        // If this is the last entry, or the next entry has a different wtxid, build a OrphanTxBase.
        if (std::next(it) == index_by_wtxid.end() || std::next(it)->m_tx->GetWitnessHash() != it->m_tx->GetWitnessHash()) {
            result.emplace_back(it->m_tx, std::move(this_orphan_announcers));
            // clear() resets the moved-from set to a known-empty state for the next group.
            this_orphan_announcers.clear();
        }

        ++it;
    }
    // One result entry per unique wtxid; cross-check against the cached count.
    Assume(m_unique_orphans == result.size());

    return result;
}
     648                 :             : 
/** Recompute every cached aggregate from the announcement multi-index from scratch and
 * assert that it matches the maintained state. Debug/test-only consistency check. */
void TxOrphanageImpl::SanityCheck() const
{
    // Rebuilt-from-scratch versions of the cached per-peer and global statistics.
    std::unordered_map<NodeId, PeerDoSInfo> reconstructed_peer_info;
    // Per unique wtxid: (memory usage, latency score minus the per-announcement unit).
    std::map<Wtxid, std::pair<TxOrphanage::Usage, TxOrphanage::Count>> unique_wtxids_to_scores;
    std::set<COutPoint> all_outpoints;

    for (auto it = m_orphans.begin(); it != m_orphans.end(); ++it) {
        for (const auto& input : it->m_tx->vin) {
            all_outpoints.insert(input.prevout);
        }
        // emplace deduplicates by wtxid: only the first announcement of each tx contributes.
        unique_wtxids_to_scores.emplace(it->m_tx->GetWitnessHash(), std::make_pair(it->GetMemUsage(), it->GetLatencyScore() - 1));

        // Per-peer totals count every announcement, without deduplication.
        auto& peer_info = reconstructed_peer_info[it->m_announcer];
        peer_info.m_total_usage += it->GetMemUsage();
        peer_info.m_count_announcements += 1;
        peer_info.m_total_latency_score += it->GetLatencyScore();
    }
    assert(reconstructed_peer_info.size() == m_peer_orphanage_info.size());

    // All outpoints exist in m_outpoint_to_orphan_it, all keys in m_outpoint_to_orphan_it correspond to some
    // orphan, and all wtxids referenced in m_outpoint_to_orphan_it are also in m_orphans.
    // This ensures m_outpoint_to_orphan_it is cleaned up.
    assert(all_outpoints.size() == m_outpoint_to_orphan_it.size());
    for (const auto& [outpoint, wtxid_set] : m_outpoint_to_orphan_it) {
        assert(all_outpoints.contains(outpoint));
        for (const auto& wtxid : wtxid_set) {
            assert(unique_wtxids_to_scores.contains(wtxid));
        }
    }

    // Cached m_unique_orphans value is correct.
    assert(m_orphans.size() >= m_unique_orphans);
    assert(m_orphans.size() <= m_peer_orphanage_info.size() * m_unique_orphans);
    assert(unique_wtxids_to_scores.size() == m_unique_orphans);

    // Deduplicated memory usage, summed over unique wtxids, must match the cached total.
    const auto calculated_dedup_usage = std::accumulate(unique_wtxids_to_scores.begin(), unique_wtxids_to_scores.end(),
        TxOrphanage::Usage{0}, [](TxOrphanage::Usage sum, const auto pair) { return sum + pair.second.first; });
    assert(calculated_dedup_usage == m_unique_orphan_usage);

    // Global usage is deduplicated, should be less than or equal to the sum of all per-peer usages.
    const auto summed_peer_usage = std::accumulate(m_peer_orphanage_info.begin(), m_peer_orphanage_info.end(),
        TxOrphanage::Usage{0}, [](TxOrphanage::Usage sum, const auto pair) { return sum + pair.second.m_total_usage; });
    assert(summed_peer_usage >= m_unique_orphan_usage);

    // Cached m_unique_rounded_input_scores value is correct.
    const auto calculated_total_latency_score = std::accumulate(unique_wtxids_to_scores.begin(), unique_wtxids_to_scores.end(),
        TxOrphanage::Count{0}, [](TxOrphanage::Count sum, const auto pair) { return sum + pair.second.second; });
    assert(calculated_total_latency_score == m_unique_rounded_input_scores);

    // Global latency score is deduplicated, should be less than or equal to the sum of all per-peer latency scores.
    const auto summed_peer_latency_score = std::accumulate(m_peer_orphanage_info.begin(), m_peer_orphanage_info.end(),
        TxOrphanage::Count{0}, [](TxOrphanage::Count sum, const auto pair) { return sum + pair.second.m_total_latency_score; });
    assert(summed_peer_latency_score >= m_unique_rounded_input_scores + m_orphans.size());
}
     703                 :             : 
     704                 :        3324 : TxOrphanage::Count TxOrphanageImpl::MaxGlobalLatencyScore() const { return m_max_global_latency_score; }
     705                 :        3319 : TxOrphanage::Count TxOrphanageImpl::TotalLatencyScore() const { return m_unique_rounded_input_scores + m_orphans.size(); }
     706                 :         172 : TxOrphanage::Usage TxOrphanageImpl::ReservedPeerUsage() const { return m_reserved_usage_per_peer; }
     707         [ +  + ]:         183 : TxOrphanage::Count TxOrphanageImpl::MaxPeerLatencyScore() const { return m_max_global_latency_score / std::max<unsigned int>(m_peer_orphanage_info.size(), 1); }
     708         [ +  + ]:        3209 : TxOrphanage::Usage TxOrphanageImpl::MaxGlobalUsage() const { return m_reserved_usage_per_peer * std::max<int64_t>(m_peer_orphanage_info.size(), 1); }
     709                 :             : 
     710                 :        3191 : bool TxOrphanageImpl::NeedsTrim() const
     711                 :             : {
     712   [ +  +  +  + ]:        3191 :     return TotalLatencyScore() > MaxGlobalLatencyScore() || TotalOrphanUsage() > MaxGlobalUsage();
     713                 :             : }
     714                 :        1220 : std::unique_ptr<TxOrphanage> MakeTxOrphanage() noexcept
     715                 :             : {
     716         [ -  + ]:        1220 :     return std::make_unique<TxOrphanageImpl>();
     717                 :             : }
     718                 :           6 : std::unique_ptr<TxOrphanage> MakeTxOrphanage(TxOrphanage::Count max_global_ann, TxOrphanage::Usage reserved_peer_usage) noexcept
     719                 :             : {
     720         [ -  + ]:           6 :     return std::make_unique<TxOrphanageImpl>(max_global_ann, reserved_peer_usage);
     721                 :             : }
     722                 :             : } // namespace node
        

Generated by: LCOV version 2.0-1