From 0f7d4ee4e8281ed141a6ebb7e0edee7b864e4dcf Mon Sep 17 00:00:00 2001
From: Martin Zumsande
Date: Wed, 17 Sep 2025 23:58:09 -0400
Subject: [PATCH] p2p: Use different inbound inv timer per network

Currently, nodes schedule their invs to all inbound peers at the same
time. It is trivial to use this timing pattern for fingerprinting a
node's identities across different networks. Using a separate timer
for each network makes this fingerprinting harder.
---
 src/net_processing.cpp | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 336669a8545..8a39cb587a9 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -807,7 +807,7 @@ private:
 
     uint32_t GetFetchFlags(const Peer& peer) const;
 
-    std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
+    std::map<uint64_t, std::chrono::microseconds> m_next_inv_to_inbounds_per_network_key GUARDED_BY(g_msgproc_mutex);
 
     /** Number of nodes with fSyncStarted. */
     int nSyncStarted GUARDED_BY(cs_main) = 0;
@@ -837,12 +837,14 @@ private:
 
     /**
      * For sending `inv`s to inbound peers, we use a single (exponentially
-     * distributed) timer for all peers. If we used a separate timer for each
+     * distributed) timer for all peers with the same network key. If we used a separate timer for each
      * peer, a spy node could make multiple inbound connections to us to
-     * accurately determine when we received the transaction (and potentially
-     * determine the transaction's origin). */
+     * accurately determine when we received a transaction (and potentially
+     * determine the transaction's origin). Each network key has its own timer
+     * to make fingerprinting harder. */
     std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
-                                                std::chrono::seconds average_interval) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
+                                                std::chrono::seconds average_interval,
+                                                uint64_t network_key) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 
 
     // All of the following cache a recent block, and are protected by m_most_recent_block_mutex
@@ -1143,15 +1145,15 @@ static bool CanServeWitnesses(const Peer& peer)
 }
 
 std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
-                                                             std::chrono::seconds average_interval)
+                                                             std::chrono::seconds average_interval,
+                                                             uint64_t network_key)
 {
-    if (m_next_inv_to_inbounds.load() < now) {
-        // If this function were called from multiple threads simultaneously
-        // it would possible that both update the next send variable, and return a different result to their caller.
-        // This is not possible in practice as only the net processing thread invokes this function.
-        m_next_inv_to_inbounds = now + m_rng.rand_exp_duration(average_interval);
+    auto [it, inserted] = m_next_inv_to_inbounds_per_network_key.try_emplace(network_key, 0us);
+    auto& timer{it->second};
+    if (timer < now) {
+        timer = now + m_rng.rand_exp_duration(average_interval);
     }
-    return m_next_inv_to_inbounds;
+    return timer;
 }
 
 bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
@@ -5711,7 +5713,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
             if (tx_relay->m_next_inv_send_time < current_time) {
                 fSendTrickle = true;
                 if (pto->IsInboundConn()) {
-                    tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
+                    tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL, pto->m_network_key);
                 } else {
                     tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
                 }
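
For context, below is a self-contained sketch of the scheduling pattern this
patch introduces. It is illustrative only: the InvTimers class, the RandExp()
helper and the main() driver are invented for the demo, and
std::exponential_distribution stands in for Bitcoin Core's
m_rng.rand_exp_duration(); only the map + try_emplace logic mirrors
NextInvToInbounds() above.

#include <chrono>
#include <cstdint>
#include <iostream>
#include <map>
#include <random>

using namespace std::chrono_literals;

class InvTimers
{
    std::mt19937_64 m_rng{std::random_device{}()};
    // One shared timer per network key, mirroring m_next_inv_to_inbounds_per_network_key.
    std::map<uint64_t, std::chrono::microseconds> m_next_send;

    // Exponentially distributed delay with the given mean; a stand-in for
    // Bitcoin Core's m_rng.rand_exp_duration(average_interval).
    std::chrono::microseconds RandExp(std::chrono::seconds average_interval)
    {
        std::exponential_distribution<double> dist{1.0 / std::chrono::duration<double>(average_interval).count()};
        return std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::duration<double>{dist(m_rng)});
    }

public:
    // All inbound peers that share a network key get the same next-send time,
    // so extra spy connections on that network reveal no additional timing.
    std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
                                                std::chrono::seconds average_interval,
                                                uint64_t network_key)
    {
        // Insert a timer of 0us for a key seen for the first time; a timer in
        // the past is then bumped to now plus an exponential delay.
        auto it = m_next_send.try_emplace(network_key, 0us).first;
        if (it->second < now) {
            it->second = now + RandExp(average_interval);
        }
        return it->second;
    }
};

int main()
{
    InvTimers timers;
    const std::chrono::microseconds now{1s};
    // Two queries with the same (hypothetical) network key share one timer...
    std::cout << timers.NextInvToInbounds(now, 5s, /*network_key=*/1).count() << "us\n";
    std::cout << timers.NextInvToInbounds(now, 5s, /*network_key=*/1).count() << "us\n";
    // ...while a different key draws an independent one.
    std::cout << timers.NextInvToInbounds(now, 5s, /*network_key=*/2).count() << "us\n";
}

Because the real timers live in PeerManagerImpl and are only touched by the
net processing thread (they are GUARDED_BY(g_msgproc_mutex)), neither the
patch nor the sketch needs the atomic that the old single-timer version used.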