@@ -807,7 +807,7 @@ class PeerManagerImpl final : public PeerManager
 
     uint32_t GetFetchFlags(const Peer& peer) const;
 
-    std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
+    std::map<uint64_t, std::chrono::microseconds> m_next_inv_to_inbounds_per_network_key GUARDED_BY(g_msgproc_mutex);
 
     /** Number of nodes with fSyncStarted. */
     int nSyncStarted GUARDED_BY(cs_main) = 0;
@@ -837,12 +837,14 @@ class PeerManagerImpl final : public PeerManager
 
     /**
      * For sending `inv`s to inbound peers, we use a single (exponentially
-     * distributed) timer for all peers. If we used a separate timer for each
+     * distributed) timer for all peers with the same network key. If we used a separate timer for each
      * peer, a spy node could make multiple inbound connections to us to
-     * accurately determine when we received the transaction (and potentially
-     * determine the transaction's origin). */
+     * accurately determine when we received a transaction (and potentially
+     * determine the transaction's origin). Each network key has its own timer
+     * to make fingerprinting harder. */
     std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
-                                                std::chrono::seconds average_interval) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
+                                                std::chrono::seconds average_interval,
+                                                uint64_t network_key) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 
 
     // All of the following cache a recent block, and are protected by m_most_recent_block_mutex
@@ -1143,15 +1145,15 @@ static bool CanServeWitnesses(const Peer& peer)
 }
 
 std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
-                                                             std::chrono::seconds average_interval)
+                                                             std::chrono::seconds average_interval,
+                                                             uint64_t network_key)
 {
-    if (m_next_inv_to_inbounds.load() < now) {
-        // If this function were called from multiple threads simultaneously
-        // it would possible that both update the next send variable, and return a different result to their caller.
-        // This is not possible in practice as only the net processing thread invokes this function.
-        m_next_inv_to_inbounds = now + m_rng.rand_exp_duration(average_interval);
+    auto [it, inserted] = m_next_inv_to_inbounds_per_network_key.try_emplace(network_key, 0us);
+    auto& timer{it->second};
+    if (timer < now) {
+        timer = now + m_rng.rand_exp_duration(average_interval);
     }
-    return m_next_inv_to_inbounds;
+    return timer;
 }
 
 bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
@@ -5711,7 +5713,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
                 if (tx_relay->m_next_inv_send_time < current_time) {
                     fSendTrickle = true;
                     if (pto->IsInboundConn()) {
-                        tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
+                        tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL, pto->m_network_key);
                     } else {
                         tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
                     }
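
For readers skimming the diff, here is a self-contained sketch of the per-network-key trickle timer the change introduces. This is not the Bitcoin Core code: the RNG helper, the global map, and `main()` below are illustrative stand-ins for `m_rng.rand_exp_duration()`, the `PeerManagerImpl` member, and the `SendMessages` call site.

```cpp
// Minimal sketch, assuming a map keyed by an opaque uint64_t network key:
// all inbound peers sharing a key see the same (exponentially distributed)
// next-send time, so a spy opening many inbound connections cannot narrow
// down when a transaction entered our mempool.
#include <chrono>
#include <cstdint>
#include <iostream>
#include <map>
#include <random>

using namespace std::chrono_literals;

std::mt19937_64 g_rng{0xC0FFEE};

// Stand-in for m_rng.rand_exp_duration(): an exponentially distributed
// duration with the given mean, so send events form a Poisson process.
std::chrono::microseconds RandExpDuration(std::chrono::seconds average_interval)
{
    std::exponential_distribution<double> dist(1.0 / std::chrono::duration<double>(average_interval).count());
    return std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::duration<double>(dist(g_rng)));
}

// Illustrative counterpart of m_next_inv_to_inbounds_per_network_key.
std::map<uint64_t, std::chrono::microseconds> g_next_inv_per_key;

std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
                                            std::chrono::seconds average_interval,
                                            uint64_t network_key)
{
    // Lazily create the timer for this key, then advance it only once it
    // has expired, mirroring the try_emplace logic in the diff above.
    auto [it, inserted] = g_next_inv_per_key.try_emplace(network_key, 0us);
    auto& timer{it->second};
    if (timer < now) {
        timer = now + RandExpDuration(average_interval);
    }
    return timer;
}

int main()
{
    const auto now{std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now().time_since_epoch())};
    // Two calls for the same key share one timer; a different key is independent.
    std::cout << NextInvToInbounds(now, 5s, /*network_key=*/1).count() << '\n';
    std::cout << NextInvToInbounds(now, 5s, /*network_key=*/1).count() << '\n'; // same value
    std::cout << NextInvToInbounds(now, 5s, /*network_key=*/2).count() << '\n'; // separate timer
}
```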