path: root/src/p2p/net_node.inl
Diffstat (limited to 'src/p2p/net_node.inl')
-rw-r--r--  src/p2p/net_node.inl  434
1 file changed, 192 insertions(+), 242 deletions(-)
diff --git a/src/p2p/net_node.inl b/src/p2p/net_node.inl
index 08bc76d26..2d7600f7a 100644
--- a/src/p2p/net_node.inl
+++ b/src/p2p/net_node.inl
@@ -435,6 +435,8 @@ namespace nodetool
if (command_line::has_arg(vm, arg_p2p_seed_node))
{
+ boost::unique_lock<boost::shared_mutex> lock(m_seed_nodes_lock);
+
if (!parse_peers_and_add_to_container(vm, arg_p2p_seed_node, m_seed_nodes))
return false;
}
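
The hunk above takes an exclusive (writer) lock on m_seed_nodes_lock before mutating the seed list, so later readers can share the same mutex. A minimal standalone sketch of that reader/writer split, assuming only Boost.Thread; seed_list and its members are illustrative names, not the types used in net_node.inl:

#include <boost/thread/locks.hpp>
#include <boost/thread/shared_mutex.hpp>
#include <cstddef>
#include <set>
#include <string>

struct seed_list
{
  boost::shared_mutex mutex;        // plays the role of m_seed_nodes_lock
  std::set<std::string> nodes;

  void add(const std::string& addr) // writer: exclusive ownership
  {
    boost::unique_lock<boost::shared_mutex> lock(mutex);
    nodes.insert(addr);
  }

  std::size_t size()                // reader: shared ownership
  {
    boost::shared_lock<boost::shared_mutex> lock(mutex);
    return nodes.size();
  }
};
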
@@ -628,11 +630,122 @@ namespace nodetool
full_addrs.insert("195.154.123.123:18080");
full_addrs.insert("212.83.172.165:18080");
full_addrs.insert("192.110.160.146:18080");
+ full_addrs.insert("88.198.163.90:18080");
+ full_addrs.insert("95.217.25.101:18080");
}
return full_addrs;
}
//-----------------------------------------------------------------------------------
template<class t_payload_net_handler>
+ std::set<std::string> node_server<t_payload_net_handler>::get_seed_nodes()
+ {
+ if (!m_exclusive_peers.empty() || m_offline)
+ {
+ return {};
+ }
+ if (m_nettype == cryptonote::TESTNET)
+ {
+ return get_seed_nodes(cryptonote::TESTNET);
+ }
+ if (m_nettype == cryptonote::STAGENET)
+ {
+ return get_seed_nodes(cryptonote::STAGENET);
+ }
+
+ std::set<std::string> full_addrs;
+
+ // for each hostname in the seed nodes list, attempt to DNS resolve and
+ // add the result addresses as seed nodes
+ // TODO: at some point add IPv6 support, but that won't be relevant
+ // for some time yet.
+
+ std::vector<std::vector<std::string>> dns_results;
+ dns_results.resize(m_seed_nodes_list.size());
+
+ // some libc implementations provide only a very small stack
+ // for threads, e.g. musl only gives ~80 kB, which is not
+ // enough to do a resolve with unbound. We request a stack
+ // of 1 MB, which should be plenty.
+ boost::thread::attributes thread_attributes;
+ thread_attributes.set_stack_size(1024*1024);
+
+ std::list<boost::thread> dns_threads;
+ uint64_t result_index = 0;
+ for (const std::string& addr_str : m_seed_nodes_list)
+ {
+ boost::thread th = boost::thread(thread_attributes, [=, &dns_results, &addr_str]
+ {
+ MDEBUG("dns_threads[" << result_index << "] created for: " << addr_str);
+ // TODO: care about dnssec avail/valid
+ bool avail, valid;
+ std::vector<std::string> addr_list;
+
+ try
+ {
+ addr_list = tools::DNSResolver::instance().get_ipv4(addr_str, avail, valid);
+ MDEBUG("dns_threads[" << result_index << "] DNS resolve done");
+ boost::this_thread::interruption_point();
+ }
+ catch(const boost::thread_interrupted&)
+ {
+ // thread interruption request
+ // even if we now have results, finish thread without setting
+ // result variables, which are now out of scope in main thread
+ MWARNING("dns_threads[" << result_index << "] interrupted");
+ return;
+ }
+
+ MINFO("dns_threads[" << result_index << "] addr_str: " << addr_str << " number of results: " << addr_list.size());
+ dns_results[result_index] = addr_list;
+ });
+
+ dns_threads.push_back(std::move(th));
+ ++result_index;
+ }
+
+ MDEBUG("dns_threads created, now waiting for completion or timeout of " << CRYPTONOTE_DNS_TIMEOUT_MS << "ms");
+ boost::chrono::system_clock::time_point deadline = boost::chrono::system_clock::now() + boost::chrono::milliseconds(CRYPTONOTE_DNS_TIMEOUT_MS);
+ uint64_t i = 0;
+ for (boost::thread& th : dns_threads)
+ {
+ if (! th.try_join_until(deadline))
+ {
+ MWARNING("dns_threads[" << i << "] timed out, sending interrupt");
+ th.interrupt();
+ }
+ ++i;
+ }
+
+ i = 0;
+ for (const auto& result : dns_results)
+ {
+ MDEBUG("DNS lookup for " << m_seed_nodes_list[i] << ": " << result.size() << " results");
+ // if no results for node, thread's lookup likely timed out
+ if (result.size())
+ {
+ for (const auto& addr_string : result)
+ full_addrs.insert(addr_string + ":" + std::to_string(cryptonote::get_config(m_nettype).P2P_DEFAULT_PORT));
+ }
+ ++i;
+ }
+
+ // append the fallback nodes if we have too few seed nodes to start with
+ if (full_addrs.size() < MIN_WANTED_SEED_NODES)
+ {
+ if (full_addrs.empty())
+ MINFO("DNS seed node lookup either timed out or failed, falling back to defaults");
+ else
+ MINFO("Not enough DNS seed nodes found, using fallback defaults too");
+
+ for (const auto &peer: get_seed_nodes(cryptonote::MAINNET))
+ full_addrs.insert(peer);
+ m_fallback_seed_nodes_added.test_and_set();
+ }
+
+ return full_addrs;
+ }
+ //-----------------------------------------------------------------------------------
+ template<class t_payload_net_handler>
typename node_server<t_payload_net_handler>::network_zone& node_server<t_payload_net_handler>::add_zone(const epee::net_utils::zone zone)
{
const auto zone_ = m_network_zones.lower_bound(zone);
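
A condensed standalone sketch of the resolver pattern introduced above: one thread per hostname on a fixed 1 MB stack, a shared deadline enforced with try_join_until(), and interruption of any thread that misses it. Everything here is illustrative; resolve_stub() merely stands in for tools::DNSResolver, and unlike net_node.inl the sketch joins interrupted threads so it exits cleanly:

#include <boost/chrono.hpp>
#include <boost/thread/thread.hpp>
#include <iostream>
#include <list>
#include <string>
#include <vector>

// Placeholder for the real DNS lookup; sleep_for is also an interruption point.
static std::vector<std::string> resolve_stub(const std::string& host)
{
  boost::this_thread::sleep_for(boost::chrono::milliseconds(100));
  return {"198.51.100.1"};
}

int main()
{
  const std::vector<std::string> hosts = {"seeds.example.net", "seeds.example.org"};
  std::vector<std::vector<std::string>> results(hosts.size());

  boost::thread::attributes attrs;
  attrs.set_stack_size(1024 * 1024);   // enough even on small-stack libcs such as musl

  std::list<boost::thread> threads;
  for (std::size_t i = 0; i < hosts.size(); ++i)
  {
    threads.emplace_back(attrs, [i, &hosts, &results]
    {
      std::vector<std::string> addrs = resolve_stub(hosts[i]);
      boost::this_thread::interruption_point();  // drop the result if we were told to stop
      results[i] = std::move(addrs);
    });
  }

  const auto deadline = boost::chrono::system_clock::now() + boost::chrono::milliseconds(500);
  for (boost::thread& th : threads)
  {
    if (!th.try_join_until(deadline))
      th.interrupt();   // past the deadline: request interruption ...
    if (th.joinable())
      th.join();        // ... then join; net_node.inl instead leaves late threads to finish on their own
  }

  for (std::size_t i = 0; i < hosts.size(); ++i)
    std::cout << hosts[i] << ": " << results[i].size() << " result(s)\n";
}
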
@@ -646,124 +759,22 @@ namespace nodetool
template<class t_payload_net_handler>
bool node_server<t_payload_net_handler>::init(const boost::program_options::variables_map& vm)
{
- std::set<std::string> full_addrs;
-
bool res = handle_command_line(vm);
CHECK_AND_ASSERT_MES(res, false, "Failed to handle command line");
- m_fallback_seed_nodes_added = false;
if (m_nettype == cryptonote::TESTNET)
{
memcpy(&m_network_id, &::config::testnet::NETWORK_ID, 16);
- full_addrs = get_seed_nodes(cryptonote::TESTNET);
}
else if (m_nettype == cryptonote::STAGENET)
{
memcpy(&m_network_id, &::config::stagenet::NETWORK_ID, 16);
- full_addrs = get_seed_nodes(cryptonote::STAGENET);
}
else
{
memcpy(&m_network_id, &::config::NETWORK_ID, 16);
- if (m_exclusive_peers.empty() && !m_offline)
- {
- // for each hostname in the seed nodes list, attempt to DNS resolve and
- // add the result addresses as seed nodes
- // TODO: at some point add IPv6 support, but that won't be relevant
- // for some time yet.
-
- std::vector<std::vector<std::string>> dns_results;
- dns_results.resize(m_seed_nodes_list.size());
-
- // some libc implementation provide only a very small stack
- // for threads, e.g. musl only gives +- 80kb, which is not
- // enough to do a resolve with unbound. we request a stack
- // of 1 mb, which should be plenty
- boost::thread::attributes thread_attributes;
- thread_attributes.set_stack_size(1024*1024);
-
- std::list<boost::thread> dns_threads;
- uint64_t result_index = 0;
- for (const std::string& addr_str : m_seed_nodes_list)
- {
- boost::thread th = boost::thread(thread_attributes, [=, &dns_results, &addr_str]
- {
- MDEBUG("dns_threads[" << result_index << "] created for: " << addr_str);
- // TODO: care about dnssec avail/valid
- bool avail, valid;
- std::vector<std::string> addr_list;
-
- try
- {
- addr_list = tools::DNSResolver::instance().get_ipv4(addr_str, avail, valid);
- MDEBUG("dns_threads[" << result_index << "] DNS resolve done");
- boost::this_thread::interruption_point();
- }
- catch(const boost::thread_interrupted&)
- {
- // thread interruption request
- // even if we now have results, finish thread without setting
- // result variables, which are now out of scope in main thread
- MWARNING("dns_threads[" << result_index << "] interrupted");
- return;
- }
-
- MINFO("dns_threads[" << result_index << "] addr_str: " << addr_str << " number of results: " << addr_list.size());
- dns_results[result_index] = addr_list;
- });
-
- dns_threads.push_back(std::move(th));
- ++result_index;
- }
-
- MDEBUG("dns_threads created, now waiting for completion or timeout of " << CRYPTONOTE_DNS_TIMEOUT_MS << "ms");
- boost::chrono::system_clock::time_point deadline = boost::chrono::system_clock::now() + boost::chrono::milliseconds(CRYPTONOTE_DNS_TIMEOUT_MS);
- uint64_t i = 0;
- for (boost::thread& th : dns_threads)
- {
- if (! th.try_join_until(deadline))
- {
- MWARNING("dns_threads[" << i << "] timed out, sending interrupt");
- th.interrupt();
- }
- ++i;
- }
-
- i = 0;
- for (const auto& result : dns_results)
- {
- MDEBUG("DNS lookup for " << m_seed_nodes_list[i] << ": " << result.size() << " results");
- // if no results for node, thread's lookup likely timed out
- if (result.size())
- {
- for (const auto& addr_string : result)
- full_addrs.insert(addr_string + ":" + std::to_string(cryptonote::get_config(m_nettype).P2P_DEFAULT_PORT));
- }
- ++i;
- }
-
- // append the fallback nodes if we have too few seed nodes to start with
- if (full_addrs.size() < MIN_WANTED_SEED_NODES)
- {
- if (full_addrs.empty())
- MINFO("DNS seed node lookup either timed out or failed, falling back to defaults");
- else
- MINFO("Not enough DNS seed nodes found, using fallback defaults too");
-
- for (const auto &peer: get_seed_nodes(cryptonote::MAINNET))
- full_addrs.insert(peer);
- m_fallback_seed_nodes_added = true;
- }
- }
}
- for (const auto& full_addr : full_addrs)
- {
- MDEBUG("Seed node: " << full_addr);
- append_net_address(m_seed_nodes, full_addr, cryptonote::get_config(m_nettype).P2P_DEFAULT_PORT);
- }
- MDEBUG("Number of seed nodes: " << m_seed_nodes.size());
-
m_config_folder = command_line::get_arg(vm, cryptonote::arg_data_dir);
network_zone& public_zone = m_network_zones.at(epee::net_utils::zone::public_);
@@ -813,7 +824,6 @@ namespace nodetool
//only in case if we really sure that we have external visible ip
m_have_address = true;
- m_last_stat_request_time = 0;
//configure self
@@ -940,15 +950,6 @@ namespace nodetool
}
//-----------------------------------------------------------------------------------
template<class t_payload_net_handler>
- uint64_t node_server<t_payload_net_handler>::get_connections_count()
- {
- std::uint64_t count = 0;
- for (auto& zone : m_network_zones)
- count += zone.second.m_net_server.get_config_object().get_connections_count();
- return count;
- }
- //-----------------------------------------------------------------------------------
- template<class t_payload_net_handler>
bool node_server<t_payload_net_handler>::deinit()
{
kill();
@@ -1023,15 +1024,18 @@ namespace nodetool
epee::simple_event ev;
std::atomic<bool> hsh_result(false);
+ bool timeout = false;
bool r = epee::net_utils::async_invoke_remote_command2<typename COMMAND_HANDSHAKE::response>(context_, COMMAND_HANDSHAKE::ID, arg, zone.m_net_server.get_config_object(),
- [this, &pi, &ev, &hsh_result, &just_take_peerlist, &context_](int code, const typename COMMAND_HANDSHAKE::response& rsp, p2p_connection_context& context)
+ [this, &pi, &ev, &hsh_result, &just_take_peerlist, &context_, &timeout](int code, const typename COMMAND_HANDSHAKE::response& rsp, p2p_connection_context& context)
{
epee::misc_utils::auto_scope_leave_caller scope_exit_handler = epee::misc_utils::create_scope_leave_handler([&](){ev.raise();});
if(code < 0)
{
LOG_WARNING_CC(context, "COMMAND_HANDSHAKE invoke failed. (" << code << ", " << epee::levin::get_err_descr(code) << ")");
+ if (code == LEVIN_ERROR_CONNECTION_TIMEDOUT || code == LEVIN_ERROR_CONNECTION_DESTROYED)
+ timeout = true;
return;
}
@@ -1041,7 +1045,7 @@ namespace nodetool
return;
}
- if(!handle_remote_peerlist(rsp.local_peerlist_new, rsp.node_data.local_time, context))
+ if(!handle_remote_peerlist(rsp.local_peerlist_new, context))
{
LOG_WARNING_CC(context, "COMMAND_HANDSHAKE: failed to handle_remote_peerlist(...), closing connection.");
add_host_fail(context.m_remote_address);
@@ -1060,17 +1064,15 @@ namespace nodetool
pi = context.peer_id = rsp.node_data.peer_id;
context.m_rpc_port = rsp.node_data.rpc_port;
context.m_rpc_credits_per_hash = rsp.node_data.rpc_credits_per_hash;
- m_network_zones.at(context.m_remote_address.get_zone()).m_peerlist.set_peer_just_seen(rsp.node_data.peer_id, context.m_remote_address, context.m_pruning_seed, context.m_rpc_port, context.m_rpc_credits_per_hash);
+ network_zone& zone = m_network_zones.at(context.m_remote_address.get_zone());
+ zone.m_peerlist.set_peer_just_seen(rsp.node_data.peer_id, context.m_remote_address, context.m_pruning_seed, context.m_rpc_port, context.m_rpc_credits_per_hash);
// move
- for (auto const& zone : m_network_zones)
+ if(rsp.node_data.peer_id == zone.m_config.m_peer_id)
{
- if(rsp.node_data.peer_id == zone.second.m_config.m_peer_id)
- {
- LOG_DEBUG_CC(context, "Connection to self detected, dropping connection");
- hsh_result = false;
- return;
- }
+ LOG_DEBUG_CC(context, "Connection to self detected, dropping connection");
+ hsh_result = false;
+ return;
}
LOG_INFO_CC(context, "New connection handshaked, pruning seed " << epee::string_tools::to_string_hex(context.m_pruning_seed));
LOG_DEBUG_CC(context, " COMMAND_HANDSHAKE INVOKED OK");
@@ -1089,7 +1091,8 @@ namespace nodetool
if(!hsh_result)
{
LOG_WARNING_CC(context_, "COMMAND_HANDSHAKE Failed");
- m_network_zones.at(context_.m_remote_address.get_zone()).m_net_server.get_config_object().close(context_.m_connection_id);
+ if (!timeout)
+ zone.m_net_server.get_config_object().close(context_.m_connection_id);
}
else if (!just_take_peerlist)
{
@@ -1119,7 +1122,7 @@ namespace nodetool
return;
}
- if(!handle_remote_peerlist(rsp.local_peerlist_new, rsp.local_time, context))
+ if(!handle_remote_peerlist(rsp.local_peerlist_new, context))
{
LOG_WARNING_CC(context, "COMMAND_TIMED_SYNC: failed to handle_remote_peerlist(...), closing connection.");
m_network_zones.at(context.m_remote_address.get_zone()).m_net_server.get_config_object().close(context.m_connection_id );
@@ -1262,7 +1265,7 @@ namespace nodetool
bool is_priority = is_priority_node(na);
LOG_PRINT_CC_PRIORITY_NODE(is_priority, bool(con), "Connect failed to " << na.str()
/*<< ", try " << try_count*/);
- //m_peerlist.set_peer_unreachable(pe);
+ record_addr_failed(na);
return false;
}
@@ -1276,7 +1279,7 @@ namespace nodetool
LOG_PRINT_CC_PRIORITY_NODE(is_priority, *con, "Failed to HANDSHAKE with peer "
<< na.str()
/*<< ", try " << try_count*/);
- zone.m_net_server.get_config_object().close(con->m_connection_id);
+ record_addr_failed(na);
return false;
}
@@ -1327,6 +1330,7 @@ namespace nodetool
bool is_priority = is_priority_node(na);
LOG_PRINT_CC_PRIORITY_NODE(is_priority, p2p_connection_context{}, "Connect failed to " << na.str());
+ record_addr_failed(na);
return false;
}
@@ -1338,7 +1342,7 @@ namespace nodetool
bool is_priority = is_priority_node(na);
LOG_PRINT_CC_PRIORITY_NODE(is_priority, *con, "Failed to HANDSHAKE with peer " << na.str());
- zone.m_net_server.get_config_object().close(con->m_connection_id);
+ record_addr_failed(na);
return false;
}
@@ -1353,6 +1357,13 @@ namespace nodetool
//-----------------------------------------------------------------------------------
template<class t_payload_net_handler>
+ void node_server<t_payload_net_handler>::record_addr_failed(const epee::net_utils::network_address& addr)
+ {
+ CRITICAL_REGION_LOCAL(m_conn_fails_cache_lock);
+ m_conn_fails_cache[addr.host_str()] = time(NULL);
+ }
+ //-----------------------------------------------------------------------------------
+ template<class t_payload_net_handler>
bool node_server<t_payload_net_handler>::is_addr_recently_failed(const epee::net_utils::network_address& addr)
{
CRITICAL_REGION_LOCAL(m_conn_fails_cache_lock);
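
record_addr_failed() above stamps the failing host with the current wall-clock time under the same lock that is_addr_recently_failed() takes, so later connection attempts can skip hosts that just failed. A small self-contained sketch of that cache, using std::mutex in place of the CRITICAL_REGION macros; the class name and the 60-second window are illustrative only:

#include <ctime>
#include <map>
#include <mutex>
#include <string>

class conn_fail_cache
{
public:
  void record_failed(const std::string& host)
  {
    std::lock_guard<std::mutex> lock(m_mutex);
    m_failed[host] = std::time(nullptr);
  }

  // true if the host failed within the last 'window' seconds
  bool recently_failed(const std::string& host, std::time_t window = 60) const
  {
    std::lock_guard<std::mutex> lock(m_mutex);
    const auto it = m_failed.find(host);
    return it != m_failed.end() && std::time(nullptr) - it->second < window;
  }

private:
  mutable std::mutex m_mutex;
  std::map<std::string, std::time_t> m_failed;
};
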
@@ -1434,10 +1445,10 @@ namespace nodetool
std::deque<size_t> filtered;
const size_t limit = use_white_list ? 20 : std::numeric_limits<size_t>::max();
- size_t idx = 0, skipped = 0;
for (int step = 0; step < 2; ++step)
{
bool skip_duplicate_class_B = step == 0;
+ size_t idx = 0, skipped = 0;
zone.m_peerlist.foreach (use_white_list, [&classB, &filtered, &idx, &skipped, skip_duplicate_class_B, limit, next_needed_pruning_stripe](const peerlist_entry &pe){
if (filtered.size() >= limit)
return false;
@@ -1539,10 +1550,25 @@ namespace nodetool
template<class t_payload_net_handler>
bool node_server<t_payload_net_handler>::connect_to_seed()
{
+ boost::upgrade_lock<boost::shared_mutex> seed_nodes_upgrade_lock(m_seed_nodes_lock);
+
+ if (!m_seed_nodes_initialized)
+ {
+ boost::upgrade_to_unique_lock<boost::shared_mutex> seed_nodes_lock(seed_nodes_upgrade_lock);
+ m_seed_nodes_initialized = true;
+ for (const auto& full_addr : get_seed_nodes())
+ {
+ MDEBUG("Seed node: " << full_addr);
+ append_net_address(m_seed_nodes, full_addr, cryptonote::get_config(m_nettype).P2P_DEFAULT_PORT);
+ }
+ MDEBUG("Number of seed nodes: " << m_seed_nodes.size());
+ }
+
if (m_seed_nodes.empty() || m_offline || !m_exclusive_peers.empty())
return true;
size_t try_count = 0;
+ bool is_connected_to_at_least_one_seed_node = false;
size_t current_index = crypto::rand_idx(m_seed_nodes.size());
const net_server& server = m_network_zones.at(epee::net_utils::zone::public_).m_net_server;
while(true)
@@ -1550,21 +1576,28 @@ namespace nodetool
if(server.is_stop_signal_sent())
return false;
- if(try_to_connect_and_handshake_with_new_peer(m_seed_nodes[current_index], true))
+ peerlist_entry pe_seed{};
+ pe_seed.adr = m_seed_nodes[current_index];
+ if (is_peer_used(pe_seed))
+ is_connected_to_at_least_one_seed_node = true;
+ else if (try_to_connect_and_handshake_with_new_peer(m_seed_nodes[current_index], true))
break;
if(++try_count > m_seed_nodes.size())
{
- if (!m_fallback_seed_nodes_added)
+ if (!m_fallback_seed_nodes_added.test_and_set())
{
MWARNING("Failed to connect to any of seed peers, trying fallback seeds");
- current_index = m_seed_nodes.size();
- for (const auto &peer: get_seed_nodes(m_nettype))
+ current_index = m_seed_nodes.size() - 1;
{
- MDEBUG("Fallback seed node: " << peer);
- append_net_address(m_seed_nodes, peer, cryptonote::get_config(m_nettype).P2P_DEFAULT_PORT);
+ boost::upgrade_to_unique_lock<boost::shared_mutex> seed_nodes_lock(seed_nodes_upgrade_lock);
+
+ for (const auto &peer: get_seed_nodes(m_nettype))
+ {
+ MDEBUG("Fallback seed node: " << peer);
+ append_net_address(m_seed_nodes, peer, cryptonote::get_config(m_nettype).P2P_DEFAULT_PORT);
+ }
}
- m_fallback_seed_nodes_added = true;
- if (current_index == m_seed_nodes.size())
+ if (current_index == m_seed_nodes.size() - 1)
{
MWARNING("No fallback seeds, continuing without seeds");
break;
@@ -1573,7 +1606,8 @@ namespace nodetool
}
else
{
- MWARNING("Failed to connect to any of seed peers, continuing without seeds");
+ if (!is_connected_to_at_least_one_seed_node)
+ MWARNING("Failed to connect to any of seed peers, continuing without seeds");
break;
}
}
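
connect_to_seed() above resolves the seed list lazily: callers take an upgrade lock on m_seed_nodes_lock, and only the first one to find the list uninitialized promotes it to exclusive ownership and fills it; the fallback marker became a std::atomic_flag so test_and_set() checks and sets it in one atomic step. A minimal sketch of those two pieces, assuming Boost.Thread; seed_cache and the seed addresses below are placeholders, not the real types or nodes:

#include <atomic>
#include <boost/thread/locks.hpp>
#include <boost/thread/shared_mutex.hpp>
#include <cstddef>
#include <set>
#include <string>

class seed_cache
{
public:
  std::size_t ensure_initialized()
  {
    // Upgrade ownership coexists with shared (reader) locks,
    // but only one thread may hold it at a time.
    boost::upgrade_lock<boost::shared_mutex> up_lock(m_lock);
    if (!m_initialized)
    {
      // Promote to exclusive ownership just for the one-time fill.
      boost::upgrade_to_unique_lock<boost::shared_mutex> unique_lock(up_lock);
      m_initialized = true;
      m_seeds = {"203.0.113.1:18080", "203.0.113.2:18080"}; // placeholder seeds
    }
    return m_seeds.size();
  }

  // True only for the first caller, so fallback seeds get appended exactly once.
  bool try_mark_fallback_added() { return !m_fallback_added.test_and_set(); }

private:
  boost::shared_mutex m_lock;
  bool m_initialized = false;
  std::set<std::string> m_seeds;
  std::atomic_flag m_fallback_added = ATOMIC_FLAG_INIT;
};
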
@@ -1596,10 +1630,9 @@ namespace nodetool
// Only have seeds in the public zone right now.
size_t start_conn_count = get_public_outgoing_connections_count();
- if(!get_public_white_peers_count() && m_seed_nodes.size())
+ if(!get_public_white_peers_count() && !connect_to_seed())
{
- if (!connect_to_seed())
- return false;
+ return false;
}
if (!connect_to_peerlist(m_priority_peers)) return false;
@@ -1894,7 +1927,7 @@ namespace nodetool
}
//-----------------------------------------------------------------------------------
template<class t_payload_net_handler>
- bool node_server<t_payload_net_handler>::handle_remote_peerlist(const std::vector<peerlist_entry>& peerlist, time_t local_time, const epee::net_utils::connection_context_base& context)
+ bool node_server<t_payload_net_handler>::handle_remote_peerlist(const std::vector<peerlist_entry>& peerlist, const epee::net_utils::connection_context_base& context)
{
std::vector<peerlist_entry> peerlist_ = peerlist;
if(!sanitize_peerlist(peerlist_))
@@ -1911,16 +1944,13 @@ namespace nodetool
}
LOG_DEBUG_CC(context, "REMOTE PEERLIST: remote peerlist size=" << peerlist_.size());
- LOG_DEBUG_CC(context, "REMOTE PEERLIST: " << ENDL << print_peerlist_to_string(peerlist_));
- return m_network_zones.at(context.m_remote_address.get_zone()).m_peerlist.merge_peerlist(peerlist_);
+ LOG_TRACE_CC(context, "REMOTE PEERLIST: " << ENDL << print_peerlist_to_string(peerlist_));
+ return m_network_zones.at(context.m_remote_address.get_zone()).m_peerlist.merge_peerlist(peerlist_, [this](const peerlist_entry &pe) { return !is_addr_recently_failed(pe.adr); });
}
//-----------------------------------------------------------------------------------
template<class t_payload_net_handler>
bool node_server<t_payload_net_handler>::get_local_node_data(basic_node_data& node_data, const network_zone& zone)
{
- time_t local_time;
- time(&local_time);
- node_data.local_time = local_time; // \TODO This can be an identifying value across zones (public internet to tor/i2p) ...
node_data.peer_id = zone.m_config.m_peer_id;
if(!m_hide_my_port && zone.m_can_pingback)
node_data.my_port = m_external_port ? m_external_port : m_listening_port;
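
handle_remote_peerlist() above now hands merge_peerlist() a predicate, so entries whose address recently failed are dropped before they enter the peer list, and local_time no longer travels with the handshake. A small sketch of the filter-then-merge step with illustrative types (this is not peerlist_manager's actual interface):

#include <algorithm>
#include <functional>
#include <string>
#include <vector>

struct peer { std::string addr; };

// Merge incoming entries into 'known', keeping only those the caller's
// predicate accepts (e.g. "not recently failed").
void merge_peerlist(std::vector<peer>& known, std::vector<peer> incoming,
                    const std::function<bool(const peer&)>& accept)
{
  incoming.erase(std::remove_if(incoming.begin(), incoming.end(),
                                [&](const peer& p) { return !accept(p); }),
                 incoming.end());
  known.insert(known.end(), incoming.begin(), incoming.end());
}

A caller would pass something like [&](const peer& p) { return !recently_failed(p.addr); }, mirroring the lambda added in the hunk above.
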
@@ -1932,91 +1962,6 @@ namespace nodetool
return true;
}
//-----------------------------------------------------------------------------------
-#ifdef ALLOW_DEBUG_COMMANDS
- template<class t_payload_net_handler>
- bool node_server<t_payload_net_handler>::check_trust(const proof_of_trust& tr, const epee::net_utils::zone zone_type)
- {
- uint64_t local_time = time(NULL);
- uint64_t time_delata = local_time > tr.time ? local_time - tr.time: tr.time - local_time;
- if(time_delata > 24*60*60 )
- {
- MWARNING("check_trust failed to check time conditions, local_time=" << local_time << ", proof_time=" << tr.time);
- return false;
- }
- if(m_last_stat_request_time >= tr.time )
- {
- MWARNING("check_trust failed to check time conditions, last_stat_request_time=" << m_last_stat_request_time << ", proof_time=" << tr.time);
- return false;
- }
-
- const network_zone& zone = m_network_zones.at(zone_type);
- if(zone.m_config.m_peer_id != tr.peer_id)
- {
- MWARNING("check_trust failed: peer_id mismatch (passed " << tr.peer_id << ", expected " << peerid_to_string(zone.m_config.m_peer_id) << ")");
- return false;
- }
- crypto::public_key pk = AUTO_VAL_INIT(pk);
- epee::string_tools::hex_to_pod(::config::P2P_REMOTE_DEBUG_TRUSTED_PUB_KEY, pk);
- crypto::hash h = get_proof_of_trust_hash(tr);
- if(!crypto::check_signature(h, pk, tr.sign))
- {
- MWARNING("check_trust failed: sign check failed");
- return false;
- }
- //update last request time
- m_last_stat_request_time = tr.time;
- return true;
- }
- //-----------------------------------------------------------------------------------
- template<class t_payload_net_handler>
- int node_server<t_payload_net_handler>::handle_get_stat_info(int command, typename COMMAND_REQUEST_STAT_INFO::request& arg, typename COMMAND_REQUEST_STAT_INFO::response& rsp, p2p_connection_context& context)
- {
- if(!check_trust(arg.tr, context.m_remote_address.get_zone()))
- {
- drop_connection(context);
- return 1;
- }
- rsp.connections_count = get_connections_count();
- rsp.incoming_connections_count = rsp.connections_count - get_outgoing_connections_count();
- rsp.version = MONERO_VERSION_FULL;
- rsp.os_version = tools::get_os_version_string();
- m_payload_handler.get_stat_info(rsp.payload_info);
- return 1;
- }
- //-----------------------------------------------------------------------------------
- template<class t_payload_net_handler>
- int node_server<t_payload_net_handler>::handle_get_network_state(int command, COMMAND_REQUEST_NETWORK_STATE::request& arg, COMMAND_REQUEST_NETWORK_STATE::response& rsp, p2p_connection_context& context)
- {
- if(!check_trust(arg.tr, context.m_remote_address.get_zone()))
- {
- drop_connection(context);
- return 1;
- }
- m_network_zones.at(epee::net_utils::zone::public_).m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
- {
- connection_entry ce;
- ce.adr = cntxt.m_remote_address;
- ce.id = cntxt.peer_id;
- ce.is_income = cntxt.m_is_income;
- rsp.connections_list.push_back(ce);
- return true;
- });
-
- network_zone& zone = m_network_zones.at(context.m_remote_address.get_zone());
- zone.m_peerlist.get_peerlist(rsp.local_peerlist_gray, rsp.local_peerlist_white);
- rsp.my_id = zone.m_config.m_peer_id;
- rsp.local_time = time(NULL);
- return 1;
- }
- //-----------------------------------------------------------------------------------
- template<class t_payload_net_handler>
- int node_server<t_payload_net_handler>::handle_get_peer_id(int command, COMMAND_REQUEST_PEER_ID::request& arg, COMMAND_REQUEST_PEER_ID::response& rsp, p2p_connection_context& context)
- {
- rsp.my_id = m_network_zones.at(context.m_remote_address.get_zone()).m_config.m_peer_id;
- return 1;
- }
-#endif
- //-----------------------------------------------------------------------------------
template<class t_payload_net_handler>
int node_server<t_payload_net_handler>::handle_get_support_flags(int command, COMMAND_REQUEST_SUPPORT_FLAGS::request& arg, COMMAND_REQUEST_SUPPORT_FLAGS::response& rsp, p2p_connection_context& context)
{
@@ -2056,18 +2001,13 @@ namespace nodetool
}
//-----------------------------------------------------------------------------------
template<class t_payload_net_handler>
- epee::net_utils::zone node_server<t_payload_net_handler>::send_txs(std::vector<cryptonote::blobdata> txs, const epee::net_utils::zone origin, const boost::uuids::uuid& source, cryptonote::i_core_events& core)
+ epee::net_utils::zone node_server<t_payload_net_handler>::send_txs(std::vector<cryptonote::blobdata> txs, const epee::net_utils::zone origin, const boost::uuids::uuid& source, cryptonote::i_core_events& core, const cryptonote::relay_method tx_relay)
{
namespace enet = epee::net_utils;
- const auto send = [&txs, &source, &core] (std::pair<const enet::zone, network_zone>& network)
+ const auto send = [&txs, &source, &core, tx_relay] (std::pair<const enet::zone, network_zone>& network)
{
- const bool is_public = (network.first == enet::zone::public_);
- const cryptonote::relay_method tx_relay = is_public ?
- cryptonote::relay_method::fluff : cryptonote::relay_method::local;
-
- core.on_transactions_relayed(epee::to_span(txs), tx_relay);
- if (network.second.m_notifier.send_txs(std::move(txs), source))
+ if (network.second.m_notifier.send_txs(std::move(txs), source, core, tx_relay))
return network.first;
return enet::zone::invalid;
};
@@ -2291,12 +2231,20 @@ namespace nodetool
}
//fill response
- rsp.local_time = time(NULL);
-
const epee::net_utils::zone zone_type = context.m_remote_address.get_zone();
network_zone& zone = m_network_zones.at(zone_type);
- zone.m_peerlist.get_peerlist_head(rsp.local_peerlist_new, true);
+ std::vector<peerlist_entry> local_peerlist_new;
+ zone.m_peerlist.get_peerlist_head(local_peerlist_new, true, P2P_DEFAULT_PEERS_IN_HANDSHAKE);
+
+ //only include peers we did not already send
+ rsp.local_peerlist_new.reserve(local_peerlist_new.size());
+ for (auto &pe: local_peerlist_new)
+ {
+ if (!context.sent_addresses.insert(pe.adr).second)
+ continue;
+ rsp.local_peerlist_new.push_back(std::move(pe));
+ }
m_payload_handler.get_payload_sync_data(rsp.payload_data);
/* Tor/I2P nodes receiving connections via forwarding (from tor/i2p daemon)
@@ -2418,6 +2366,8 @@ namespace nodetool
//fill response
zone.m_peerlist.get_peerlist_head(rsp.local_peerlist_new, true);
+ for (const auto &e: rsp.local_peerlist_new)
+ context.sent_addresses.insert(e.adr);
get_local_node_data(rsp.node_data, zone);
m_payload_handler.get_payload_sync_data(rsp.payload_data);
LOG_DEBUG_CC(context, "COMMAND_HANDSHAKE");
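
Both handlers above now remember, per connection, which peerlist entries have already been sent, using the bool that std::set::insert() returns to skip repeats. A tiny sketch of that dedupe step with placeholder types; 'sent' plays the role of context.sent_addresses:

#include <set>
#include <string>
#include <vector>

// Appends to 'out' only the addresses this connection has not been sent yet.
void append_unsent(const std::vector<std::string>& candidates,
                   std::set<std::string>& sent,
                   std::vector<std::string>& out)
{
  for (const std::string& addr : candidates)
    if (sent.insert(addr).second)   // .second is false if already present
      out.push_back(addr);
}
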