Diffstat (limited to 'src/p2p/net_node.inl')
-rw-r--r-- | src/p2p/net_node.inl | 366
1 files changed, 302 insertions, 64 deletions
diff --git a/src/p2p/net_node.inl b/src/p2p/net_node.inl
index b6337e6b5..cede6d0a0 100644
--- a/src/p2p/net_node.inl
+++ b/src/p2p/net_node.inl
@@ -1,4 +1,4 @@
-// Copyright (c) 2014, The Monero Project
+// Copyright (c) 2014-2015, The Monero Project
 //
 // All rights reserved.
 //
@@ -31,6 +31,9 @@
 #pragma once
 
 #include <algorithm>
+#include <boost/date_time/posix_time/posix_time.hpp>
+#include <boost/thread/thread.hpp>
+#include <atomic>
 
 #include "version.h"
 #include "string_tools.h"
@@ -43,16 +46,18 @@
 #include "net/local_ip.h"
 #include "crypto/crypto.h"
 #include "storages/levin_abstract_invoke2.h"
+#include "data_logger.hpp"
+#include "daemon/command_line_args.h"
 
 // We have to look for miniupnpc headers in different places, dependent on if its compiled or external
 #ifdef UPNP_STATIC
-  #include <miniupnpc/miniupnpc.h>
-  #include <miniupnpc/upnpcommands.h>
-  #include <miniupnpc/upnperrors.h>
+  #include <miniupnpc/miniupnpc.h>
+  #include <miniupnpc/upnpcommands.h>
+  #include <miniupnpc/upnperrors.h>
 #else
-  #include "miniupnpc.h"
-  #include "upnpcommands.h"
-  #include "upnperrors.h"
+  #include "miniupnpc.h"
+  #include "upnpcommands.h"
+  #include "upnperrors.h"
 #endif
 
 #define NET_MAKE_IP(b1,b2,b3,b4)  ((LPARAM)(((DWORD)(b1)<<24)+((DWORD)(b2)<<16)+((DWORD)(b3)<<8)+((DWORD)(b4))))
@@ -81,6 +86,16 @@ namespace nodetool
       " If this option is given the options add-priority-node and seed-node are ignored"};
   const command_line::arg_descriptor<std::vector<std::string> > arg_p2p_seed_node = {"seed-node", "Connect to a node to retrieve peer addresses, and disconnect"};
   const command_line::arg_descriptor<bool> arg_p2p_hide_my_port = {"hide-my-port", "Do not announce yourself as peerlist candidate", false, true};
+
+  const command_line::arg_descriptor<bool> arg_no_igd = {"no-igd", "Disable UPnP port mapping"};
+  const command_line::arg_descriptor<int64_t> arg_out_peers = {"out-peers", "set max limit of out peers", -1};
+  const command_line::arg_descriptor<int> arg_tos_flag = {"tos-flag", "set TOS flag", -1};
+
+  const command_line::arg_descriptor<int64_t> arg_limit_rate_up = {"limit-rate-up", "set limit-rate-up [kB/s]", -1};
+  const command_line::arg_descriptor<int64_t> arg_limit_rate_down = {"limit-rate-down", "set limit-rate-down [kB/s]", -1};
+  const command_line::arg_descriptor<uint64_t> arg_limit_rate = {"limit-rate", "set limit-rate [kB/s]", 128};
+
+  const command_line::arg_descriptor<bool> arg_save_graph = {"save-graph", "Save data for dr monero", false};
 }
 //-----------------------------------------------------------------------------------
 template<class t_payload_net_handler>
@@ -96,6 +111,13 @@
     command_line::add_arg(desc, arg_p2p_add_exclusive_node);
     command_line::add_arg(desc, arg_p2p_seed_node);
     command_line::add_arg(desc, arg_p2p_hide_my_port);
+    command_line::add_arg(desc, arg_no_igd);
+    command_line::add_arg(desc, arg_out_peers);
+    command_line::add_arg(desc, arg_tos_flag);
+    command_line::add_arg(desc, arg_limit_rate_up);
+    command_line::add_arg(desc, arg_limit_rate_down);
+    command_line::add_arg(desc, arg_limit_rate);
+    command_line::add_arg(desc, arg_save_graph);
   }
 //-----------------------------------------------------------------------------------
 template<class t_payload_net_handler>
@@ -117,7 +139,6 @@
     //at this moment we have hardcoded config
     m_config.m_net_config.handshake_interval = P2P_DEFAULT_HANDSHAKE_INTERVAL;
-    m_config.m_net_config.connections_count = P2P_DEFAULT_CONNECTIONS_COUNT;
     m_config.m_net_config.packet_max_size = P2P_DEFAULT_PACKET_MAX_SIZE; //20 MB limit
     m_config.m_net_config.config_id = 0; // initial config
     m_config.m_net_config.connection_timeout = P2P_DEFAULT_CONNECTION_TIMEOUT;
@@ -162,6 +183,7 @@
     m_port = command_line::get_arg(vm, p2p_bind_arg);
     m_external_port = command_line::get_arg(vm, arg_p2p_external_port);
     m_allow_local_ip = command_line::get_arg(vm, arg_p2p_allow_local_ip);
+    m_no_igd = command_line::get_arg(vm, arg_no_igd);
 
     if (command_line::has_arg(vm, arg_p2p_add_peer))
     {
@@ -175,17 +197,24 @@
         m_command_line_peers.push_back(pe);
       }
     }
+
+    if(command_line::has_arg(vm, arg_save_graph))
+    {
+      set_save_graph(true);
+    }
 
     if (command_line::has_arg(vm,arg_p2p_add_exclusive_node))
     {
       if (!parse_peers_and_add_to_container(vm, arg_p2p_add_exclusive_node, m_exclusive_peers))
         return false;
     }
+
     if (command_line::has_arg(vm, arg_p2p_add_priority_node))
    {
       if (!parse_peers_and_add_to_container(vm, arg_p2p_add_priority_node, m_priority_peers))
         return false;
     }
+
     if (command_line::has_arg(vm, arg_p2p_seed_node))
     {
       if (!parse_peers_and_add_to_container(vm, arg_p2p_seed_node, m_seed_nodes))
@@ -194,6 +223,21 @@
     if(command_line::has_arg(vm, arg_p2p_hide_my_port))
       m_hide_my_port = true;
+
+    if ( !set_max_out_peers(vm, command_line::get_arg(vm, arg_out_peers) ) )
+      return false;
+
+    if ( !set_tos_flag(vm, command_line::get_arg(vm, arg_tos_flag) ) )
+      return false;
+
+    if ( !set_rate_up_limit(vm, command_line::get_arg(vm, arg_limit_rate_up) ) )
+      return false;
+
+    if ( !set_rate_down_limit(vm, command_line::get_arg(vm, arg_limit_rate_down) ) )
+      return false;
+
+    if ( !set_rate_limit(vm, command_line::get_arg(vm, arg_limit_rate) ) )
+      return false;
 
     return true;
   }
@@ -238,44 +282,117 @@
   //-----------------------------------------------------------------------------------
   template<class t_payload_net_handler>
-  bool node_server<t_payload_net_handler>::init(const boost::program_options::variables_map& vm, bool testnet)
+  bool node_server<t_payload_net_handler>::init(const boost::program_options::variables_map& vm)
   {
+    bool testnet = command_line::get_arg(vm, daemon_args::arg_testnet_on);
+
     if (testnet)
     {
-      append_net_address(m_seed_nodes, "107.152.187.202:28080");
+      memcpy(&m_network_id, &::config::testnet::NETWORK_ID, 16);
       append_net_address(m_seed_nodes, "197.242.158.240:28080");
       append_net_address(m_seed_nodes, "107.152.130.98:28080");
+      append_net_address(m_seed_nodes, "5.9.25.103:28080");
+      append_net_address(m_seed_nodes, "5.9.55.70:28080");
     }
     else
    {
+      memcpy(&m_network_id, &::config::NETWORK_ID, 16);
       // for each hostname in the seed nodes list, attempt to DNS resolve and
       // add the result addresses as seed nodes
       // TODO: at some point add IPv6 support, but that won't be relevant
       // for some time yet.
+
+      std::vector<std::vector<std::string>> dns_results;
+      dns_results.resize(m_seed_nodes_list.size());
+
+      std::list<boost::thread*> dns_threads;
+      uint64_t result_index = 0;
       for (const std::string& addr_str : m_seed_nodes_list)
       {
-        // TODO: care about dnssec avail/valid
-        bool avail, valid;
-        std::vector<std::string> addr_list = tools::DNSResolver::instance().get_ipv4(addr_str, avail, valid);
-        for (const std::string& a : addr_list)
+        boost::thread* th = new boost::thread([=, &dns_results, &addr_str]
+        {
+          LOG_PRINT_L4("dns_threads[" << result_index << "] created for: " << addr_str)
+          // TODO: care about dnssec avail/valid
+          bool avail, valid;
+          std::vector<std::string> addr_list;
+
+          try
+          {
+            addr_list = tools::DNSResolver().get_ipv4(addr_str, avail, valid);
+            LOG_PRINT_L4("dns_threads[" << result_index << "] DNS resolve done");
+            boost::this_thread::interruption_point();
+          }
+          catch(const boost::thread_interrupted&)
+          {
+            // thread interruption request
+            // even if we now have results, finish thread without setting
+            // result variables, which are now out of scope in main thread
+            LOG_PRINT_L4("dns_threads[" << result_index << "] interrupted");
+            return;
+          }
+
+          LOG_PRINT_L4("dns_threads[" << result_index << "] addr_str: " << addr_str << " number of results: " << addr_list.size());
+          dns_results[result_index] = addr_list;
+        });
+
+        dns_threads.push_back(th);
+        ++result_index;
+      }
+
+      LOG_PRINT_L4("dns_threads created, now waiting for completion or timeout of " << CRYPTONOTE_DNS_TIMEOUT_MS << "ms");
+      boost::chrono::system_clock::time_point deadline = boost::chrono::system_clock::now() + boost::chrono::milliseconds(CRYPTONOTE_DNS_TIMEOUT_MS);
+      uint64_t i = 0;
+      for (boost::thread* th : dns_threads)
+      {
+        if (! th->try_join_until(deadline))
+        {
+          LOG_PRINT_L4("dns_threads[" << i << "] timed out, sending interrupt");
+          th->interrupt();
+        }
+        ++i;
+      }
+
+      i = 0;
+      for (const auto& result : dns_results)
+      {
+        LOG_PRINT_L4("DNS lookup for " << m_seed_nodes_list[i] << ": " << result.size() << " results");
+        // if no results for node, thread's lookup likely timed out
+        if (result.size())
         {
-          append_net_address(m_seed_nodes, a + ":18080");
+          for (const auto& addr_string : result)
+          {
+            append_net_address(m_seed_nodes, addr_string + ":18080");
+          }
         }
+        ++i;
       }
 
       if (!m_seed_nodes.size())
       {
-        append_net_address(m_seed_nodes, "62.210.78.186:18080");
-        append_net_address(m_seed_nodes, "195.12.60.154:18080");
-        append_net_address(m_seed_nodes, "54.241.246.125:18080");
-        append_net_address(m_seed_nodes, "107.170.157.169:18080");
-        append_net_address(m_seed_nodes, "54.207.112.216:18080");
-        append_net_address(m_seed_nodes, "78.27.112.54:18080");
-        append_net_address(m_seed_nodes, "209.222.30.57:18080");
-        append_net_address(m_seed_nodes, "80.71.13.55:18080");
-        append_net_address(m_seed_nodes, "107.178.112.126:18080");
-        append_net_address(m_seed_nodes, "107.158.233.98:18080");
-        append_net_address(m_seed_nodes, "64.22.111.2:18080");
+        LOG_PRINT_L0("DNS seed node lookup either timed out or failed, falling back to defaults");
+        append_net_address(m_seed_nodes, "46.165.232.77:18080");
+        append_net_address(m_seed_nodes, "63.141.254.186:18080");
+        append_net_address(m_seed_nodes, ":18080");
+        append_net_address(m_seed_nodes, "119.81.118.164:18080");
+        append_net_address(m_seed_nodes, "60.191.33.112:18080");
+        append_net_address(m_seed_nodes, "198.74.231.92:18080");
+        append_net_address(m_seed_nodes, "5.9.55.70:18080");
+        append_net_address(m_seed_nodes, "119.81.118.165:18080");
+        append_net_address(m_seed_nodes, "202.112.0.100:18080");
+        append_net_address(m_seed_nodes, "84.106.163.174:18080");
+        append_net_address(m_seed_nodes, "178.206.94.87:18080");
+        append_net_address(m_seed_nodes, "119.81.118.163:18080");
+        append_net_address(m_seed_nodes, "95.37.217.253:18080");
+        append_net_address(m_seed_nodes, "161.67.132.39:18080");
+        append_net_address(m_seed_nodes, "119.81.48.114:18080");
+        append_net_address(m_seed_nodes, "119.81.118.166:18080");
+        append_net_address(m_seed_nodes, "93.120.240.209:18080");
+        append_net_address(m_seed_nodes, "46.183.145.69:18080");
+        append_net_address(m_seed_nodes, "108.170.123.66:18080");
+        append_net_address(m_seed_nodes, "5.9.83.204:18080");
+        append_net_address(m_seed_nodes, "104.130.19.193:18080");
+        append_net_address(m_seed_nodes, "119.81.48.115:18080");
+        append_net_address(m_seed_nodes, "80.71.13.36:18080");
       }
     }
@@ -316,42 +433,43 @@
     LOG_PRINT_L0("External port defined as " << m_external_port);
 
     // Add UPnP port mapping
-    LOG_PRINT_L0("Attempting to add IGD port mapping.");
-    int result;
-    UPNPDev* deviceList = upnpDiscover(1000, NULL, NULL, 0, 0, &result);
-    UPNPUrls urls;
-    IGDdatas igdData;
-    char lanAddress[64];
-    result = UPNP_GetValidIGD(deviceList, &urls, &igdData, lanAddress, sizeof lanAddress);
-    freeUPNPDevlist(deviceList);
-    if (result != 0) {
-      if (result == 1) {
-        std::ostringstream portString;
-        portString << m_listenning_port;
-
-        // Delete the port mapping before we create it, just in case we have dangling port mapping from the daemon not being shut down correctly
-        UPNP_DeletePortMapping(urls.controlURL, igdData.first.servicetype, portString.str().c_str(), "TCP", 0);
-
-        int portMappingResult;
-        portMappingResult = UPNP_AddPortMapping(urls.controlURL, igdData.first.servicetype, portString.str().c_str(), portString.str().c_str(), lanAddress, CRYPTONOTE_NAME, "TCP", 0, "0");
-        if (portMappingResult != 0) {
-          LOG_ERROR("UPNP_AddPortMapping failed, error: " << strupnperror(portMappingResult));
-        } else {
-          LOG_PRINT_GREEN("Added IGD port mapping.", LOG_LEVEL_0);
-        }
-      } else if (result == 2) {
-        LOG_PRINT_L0("IGD was found but reported as not connected.");
-      } else if (result == 3) {
-        LOG_PRINT_L0("UPnP device was found but not recoginzed as IGD.");
-      } else {
-        LOG_ERROR("UPNP_GetValidIGD returned an unknown result code.");
-      }
-
-      FreeUPNPUrls(&urls);
-    } else {
-      LOG_PRINT_L0("No IGD was found.");
-    }
-
+    if(m_no_igd == false) {
+      LOG_PRINT_L0("Attempting to add IGD port mapping.");
+      int result;
+      UPNPDev* deviceList = upnpDiscover(1000, NULL, NULL, 0, 0, &result);
+      UPNPUrls urls;
+      IGDdatas igdData;
+      char lanAddress[64];
+      result = UPNP_GetValidIGD(deviceList, &urls, &igdData, lanAddress, sizeof lanAddress);
+      freeUPNPDevlist(deviceList);
+      if (result != 0) {
+        if (result == 1) {
+          std::ostringstream portString;
+          portString << m_listenning_port;
+
+          // Delete the port mapping before we create it, just in case we have dangling port mapping from the daemon not being shut down correctly
+          UPNP_DeletePortMapping(urls.controlURL, igdData.first.servicetype, portString.str().c_str(), "TCP", 0);
+
+          int portMappingResult;
+          portMappingResult = UPNP_AddPortMapping(urls.controlURL, igdData.first.servicetype, portString.str().c_str(), portString.str().c_str(), lanAddress, CRYPTONOTE_NAME, "TCP", 0, "0");
+          if (portMappingResult != 0) {
+            LOG_ERROR("UPNP_AddPortMapping failed, error: " << strupnperror(portMappingResult));
+          } else {
+            LOG_PRINT_GREEN("Added IGD port mapping.", LOG_LEVEL_0);
+          }
+        } else if (result == 2) {
+          LOG_PRINT_L0("IGD was found but reported as not connected.");
+        } else if (result == 3) {
+          LOG_PRINT_L0("UPnP device was found but not recoginzed as IGD.");
+        } else {
+          LOG_ERROR("UPNP_GetValidIGD returned an unknown result code.");
+        }
+
+        FreeUPNPUrls(&urls);
+      } else {
+        LOG_PRINT_L0("No IGD was found.");
+      }
+    }
     return res;
   }
   //-----------------------------------------------------------------------------------
@@ -364,6 +482,30 @@
   template<class t_payload_net_handler>
   bool node_server<t_payload_net_handler>::run()
   {
+    // creating thread to log number of connections
+    mPeersLoggerThread.reset(new std::thread([&]()
+    {
+      _note("Thread monitor number of peers - start");
+      while (!is_closing)
+      { // main loop of thread
+        //number_of_peers = m_net_server.get_config_object().get_connections_count();
+        unsigned int number_of_peers = 0;
+        m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
+        {
+          if (!cntxt.m_is_income) ++number_of_peers;
+          return true;
+        }); // lambda
+
+        m_current_number_of_out_peers = number_of_peers;
+        if (epee::net_utils::data_logger::is_dying())
+          break;
+        epee::net_utils::data_logger::get_instance().add_data("peers", number_of_peers);
+
+        std::this_thread::sleep_for(std::chrono::seconds(1));
+      } // main loop of thread
+      _note("Thread monitor number of peers - done");
+    })); // lambda
+
     //here you can set worker threads count
     int thrds_count = 10;
@@ -394,6 +536,7 @@
   template<class t_payload_net_handler>
   bool node_server<t_payload_net_handler>::deinit()
   {
+    kill();
     m_peerlist.deinit();
     m_net_server.deinit_server();
     return store_config();
@@ -606,6 +749,16 @@
   template<class t_payload_net_handler>
   bool node_server<t_payload_net_handler>::try_to_connect_and_handshake_with_new_peer(const net_address& na, bool just_take_peerlist, uint64_t last_seen_stamp, bool white)
   {
+    if (m_current_number_of_out_peers == m_config.m_net_config.connections_count) // out peers limit
+    {
+      return false;
+    }
+    else if (m_current_number_of_out_peers > m_config.m_net_config.connections_count)
+    {
+      m_net_server.get_config_object().del_out_connections(1);
+      m_current_number_of_out_peers --; // atomic variable, update time = 1s
+      return false;
+    }
     LOG_PRINT_L1("Connecting to " << epee::string_tools::get_ip_string_from_int32(na.ip) << ":"
         << epee::string_tools::num_to_string_fast(na.port) << "(white=" << white << ", last_seen: "
         << (last_seen_stamp ? epee::misc_utils::get_time_interval_string(time(NULL) - last_seen_stamp):"never")
@@ -693,16 +846,22 @@
 
       ++try_count;
 
-      if(is_peer_used(pe))
+      _note("Considering connecting (out) to peer: " << pe.id << " " << epee::string_tools::get_ip_string_from_int32(pe.adr.ip) << ":" << boost::lexical_cast<std::string>(pe.adr.port));
+
+      if(is_peer_used(pe)) {
+        _note("Peer is used");
         continue;
+      }
 
      LOG_PRINT_L1("Selected peer: " << pe.id << " " << epee::string_tools::get_ip_string_from_int32(pe.adr.ip)
                    << ":" << boost::lexical_cast<std::string>(pe.adr.port)
                    << "[white=" << use_white_list
                    << "] last_seen: " << (pe.last_seen ? epee::misc_utils::get_time_interval_string(time(NULL) - pe.last_seen) : "never"));
 
-      if(!try_to_connect_and_handshake_with_new_peer(pe.adr, false, pe.last_seen, use_white_list))
+      if(!try_to_connect_and_handshake_with_new_peer(pe.adr, false, pe.last_seen, use_white_list)) {
+        _note("Handshake failed");
         continue;
+      }
 
       return true;
     }
@@ -1241,4 +1400,83 @@
 
     return true;
   }
+
+  template<class t_payload_net_handler>
+  bool node_server<t_payload_net_handler>::set_max_out_peers(const boost::program_options::variables_map& vm, int64_t max)
+  {
+    if(max == -1) {
+      m_config.m_net_config.connections_count = P2P_DEFAULT_CONNECTIONS_COUNT;
+      epee::net_utils::data_logger::get_instance().add_data("peers_limit", m_config.m_net_config.connections_count);
+      return true;
+    }
+    epee::net_utils::data_logger::get_instance().add_data("peers_limit", max);
+    m_config.m_net_config.connections_count = max;
+    return true;
+  }
+
+  template<class t_payload_net_handler>
+  void node_server<t_payload_net_handler>::delete_connections(size_t count)
+  {
+    m_net_server.get_config_object().del_out_connections(count);
+  }
+
+  template<class t_payload_net_handler>
+  bool node_server<t_payload_net_handler>::set_tos_flag(const boost::program_options::variables_map& vm, int flag)
+  {
+    if(flag==-1){
+      return true;
+    }
+    epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_tos_flag(flag);
+    _dbg1("Set ToS flag " << flag);
+    return true;
+  }
+
+  template<class t_payload_net_handler>
+  bool node_server<t_payload_net_handler>::set_rate_up_limit(const boost::program_options::variables_map& vm, int64_t limit)
+  {
+    this->islimitup=true;
+
+    if (limit==-1) {
+      limit=128;
+      this->islimitup=false;
+    }
+
+    limit *= 1024;
+    epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_up_limit( limit );
+    LOG_PRINT_L0("Set limit-up to " << limit/1024 << " kB/s");
+    return true;
+  }
+
+  template<class t_payload_net_handler>
+  bool node_server<t_payload_net_handler>::set_rate_down_limit(const boost::program_options::variables_map& vm, int64_t limit)
+  {
+    this->islimitdown=true;
+    if(limit==-1) {
+      limit=128;
+      this->islimitdown=false;
+    }
+    limit *= 1024;
+    epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_down_limit( limit );
+    LOG_PRINT_L0("Set limit-down to " << limit/1024 << " kB/s");
+    return true;
+  }
+
+  template<class t_payload_net_handler>
+  bool node_server<t_payload_net_handler>::set_rate_limit(const boost::program_options::variables_map& vm, uint64_t limit)
+  {
+    limit *= 1024;
+    if(this->islimitdown==false && this->islimitup==false) {
+      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_up_limit( limit );
+      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_down_limit( limit );
+      LOG_PRINT_L0("Set limit to " << limit/1024 << " kB/s");
+    }
+    else if(this->islimitdown==false && this->islimitup==true ) {
+      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_down_limit( limit );
+    }
+    else if(this->islimitdown==true && this->islimitup==false ) {
+      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_up_limit( limit );
+    }
+
+    return true;
+  }
 }
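The new switches registered above (no-igd, out-peers, tos-flag, limit-rate-up, limit-rate-down, limit-rate and save-graph) put the connection-count and bandwidth controls on the daemon command line. As a usage illustration only, with an example binary name and example values rather than anything taken from this change:

    bitmonerod --out-peers 12 --limit-rate-up 64 --limit-rate-down 256 --no-igd

Note how set_rate_limit() near the end of the diff treats limit-rate as a fallback: the combined limit is applied only to a direction that was not already set explicitly through limit-rate-up or limit-rate-down.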
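The DNS seeding change in init() is the densest part of the diff: each hostname is resolved on its own boost::thread, every thread is joined against one shared deadline (CRYPTONOTE_DNS_TIMEOUT_MS), and stragglers are interrupted so a slow resolver cannot stall startup. Below is a minimal, self-contained sketch of that join-with-deadline pattern; slow_lookup(), the host names and the 1500 ms deadline are placeholders for illustration and not part of the Monero code, and the sketch also joins after interrupt(), which the patch itself does not do.

#include <boost/chrono.hpp>
#include <boost/thread/thread.hpp>
#include <cstddef>
#include <iostream>
#include <list>
#include <string>
#include <vector>

// Stand-in for a blocking DNS query; the sleep is an interruption point,
// so interrupt() below makes it throw boost::thread_interrupted promptly.
static std::vector<std::string> slow_lookup(const std::string& host)
{
  boost::this_thread::sleep_for(boost::chrono::milliseconds(host.size() * 50));
  return std::vector<std::string>(1, host + ".resolved.example");
}

int main()
{
  const std::vector<std::string> hosts = { "seeds.example.org", "a-much-slower-seed-host.example.org" };
  std::vector<std::vector<std::string> > results(hosts.size());
  std::list<boost::thread*> threads;

  // one worker thread per hostname, results published by index
  for (std::size_t index = 0; index < hosts.size(); ++index)
  {
    const std::string host = hosts[index];
    threads.push_back(new boost::thread([&results, host, index]()
    {
      try
      {
        std::vector<std::string> addrs = slow_lookup(host);
        boost::this_thread::interruption_point();
        results[index] = addrs;               // publish only if not interrupted
      }
      catch (const boost::thread_interrupted&)
      {
        // timed out: leave results[index] empty
      }
    }));
  }

  // one shared deadline for every join, analogous to CRYPTONOTE_DNS_TIMEOUT_MS
  const boost::chrono::system_clock::time_point deadline =
      boost::chrono::system_clock::now() + boost::chrono::milliseconds(1500);
  for (std::list<boost::thread*>::iterator it = threads.begin(); it != threads.end(); ++it)
  {
    boost::thread* th = *it;
    if (!th->try_join_until(deadline))
    {
      th->interrupt();   // ask the straggler to give up
      th->join();        // returns quickly: the sleep throws at the interruption point
    }
    delete th;
  }

  for (std::size_t i = 0; i < hosts.size(); ++i)
    std::cout << hosts[i] << ": " << results[i].size() << " result(s)" << std::endl;
  return 0;
}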