| author | moneromooo-monero <moneromooo-monero@users.noreply.github.com> | 2019-09-16 12:18:34 +0000 |
| --- | --- | --- |
| committer | moneromooo-monero <moneromooo-monero@users.noreply.github.com> | 2019-09-27 00:10:37 +0000 |
| commit | 8330e772f1ed680a54833d25c4d17d09a99ab8d6 (patch) | |
| tree | d536126b06a00fabd56881363daaa9a1fd305c80 /src/cryptonote_protocol/cryptonote_protocol_handler.inl | |
| parent | Merge pull request #5876 (diff) | |
| download | monero-8330e772f1ed680a54833d25c4d17d09a99ab8d6.tar.xz | |
monerod can now sync from pruned blocks
If the peer (whether itself pruned or not) supports sending pruned blocks
to syncing nodes, the pruned version will be sent along with the hash
of the pruned data and the block weight. The original tx hashes can be
reconstructed from the pruned txes and their prunable data hash. Those
hashes and the block weights are then hashed and checked against the set of
precompiled hashes, ensuring the data we received is the original data.
It is currently not possible to use this system when not using the set
of precompiled hashes, since block weights cannot otherwise be checked
for validity.
This is off by default for now, and is enabled by --sync-pruned-blocks
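
A note on the reconstruction step: for v2 (RingCT) transactions the txid is itself a hash over three component hashes (transaction prefix, non-prunable signature data, prunable signature data). A node that only holds the pruned blob can therefore recompute the first two and combine them with the prunable-data hash supplied by the peer, which is what `get_pruned_transaction_hash()` is used for in the diff below. The sketch that follows is illustrative only; `hash32`, `hash_fn` and `pruned_txid` are made-up stand-ins for `crypto::hash`, `cn_fast_hash` and the real helper.

```cpp
// Illustrative sketch (not the monero implementation): rebuild a txid from a pruned tx
// and the peer-supplied prunable data hash. The hashing primitive is injected by the
// caller; monero uses Keccak (cn_fast_hash).
#include <array>
#include <cstdint>
#include <cstring>
#include <functional>
#include <vector>

using hash32 = std::array<std::uint8_t, 32>;
using hash_fn = std::function<hash32(const std::uint8_t*, std::size_t)>;

hash32 pruned_txid(const hash_fn& H,
                   const std::vector<std::uint8_t>& prefix_blob,  // serialized tx prefix
                   const std::vector<std::uint8_t>& base_blob,    // serialized non-prunable sig data
                   const hash32& prunable_hash)                   // received from the peer
{
  const hash32 prefix_hash = H(prefix_blob.data(), prefix_blob.size());
  const hash32 base_hash   = H(base_blob.data(), base_blob.size());

  // txid = H(prefix_hash || base_hash || prunable_hash)
  std::array<std::uint8_t, 3 * 32> concat{};
  std::memcpy(concat.data(),      prefix_hash.data(),   32);
  std::memcpy(concat.data() + 32, base_hash.data(),     32);
  std::memcpy(concat.data() + 64, prunable_hash.data(), 32);
  return H(concat.data(), concat.size());
}
```

The txids recomputed this way, together with the peer-supplied block weights, are what gets checked against the precompiled hash-of-hashes set.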
Diffstat (limited to 'src/cryptonote_protocol/cryptonote_protocol_handler.inl')
-rw-r--r-- | src/cryptonote_protocol/cryptonote_protocol_handler.inl | 183 |
1 files changed, 153 insertions, 30 deletions
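
Much of the new logic in the diff compares pruning stripes: the peer's stripe, our own, and the stripe a given block height falls into. For orientation, a simplified standalone version of the stripe computation is sketched below; it is meant to mirror `tools::get_pruning_stripe()`, but the function name and the constants shown are illustrative assumptions (believed to be monero's mainnet defaults), not the canonical definition.

```cpp
// Simplified sketch of the stripe computation the new checks rely on. Assumed constants:
// 4096-block stripe chunks, a 5500-block unpruned tip, and 2^3 = 8 stripes.
#include <cstdint>

constexpr std::uint64_t PRUNING_STRIPE_SIZE = 4096;  // blocks per stripe chunk (assumed)
constexpr std::uint64_t PRUNING_TIP_BLOCKS  = 5500;  // recent tip is never pruned (assumed)
constexpr std::uint32_t PRUNING_LOG_STRIPES = 3;     // 8 stripes (assumed)

// Returns 0 for "in the unpruned tip", otherwise a stripe in [1, 2^log_stripes].
std::uint32_t pruning_stripe(std::uint64_t block_height, std::uint64_t blockchain_height,
                             std::uint32_t log_stripes = PRUNING_LOG_STRIPES)
{
  if (block_height + PRUNING_TIP_BLOCKS >= blockchain_height)
    return 0;
  return static_cast<std::uint32_t>((block_height / PRUNING_STRIPE_SIZE) % (1ull << log_stripes)) + 1;
}
```

The new `should_ask_for_pruned_data()` below then asks for pruned data only when the local node is itself pruned, the span lies inside the precompiled hash area, block weights are available, and neither end of the span falls into our own stripe (blocks in our stripe are kept unpruned locally, so full data is requested for those).

The sync payload the handler works with is also extended. The outline below is inferred from the fields referenced in the diff (`pruned`, `block_weight`, `blob`, `prunable_hash`) and is not the actual definition from `cryptonote_protocol_defs.h`:

```cpp
// Outline of the sync payload shape implied by the diff; serialization and any other
// fields are omitted, and the types shown are stand-ins.
#include <array>
#include <cstdint>
#include <string>
#include <vector>

using hash32 = std::array<std::uint8_t, 32>;  // stand-in for crypto::hash

struct tx_blob_entry
{
  std::string blob;        // pruned or full tx blob
  hash32 prunable_hash{};  // all-zero (null) for full txs, set when the prunable part was stripped
};

struct block_complete_entry
{
  bool pruned = false;              // whether the txs below are pruned
  std::string block;                // serialized block
  std::uint64_t block_weight = 0;   // only meaningful for pruned entries
  std::vector<tx_blob_entry> txs;
};
```

Syncing from pruned data stays opt-in: starting monerod with `--sync-pruned-blocks` sets `m_sync_pruned_blocks`, which flips `r.prune` on outgoing NOTIFY_REQUEST_CHAIN requests and gates `should_ask_for_pruned_data()`.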
diff --git a/src/cryptonote_protocol/cryptonote_protocol_handler.inl b/src/cryptonote_protocol/cryptonote_protocol_handler.inl
index 65115ee72..c0234bde3 100644
--- a/src/cryptonote_protocol/cryptonote_protocol_handler.inl
+++ b/src/cryptonote_protocol/cryptonote_protocol_handler.inl
@@ -106,6 +106,7 @@ namespace cryptonote
     m_sync_download_objects_size = 0;
     m_block_download_max_size = command_line::get_arg(vm, cryptonote::arg_block_download_max_size);
+    m_sync_pruned_blocks = command_line::get_arg(vm, cryptonote::arg_sync_pruned_blocks);
     return true;
   }
@@ -138,6 +139,7 @@ namespace cryptonote
     context.m_needed_objects.clear();
     m_core.get_short_chain_history(r.block_ids);
     handler_request_blocks_history( r.block_ids ); // change the limit(?), sleep(?)
+    r.prune = m_sync_pruned_blocks;
     MLOG_P2P_MESSAGE("-->>NOTIFY_REQUEST_CHAIN: m_block_ids.size()=" << r.block_ids.size() );
     post_notify<NOTIFY_REQUEST_CHAIN>(r, context);
     MLOG_PEER_STATE("requesting chain");
@@ -493,6 +495,7 @@ namespace cryptonote
     context.m_state = cryptonote_connection_context::state_synchronizing;
     NOTIFY_REQUEST_CHAIN::request r = boost::value_initialized<NOTIFY_REQUEST_CHAIN::request>();
     m_core.get_short_chain_history(r.block_ids);
+    r.prune = m_sync_pruned_blocks;
     handler_request_blocks_history( r.block_ids ); // change the limit(?), sleep(?)
     MLOG_P2P_MESSAGE("-->>NOTIFY_REQUEST_CHAIN: m_block_ids.size()=" << r.block_ids.size() );
     post_notify<NOTIFY_REQUEST_CHAIN>(r, context);
@@ -540,9 +543,9 @@ namespace cryptonote
         return 1;
       }
     }
-
-    std::vector<blobdata> have_tx;
-
+
+    std::vector<tx_blob_entry> have_tx;
+
     // Instead of requesting missing transactions by hash like BTC,
     // we do it by index (thanks to a suggestion from moneromooo) because
     // we're way cooler .. and also because they're smaller than hashes.
@@ -556,7 +559,7 @@ namespace cryptonote
     for(auto& tx_blob: arg.b.txs)
     {
-      if(parse_and_validate_tx_from_blob(tx_blob, tx))
+      if(parse_and_validate_tx_from_blob(tx_blob.blob, tx))
       {
         try
         {
@@ -641,7 +644,7 @@ namespace cryptonote
         LOG_ERROR_CCONTEXT
         (
           "sent wrong tx: failed to parse and validate transaction: "
-          << epee::string_tools::buff_to_hex_nodelimer(tx_blob)
+          << epee::string_tools::buff_to_hex_nodelimer(tx_blob.blob)
           << ", dropping connection"
         );
@@ -676,7 +679,7 @@ namespace cryptonote
         cryptonote::blobdata txblob;
         if(m_core.get_pool_transaction(tx_hash, txblob))
         {
-          have_tx.push_back(txblob);
+          have_tx.push_back({txblob, crypto::null_hash});
         }
         else
         {
@@ -688,7 +691,7 @@ namespace cryptonote
           {
             if (txes.size() == 1)
             {
-              have_tx.push_back(tx_to_blob(txes.front()));
+              have_tx.push_back({tx_to_blob(txes.front()), crypto::null_hash});
             }
             else
             {
@@ -771,6 +774,7 @@ namespace cryptonote
       NOTIFY_REQUEST_CHAIN::request r = boost::value_initialized<NOTIFY_REQUEST_CHAIN::request>();
       m_core.get_short_chain_history(r.block_ids);
       handler_request_blocks_history( r.block_ids ); // change the limit(?), sleep(?)
+      r.prune = m_sync_pruned_blocks;
       MLOG_P2P_MESSAGE("-->>NOTIFY_REQUEST_CHAIN: m_block_ids.size()=" << r.block_ids.size() );
       post_notify<NOTIFY_REQUEST_CHAIN>(r, context);
       MLOG_PEER_STATE("requesting chain");
@@ -872,7 +876,7 @@ namespace cryptonote
         for(auto& tx: txs)
         {
-          fluffy_response.b.txs.push_back(t_serializable_object_to_blob(tx));
+          fluffy_response.b.txs.push_back({t_serializable_object_to_blob(tx), crypto::null_hash});
         }
         MLOG_P2P_MESSAGE
@@ -910,7 +914,7 @@ namespace cryptonote
     for (size_t i = 0; i < arg.txs.size(); ++i)
     {
       cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc);
-      m_core.handle_incoming_tx(arg.txs[i], tvc, false, true, false);
+      m_core.handle_incoming_tx({arg.txs[i], crypto::null_hash}, tvc, false, true, false);
       if(tvc.m_verifivation_failed)
       {
         LOG_PRINT_CCONTEXT_L1("Tx verification failed, dropping connection");
@@ -991,7 +995,7 @@ namespace cryptonote
     for (const auto &element : arg.blocks) {
       blocks_size += element.block.size();
       for (const auto &tx : element.txs)
-        blocks_size += tx.size();
+        blocks_size += tx.blob.size();
     }
     size += blocks_size;
@@ -1090,6 +1094,53 @@ namespace cryptonote
       return 1;
     }
+    const bool pruned_ok = should_ask_for_pruned_data(context, start_height, arg.blocks.size(), true);
+    if (!pruned_ok)
+    {
+      // if we don't want pruned data, check we did not get any
+      for (block_complete_entry& block_entry: arg.blocks)
+      {
+        if (block_entry.pruned)
+        {
+          MERROR(context << "returned a pruned block, dropping connection");
+          drop_connection(context, false, false);
+          ++m_sync_bad_spans_downloaded;
+          return 1;
+        }
+        if (block_entry.block_weight)
+        {
+          MERROR(context << "returned a block weight for a non pruned block, dropping connection");
+          drop_connection(context, false, false);
+          ++m_sync_bad_spans_downloaded;
+          return 1;
+        }
+        for (const tx_blob_entry &tx_entry: block_entry.txs)
+        {
+          if (tx_entry.prunable_hash != crypto::null_hash)
+          {
+            MERROR(context << "returned at least one pruned object which we did not expect, dropping connection");
+            drop_connection(context, false, false);
+            ++m_sync_bad_spans_downloaded;
+            return 1;
+          }
+        }
+      }
+    }
+    else
+    {
+      // we accept pruned data, check that if we got some, then no weights are zero
+      for (block_complete_entry& block_entry: arg.blocks)
+      {
+        if (block_entry.block_weight == 0 && block_entry.pruned)
+        {
+          MERROR(context << "returned at least one pruned block with 0 weight, dropping connection");
+          drop_connection(context, false, false);
+          ++m_sync_bad_spans_downloaded;
+          return 1;
+        }
+      }
+    }
+
     {
       MLOG_YELLOW(el::Level::Debug, context << " Got NEW BLOCKS inside of " << __FUNCTION__ << ": size: " << arg.blocks.size()
         << ", blocks: " << start_height << " - " << (start_height + arg.blocks.size() - 1) <<
@@ -1273,18 +1324,32 @@ namespace cryptonote
         if (tvc.size() != block_entry.txs.size())
         {
           LOG_ERROR_CCONTEXT("Internal error: tvc.size() != block_entry.txs.size()");
+          if (!m_core.cleanup_handle_incoming_blocks())
+          {
+            LOG_PRINT_CCONTEXT_L0("Failure in cleanup_handle_incoming_blocks");
+            return 1;
+          }
           return 1;
         }
-        std::vector<blobdata>::const_iterator it = block_entry.txs.begin();
+        std::vector<tx_blob_entry>::const_iterator it = block_entry.txs.begin();
         for (size_t i = 0; i < tvc.size(); ++i, ++it)
         {
           if(tvc[i].m_verifivation_failed)
           {
             if (!m_p2p->for_connection(span_connection_id, [&](cryptonote_connection_context& context, nodetool::peerid_type peer_id, uint32_t f)->bool{
               cryptonote::transaction tx;
-              parse_and_validate_tx_from_blob(*it, tx); // must succeed if we got here
+              crypto::hash txid;
+              if (it->prunable_hash == crypto::null_hash)
+              {
+                parse_and_validate_tx_from_blob(it->blob, tx, txid); // must succeed if we got here
+              }
+              else
+              {
+                parse_and_validate_tx_base_from_blob(it->blob, tx); // must succeed if we got here
+                txid = get_pruned_transaction_hash(tx, it->prunable_hash);
+              }
               LOG_ERROR_CCONTEXT("transaction verification failed on NOTIFY_RESPONSE_GET_OBJECTS, tx_id = "
-                  << epee::string_tools::pod_to_hex(cryptonote::get_transaction_hash(tx)) << ", dropping connection");
+                  << epee::string_tools::pod_to_hex(txid) << ", dropping connection");
               drop_connection(context, false, true);
               return 1;
             }))
@@ -1543,7 +1608,7 @@ skip:
   {
     MLOG_P2P_MESSAGE("Received NOTIFY_REQUEST_CHAIN (" << arg.block_ids.size() << " blocks");
     NOTIFY_RESPONSE_CHAIN_ENTRY::request r;
-    if(!m_core.find_blockchain_supplement(arg.block_ids, r))
+    if(!m_core.find_blockchain_supplement(arg.block_ids, !arg.prune, r))
     {
       LOG_ERROR_CCONTEXT("Failed to handle NOTIFY_REQUEST_CHAIN.");
       drop_connection(context, false, false);
@@ -1662,6 +1727,12 @@ skip:
       MDEBUG(context << "This peer has needed stripe " << peer_stripe << ", not dropping");
       return false;
     }
+    const uint32_t local_stripe = tools::get_pruning_stripe(m_core.get_blockchain_pruning_seed());
+    if (m_sync_pruned_blocks && peer_stripe == local_stripe)
+    {
+      MDEBUG(context << "We can sync pruned blocks off this peer, not dropping");
+      return false;
+    }
     if (!context.m_needed_objects.empty())
     {
@@ -1701,22 +1772,42 @@ skip:
   {
     // take out blocks we already have
    size_t skip = 0;
-    while (skip < context.m_needed_objects.size() && (m_core.have_block(context.m_needed_objects[skip]) || (check_block_queue && m_block_queue.have(context.m_needed_objects[skip]))))
+    while (skip < context.m_needed_objects.size() && (m_core.have_block(context.m_needed_objects[skip].first) || (check_block_queue && m_block_queue.have(context.m_needed_objects[skip].first))))
     {
       // if we're popping the last hash, record it so we can ask again from that hash,
       // this prevents never being able to progress on peers we get old hash lists from
       if (skip + 1 == context.m_needed_objects.size())
-        context.m_last_known_hash = context.m_needed_objects[skip];
+        context.m_last_known_hash = context.m_needed_objects[skip].first;
       ++skip;
     }
     if (skip > 0)
     {
       MDEBUG(context << "skipping " << skip << "/" << context.m_needed_objects.size() << " blocks");
-      context.m_needed_objects = std::vector<crypto::hash>(context.m_needed_objects.begin() + skip, context.m_needed_objects.end());
+      context.m_needed_objects = std::vector<std::pair<crypto::hash, uint64_t>>(context.m_needed_objects.begin() + skip, context.m_needed_objects.end());
     }
   }
   //------------------------------------------------------------------------------------------------------------------------
   template<class t_core>
+  bool t_cryptonote_protocol_handler<t_core>::should_ask_for_pruned_data(cryptonote_connection_context& context, uint64_t first_block_height, uint64_t nblocks, bool check_block_weights) const
+  {
+    if (!m_sync_pruned_blocks)
+      return false;
+    if (!m_core.is_within_compiled_block_hash_area(first_block_height + nblocks - 1))
+      return false;
+    const uint32_t local_stripe = tools::get_pruning_stripe(m_core.get_blockchain_pruning_seed());
+    if (local_stripe == 0)
+      return false;
+    // assumes the span size is less or equal to the stripe size
+    bool full_data_needed = tools::get_pruning_stripe(first_block_height, context.m_remote_blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES) == local_stripe
+        || tools::get_pruning_stripe(first_block_height + nblocks - 1, context.m_remote_blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES) == local_stripe;
+    if (full_data_needed)
+      return false;
+    if (check_block_weights && !m_core.has_block_weights(first_block_height, nblocks))
+      return false;
+    return true;
+  }
+  //------------------------------------------------------------------------------------------------------------------------
+  template<class t_core>
   bool t_cryptonote_protocol_handler<t_core>::request_missing_objects(cryptonote_connection_context& context, bool check_having_blocks, bool force_next_span)
   {
     // flush stale spans
@@ -1739,6 +1830,7 @@ skip:
     const auto next_needed_pruning_stripe = get_next_needed_pruning_stripe();
     const uint32_t add_stripe = tools::get_pruning_stripe(bc_height, context.m_remote_blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES);
     const uint32_t peer_stripe = tools::get_pruning_stripe(context.m_pruning_seed);
+    const uint32_t local_stripe = tools::get_pruning_stripe(m_core.get_blockchain_pruning_seed());
     const size_t block_queue_size_threshold = m_block_download_max_size ? m_block_download_max_size : BLOCK_QUEUE_SIZE_THRESHOLD;
     bool queue_proceed = nspans < BLOCK_QUEUE_NSPANS_THRESHOLD || size < block_queue_size_threshold;
     // get rid of blocks we already requested, or already have
@@ -1749,7 +1841,7 @@ skip:
       next_block_height = next_needed_height;
     else
       next_block_height = context.m_last_response_height - context.m_needed_objects.size() + 1;
-    bool stripe_proceed_main = (add_stripe == 0 || peer_stripe == 0 || add_stripe == peer_stripe) && (next_block_height < bc_height + BLOCK_QUEUE_FORCE_DOWNLOAD_NEAR_BLOCKS || next_needed_height < bc_height + BLOCK_QUEUE_FORCE_DOWNLOAD_NEAR_BLOCKS);
+    bool stripe_proceed_main = ((m_sync_pruned_blocks && peer_stripe == local_stripe) || add_stripe == 0 || peer_stripe == 0 || add_stripe == peer_stripe) && (next_block_height < bc_height + BLOCK_QUEUE_FORCE_DOWNLOAD_NEAR_BLOCKS || next_needed_height < bc_height + BLOCK_QUEUE_FORCE_DOWNLOAD_NEAR_BLOCKS);
     bool stripe_proceed_secondary = tools::has_unpruned_block(next_block_height, context.m_remote_blockchain_height, context.m_pruning_seed);
     bool proceed = stripe_proceed_main || (queue_proceed && stripe_proceed_secondary);
     if (!stripe_proceed_main && !stripe_proceed_secondary && should_drop_connection(context, tools::get_pruning_stripe(next_block_height, context.m_remote_blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES)))
@@ -1812,8 +1904,9 @@ skip:
     {
       const uint64_t now = tools::get_tick_count();
       const uint64_t dt = now - m_last_add_end_time;
-      if (tools::ticks_to_ns(dt) >= DROP_ON_SYNC_WEDGE_THRESHOLD)
+      if (m_last_add_end_time && tools::ticks_to_ns(dt) >= DROP_ON_SYNC_WEDGE_THRESHOLD)
       {
+        MDEBUG(context << "ns " << tools::ticks_to_ns(dt) << " from " << m_last_add_end_time << " and " << now);
         MDEBUG(context << "Block addition seems to have wedged, dropping connection");
         return false;
       }
@@ -1880,7 +1973,8 @@ skip:
       skip_unneeded_hashes(context, false);
       const uint64_t first_block_height = context.m_last_response_height - context.m_needed_objects.size() + 1;
-      span = m_block_queue.reserve_span(first_block_height, context.m_last_response_height, count_limit, context.m_connection_id, context.m_pruning_seed, context.m_remote_blockchain_height, context.m_needed_objects);
+      bool sync_pruned_blocks = m_sync_pruned_blocks && m_core.get_blockchain_pruning_seed();
+      span = m_block_queue.reserve_span(first_block_height, context.m_last_response_height, count_limit, context.m_connection_id, sync_pruned_blocks, m_core.get_blockchain_pruning_seed(), context.m_pruning_seed, context.m_remote_blockchain_height, context.m_needed_objects);
       MDEBUG(context << " span from " << first_block_height << ": " << span.first << "/" << span.second);
       if (span.second > 0)
       {
@@ -1910,7 +2004,8 @@ skip:
           ++count;
           context.m_requested_objects.insert(hash);
           // that's atrocious O(n) wise, but this is rare
-          auto i = std::find(context.m_needed_objects.begin(), context.m_needed_objects.end(), hash);
+          auto i = std::find_if(context.m_needed_objects.begin(), context.m_needed_objects.end(),
+              [&hash](const std::pair<crypto::hash, uint64_t> &o) { return o.first == hash; });
           if (i != context.m_needed_objects.end())
             context.m_needed_objects.erase(i);
         }
@@ -1929,7 +2024,7 @@ skip:
           return false;
         }
         if (skip > 0)
-          context.m_needed_objects = std::vector<crypto::hash>(context.m_needed_objects.begin() + skip, context.m_needed_objects.end());
+          context.m_needed_objects = std::vector<std::pair<crypto::hash, uint64_t>>(context.m_needed_objects.begin() + skip, context.m_needed_objects.end());
         if (context.m_needed_objects.size() < span.second)
         {
           MERROR("ERROR: span " << span.first << "/" << span.second << ", m_needed_objects " << context.m_needed_objects.size());
@@ -1938,18 +2033,37 @@ skip:
       for (size_t n = 0; n < span.second; ++n)
       {
-        req.blocks.push_back(context.m_needed_objects[n]);
+        req.blocks.push_back(context.m_needed_objects[n].first);
         ++count;
-        context.m_requested_objects.insert(context.m_needed_objects[n]);
+        context.m_requested_objects.insert(context.m_needed_objects[n].first);
       }
-      context.m_needed_objects = std::vector<crypto::hash>(context.m_needed_objects.begin() + span.second, context.m_needed_objects.end());
+      context.m_needed_objects = std::vector<std::pair<crypto::hash, uint64_t>>(context.m_needed_objects.begin() + span.second, context.m_needed_objects.end());
     }
+    req.prune = should_ask_for_pruned_data(context, span.first, span.second, true);
+
+    // if we need to ask for full data and that peer does not have the right stripe, we can't ask it
+    if (!req.prune && context.m_pruning_seed)
+    {
+      const uint32_t peer_stripe = tools::get_pruning_stripe(context.m_pruning_seed);
+      const uint32_t first_stripe = tools::get_pruning_stripe(span.first, context.m_remote_blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES);
+      const uint32_t last_stripe = tools::get_pruning_stripe(span.first + span.second - 1, context.m_remote_blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES);
+      if ((first_stripe && peer_stripe != first_stripe) || (last_stripe && peer_stripe != last_stripe))
+      {
+        MDEBUG(context << "We need full data, but the peer does not have it, dropping peer");
+        return false;
+      }
+    }
     context.m_last_request_time = boost::posix_time::microsec_clock::universal_time();
     MLOG_P2P_MESSAGE("-->>NOTIFY_REQUEST_GET_OBJECTS: blocks.size()=" << req.blocks.size()
         << "requested blocks count=" << count << " / " << count_limit << " from " << span.first << ", first hash " << req.blocks.front());
     //epee::net_utils::network_throttle_manager::get_global_throttle_inreq().logger_handle_net("log/dr-monero/net/req-all.data", sec, get_avg_block_size());
+    MDEBUG("Asking for " << (req.prune ? "pruned" : "full") << " data, start/end "
+        << tools::get_pruning_stripe(span.first, context.m_remote_blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES)
+        << "/" << tools::get_pruning_stripe(span.first + span.second - 1, context.m_remote_blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES)
+        << ", ours " << tools::get_pruning_stripe(m_core.get_blockchain_pruning_seed()) << ", peer stripe " << tools::get_pruning_stripe(context.m_pruning_seed));
+
     post_notify<NOTIFY_REQUEST_GET_OBJECTS>(req, context);
     MLOG_PEER_STATE("requesting objects");
     return true;
@@ -1959,7 +2073,8 @@ skip:
       // drop it to make space for other peers, or ask for a span further down the line
       const uint32_t next_stripe = get_next_needed_pruning_stripe().first;
       const uint32_t peer_stripe = tools::get_pruning_stripe(context.m_pruning_seed);
-      if (next_stripe && peer_stripe && next_stripe != peer_stripe)
+      const uint32_t local_stripe = tools::get_pruning_stripe(m_core.get_blockchain_pruning_seed());
+      if (!(m_sync_pruned_blocks && peer_stripe == local_stripe) && next_stripe && peer_stripe && next_stripe != peer_stripe)
      {
        // at this point, we have to either close the connection, or start getting blocks past the
        // current point, or become dormant
@@ -2022,6 +2137,7 @@ skip:
     }
     handler_request_blocks_history( r.block_ids ); // change the limit(?), sleep(?)
+    r.prune = m_sync_pruned_blocks;
     //std::string blob; // for calculate size of request
     //epee::serialization::store_t_to_binary(r, blob);
@@ -2128,6 +2244,12 @@ skip:
       drop_connection(context, true, false);
       return 1;
     }
+    if (!arg.m_block_weights.empty() && arg.m_block_weights.size() != arg.m_block_ids.size())
+    {
+      LOG_ERROR_CCONTEXT("sent invalid block weight array, dropping connection");
+      drop_connection(context, true, false);
+      return 1;
+    }
     MDEBUG(context << "first block hash " << arg.m_block_ids.front() << ", last " << arg.m_block_ids.back());
     if (arg.total_height >= CRYPTONOTE_MAX_BLOCK_NUMBER || arg.m_block_ids.size() >= CRYPTONOTE_MAX_BLOCK_NUMBER)
@@ -2147,7 +2269,7 @@ skip:
       return 1;
     }
-    uint64_t n_use_blocks = m_core.prevalidate_block_hashes(arg.start_height, arg.m_block_ids);
+    uint64_t n_use_blocks = m_core.prevalidate_block_hashes(arg.start_height, arg.m_block_ids, arg.m_block_weights);
     if (n_use_blocks + HASH_OF_HASHES_STEP <= arg.m_block_ids.size())
     {
       LOG_ERROR_CCONTEXT("Most blocks are invalid, dropping connection");
@@ -2157,9 +2279,10 @@ skip:
     context.m_needed_objects.clear();
     uint64_t added = 0;
-    for(auto& bl_id: arg.m_block_ids)
+    for (size_t i = 0; i < arg.m_block_ids.size(); ++i)
     {
-      context.m_needed_objects.push_back(bl_id);
+      const uint64_t block_weight = arg.m_block_weights.empty() ? 0 : arg.m_block_weights[i];
+      context.m_needed_objects.push_back(std::make_pair(arg.m_block_ids[i], block_weight));
       if (++added == n_use_blocks)
         break;
     }
@@ -2183,7 +2306,7 @@ skip:
   {
     NOTIFY_NEW_FLUFFY_BLOCK::request fluffy_arg = AUTO_VAL_INIT(fluffy_arg);
     fluffy_arg.current_blockchain_height = arg.current_blockchain_height;
-    std::vector<blobdata> fluffy_txs;
+    std::vector<tx_blob_entry> fluffy_txs;
     fluffy_arg.b = arg.b;
     fluffy_arg.b.txs = fluffy_txs;