author    | Riccardo Spagni <ric@spagni.net> | 2018-12-12 11:59:25 +0200
committer | Riccardo Spagni <ric@spagni.net> | 2018-12-12 11:59:25 +0200
commit    | 83684ea515182118396e64afd8d85168c761c107 (patch)
tree      | de38bc4a744e771610d81f5e76cbec478d0aeb26 /src
parent    | Merge pull request #4908 (diff)
parent    | blockchain: avoid unnecessary DB lookups when syncing (diff)
download  | monero-83684ea515182118396e64afd8d85168c761c107.tar.xz
Merge pull request #4909
756684bb blockchain: avoid unnecessary DB lookups when syncing (moneromooo-monero)
Diffstat (limited to 'src')
-rw-r--r-- | src/cryptonote_core/blockchain.cpp | 117
-rw-r--r-- | src/cryptonote_core/blockchain.h   |   4
2 files changed, 86 insertions, 35 deletions
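The change below threads a new `extra_tx_map` of outputs created by the blocks currently being synced into `output_scan_worker`, so that offsets referring to outputs created earlier in the same incoming batch can be resolved from memory instead of being retried against the DB later. A minimal, self-contained sketch of that fallback idea (not the actual Monero code; the toy "DB", names, and numbers are illustrative only):

```cpp
// Minimal sketch of the fallback lookup idea (not the actual Monero code):
// offsets the "DB" does not know about yet are served from an in-memory list
// of outputs created by blocks in the same incoming batch.
#include <cstdint>
#include <cstdio>
#include <vector>

struct output_data { uint64_t key; };  // stand-in for cryptonote::output_data_t

std::vector<output_data> scan_outputs(const std::vector<output_data> &db,     // outputs already stored
                                      const std::vector<output_data> &batch,  // outputs created by the in-flight batch
                                      const std::vector<uint64_t> &offsets)   // global output indices requested
{
  std::vector<output_data> found;
  for (uint64_t idx : offsets)
  {
    if (idx < db.size())
      found.push_back(db[idx]);                  // normal DB hit
    else if (idx < db.size() + batch.size())
      found.push_back(batch[idx - db.size()]);   // created earlier in this batch, not yet in the DB
    else
      break;                                     // unknown index: stop and let the caller handle the miss
  }
  return found;
}

int main()
{
  const std::vector<output_data> db    = {{10}, {11}, {12}};  // three outputs already in the DB
  const std::vector<output_data> batch = {{13}, {14}};        // two more from the batch being synced
  for (const output_data &o : scan_outputs(db, batch, {1, 3, 4}))
    std::printf("%llu\n", (unsigned long long)o.key);         // prints 11, 13, 14
  return 0;
}
```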
diff --git a/src/cryptonote_core/blockchain.cpp b/src/cryptonote_core/blockchain.cpp
index 1cc7d718c..c6a3c6180 100644
--- a/src/cryptonote_core/blockchain.cpp
+++ b/src/cryptonote_core/blockchain.cpp
@@ -3755,7 +3755,7 @@ void Blockchain::set_enforce_dns_checkpoints(bool enforce_checkpoints)
 }
 //------------------------------------------------------------------
-void Blockchain::block_longhash_worker(uint64_t height, const std::vector<block> &blocks, std::unordered_map<crypto::hash, crypto::hash> &map) const
+void Blockchain::block_longhash_worker(uint64_t height, const epee::span<const block> &blocks, std::unordered_map<crypto::hash, crypto::hash> &map) const
 {
   TIME_MEASURE_START(t);
   slow_hash_allocate_state();
@@ -3841,11 +3841,33 @@ bool Blockchain::cleanup_handle_incoming_blocks(bool force_sync)
 }
 //------------------------------------------------------------------
-void Blockchain::output_scan_worker(const uint64_t amount, const std::vector<uint64_t> &offsets, std::vector<output_data_t> &outputs) const
+void Blockchain::output_scan_worker(const uint64_t amount, const std::vector<uint64_t> &offsets, std::vector<output_data_t> &outputs, const std::vector<output_data_t> &extra_tx_map) const
 {
   try
   {
     m_db->get_output_key(epee::span<const uint64_t>(&amount, 1), offsets, outputs, true);
+    if (outputs.size() < offsets.size())
+    {
+      const uint64_t n_outputs = m_db->get_num_outputs(amount);
+      for (size_t i = outputs.size(); i < offsets.size(); ++i)
+      {
+        uint64_t idx = offsets[i];
+        if (idx < n_outputs)
+        {
+          MWARNING("Index " << idx << " not found in db for amount " << amount << ", but it is less than the number of entries");
+          break;
+        }
+        else if (idx < n_outputs + extra_tx_map.size())
+        {
+          outputs.push_back(extra_tx_map[idx - n_outputs]);
+        }
+        else
+        {
+          MWARNING("missed " << amount << "/" << idx << " in " << extra_tx_map.size() << " (chain " << n_outputs << ")");
+          break;
+        }
+      }
+    }
   }
   catch (const std::exception& e)
   {
@@ -3960,6 +3982,34 @@ uint64_t Blockchain::prevalidate_block_hashes(uint64_t height, const std::vector
 // vs [k_image, output_keys] (m_scan_table). This is faster because it takes advantage of bulk queries
 // and is threaded if possible. The table (m_scan_table) will be used later when querying output
 // keys.
+static bool update_output_map(std::map<uint64_t, std::vector<output_data_t>> &extra_tx_map, const transaction &tx, uint64_t height, bool miner)
+{
+  MTRACE("Blockchain::" << __func__);
+  for (size_t i = 0; i < tx.vout.size(); ++i)
+  {
+    const auto &out = tx.vout[i];
+    if (out.target.type() != typeid(txout_to_key))
+      continue;
+    const txout_to_key &out_to_key = boost::get<txout_to_key>(out.target);
+    rct::key commitment;
+    uint64_t amount = out.amount;
+    if (miner && tx.version == 2)
+    {
+      commitment = rct::zeroCommit(amount);
+      amount = 0;
+    }
+    else if (tx.version > 1)
+    {
+      CHECK_AND_ASSERT_MES(i < tx.rct_signatures.outPk.size(), false, "Invalid outPk size");
+      commitment = tx.rct_signatures.outPk[i].mask;
+    }
+    else
+      commitment = rct::zero();
+    extra_tx_map[amount].push_back(output_data_t{out_to_key.key, tx.unlock_time, height, commitment});
+  }
+  return true;
+}
+
 bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete_entry> &blocks_entry)
 {
   MTRACE("Blockchain::" << __func__);
@@ -4006,42 +4056,40 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
     m_blockchain_lock.lock();
   }

-  if ((m_db->height() + blocks_entry.size()) < m_blocks_hash_check.size())
+  const uint64_t height = m_db->height();
+  if ((height + blocks_entry.size()) < m_blocks_hash_check.size())
     return true;

   bool blocks_exist = false;
   tools::threadpool& tpool = tools::threadpool::getInstance();
-  uint64_t threads = tpool.get_max_concurrency();
+  unsigned threads = tpool.get_max_concurrency();
+  std::vector<block> blocks;
+  blocks.resize(blocks_entry.size());

-  if (blocks_entry.size() > 1 && threads > 1 && m_max_prepare_blocks_threads > 1)
+  if (1)
   {
     // limit threads, default limit = 4
     if(threads > m_max_prepare_blocks_threads)
       threads = m_max_prepare_blocks_threads;

-    uint64_t height = m_db->height();
-    int batches = blocks_entry.size() / threads;
-    int extra = blocks_entry.size() % threads;
+    unsigned int batches = blocks_entry.size() / threads;
+    unsigned int extra = blocks_entry.size() % threads;
     MDEBUG("block_batches: " << batches);
     std::vector<std::unordered_map<crypto::hash, crypto::hash>> maps(threads);
-    std::vector < std::vector < block >> blocks(threads);
     auto it = blocks_entry.begin();
+    unsigned blockidx = 0;

-    for (uint64_t i = 0; i < threads; i++)
+    for (unsigned i = 0; i < threads; i++)
     {
-      blocks[i].reserve(batches + 1);
-      for (int j = 0; j < batches; j++)
+      for (unsigned int j = 0; j < batches; j++, ++blockidx)
       {
-        block block;
+        block &block = blocks[blockidx];
         if (!parse_and_validate_block_from_blob(it->block, block))
-        {
-          std::advance(it, 1);
-          continue;
-        }
+          return false;

         // check first block and skip all blocks if its not chained properly
-        if (i == 0 && j == 0)
+        if (blockidx == 0)
         {
           crypto::hash tophash = m_db->top_block_hash();
           if (block.prev_id != tophash)
@@ -4056,20 +4104,16 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
           break;
         }

-        blocks[i].push_back(std::move(block));
         std::advance(it, 1);
       }
     }

-    for (int i = 0; i < extra && !blocks_exist; i++)
+    for (unsigned i = 0; i < extra && !blocks_exist; i++, blockidx++)
     {
-      block block;
+      block &block = blocks[blockidx];
       if (!parse_and_validate_block_from_blob(it->block, block))
-      {
-        std::advance(it, 1);
-        continue;
-      }
+        return false;

       if (have_block(get_block_hash(block)))
       {
@@ -4077,7 +4121,6 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
         break;
       }

-      blocks[i].push_back(std::move(block));
       std::advance(it, 1);
     }

@@ -4086,10 +4129,13 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
       m_blocks_longhash_table.clear();
       uint64_t thread_height = height;
       tools::threadpool::waiter waiter;
-      for (uint64_t i = 0; i < threads; i++)
+      for (unsigned int i = 0; i < threads; i++)
       {
-        tpool.submit(&waiter, boost::bind(&Blockchain::block_longhash_worker, this, thread_height, std::cref(blocks[i]), std::ref(maps[i])), true);
-        thread_height += blocks[i].size();
+        unsigned nblocks = batches;
+        if (i < extra)
+          ++nblocks;
+        tpool.submit(&waiter, boost::bind(&Blockchain::block_longhash_worker, this, thread_height, epee::span<const block>(&blocks[i], nblocks), std::ref(maps[i])), true);
+        thread_height += nblocks;
       }

       waiter.wait(&tpool);
@@ -4132,7 +4178,7 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
   // [input] stores all absolute_offsets for each amount
   std::map<uint64_t, std::vector<uint64_t>> offset_map;
   // [output] stores all output_data_t for each absolute_offset
-  std::map<uint64_t, std::vector<output_data_t>> tx_map;
+  std::map<uint64_t, std::vector<output_data_t>> tx_map, extra_tx_map;
   std::vector<std::pair<cryptonote::transaction, crypto::hash>> txes(total_txs);

 #define SCAN_TABLE_QUIT(m) \
@@ -4143,12 +4189,14 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
   } while(0); \

   // generate sorted tables for all amounts and absolute offsets
-  size_t tx_index = 0;
+  size_t tx_index = 0, block_index = 0;
   for (const auto &entry : blocks_entry)
   {
     if (m_cancel)
       return false;

+    if (!update_output_map(extra_tx_map, blocks[block_index].miner_tx, height + block_index, true))
+      SCAN_TABLE_QUIT("Error building extra tx map.");
     for (const auto &tx_blob : entry.txs)
     {
       if (tx_index >= txes.size())
@@ -4207,7 +4255,10 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
         offset_map[in_to_key.amount].push_back(offset);

       }
+      if (!update_output_map(extra_tx_map, tx, height + block_index, false))
+        SCAN_TABLE_QUIT("Error building extra tx map.");
     }
+    ++block_index;
   }

   // sort and remove duplicate absolute_offsets in offset_map
@@ -4230,7 +4281,7 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
     for (size_t i = 0; i < amounts.size(); i++)
     {
       uint64_t amount = amounts[i];
-      tpool.submit(&waiter, boost::bind(&Blockchain::output_scan_worker, this, amount, std::cref(offset_map[amount]), std::ref(tx_map[amount])), true);
+      tpool.submit(&waiter, boost::bind(&Blockchain::output_scan_worker, this, amount, std::cref(offset_map[amount]), std::ref(tx_map[amount]), std::cref(extra_tx_map[amount])), true);
     }
     waiter.wait(&tpool);
   }
@@ -4239,7 +4290,7 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
     for (size_t i = 0; i < amounts.size(); i++)
     {
       uint64_t amount = amounts[i];
-      output_scan_worker(amount, offset_map[amount], tx_map[amount]);
+      output_scan_worker(amount, offset_map[amount], tx_map[amount], extra_tx_map[amount]);
     }
   }
diff --git a/src/cryptonote_core/blockchain.h b/src/cryptonote_core/blockchain.h
index f1e366c9e..877828f81 100644
--- a/src/cryptonote_core/blockchain.h
+++ b/src/cryptonote_core/blockchain.h
@@ -918,7 +918,7 @@ namespace cryptonote
      * @param outputs return-by-reference the outputs collected
      */
     void output_scan_worker(const uint64_t amount,const std::vector<uint64_t> &offsets,
-        std::vector<output_data_t> &outputs) const;
+        std::vector<output_data_t> &outputs, const std::vector<output_data_t> &extra_tx_map) const;

     /**
      * @brief computes the "short" and "long" hashes for a set of blocks
@@ -927,7 +927,7 @@ namespace cryptonote
      * @param blocks the blocks to be hashed
      * @param map return-by-reference the hashes for each block
      */
-    void block_longhash_worker(uint64_t height, const std::vector<block> &blocks,
+    void block_longhash_worker(uint64_t height, const epee::span<const block> &blocks,
         std::unordered_map<crypto::hash, crypto::hash> &map) const;

     /**