-rw-r--r--  CMakeLists.txt                                              96
-rw-r--r--  src/CMakeLists.txt                                          16
-rw-r--r--  src/blockchain_db/berkeleydb/db_bdb.h                       28
-rw-r--r--  src/blockchain_db/blockchain_db.h                           26
-rw-r--r--  src/blockchain_db/lmdb/db_lmdb.cpp                         498
-rw-r--r--  src/blockchain_db/lmdb/db_lmdb.h                            12
-rw-r--r--  src/blocks/blockexports.c                                   16
-rw-r--r--  src/common/dns_utils.cpp                                    77
-rw-r--r--  src/crypto/aesb.c                                          134
-rw-r--r--  src/crypto/slow-hash.c                                     190
-rw-r--r--  src/cryptonote_core/CMakeLists.txt                           6
-rw-r--r--  src/cryptonote_core/blockchain.cpp                        4542
-rw-r--r--  src/cryptonote_core/blockchain.h                            64
-rw-r--r--  src/cryptonote_core/blockchain_storage.cpp                  90
-rw-r--r--  src/cryptonote_core/checkpoints_create.cpp                  76
-rw-r--r--  src/cryptonote_core/cryptonote_basic_impl.cpp               18
-rw-r--r--  src/cryptonote_core/cryptonote_core.cpp                     62
-rw-r--r--  src/cryptonote_core/cryptonote_core.h                       18
-rw-r--r--  src/cryptonote_core/difficulty.cpp                          82
-rw-r--r--  src/cryptonote_core/miner.cpp                               29
-rw-r--r--  src/cryptonote_core/tx_pool.cpp                             24
-rw-r--r--  src/cryptonote_core/tx_pool.h                               16
-rw-r--r--  src/cryptonote_protocol/cryptonote_protocol_handler.inl    555
-rw-r--r--  src/daemon/CMakeLists.txt                                   12
-rw-r--r--  src/daemon/main.cpp                                         22
-rw-r--r--  src/p2p/net_node.h                                          66
-rw-r--r--  src/p2p/net_node.inl                                       406
27 files changed, 3599 insertions(+), 3582 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index aef7a52e6..adf5ed364 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,21 +1,21 @@
# Copyright (c) 2014-2015, The Monero Project
-#
+#
# All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
-#
+#
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
-#
+#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
+#
# Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
cmake_minimum_required(VERSION 2.8.7)
@@ -46,30 +46,30 @@ function (die msg)
endfunction ()
if (NOT ${ARCH} STREQUAL "")
- string(SUBSTRING ${ARCH} 0 3 IS_ARM)
- string(TOLOWER ${IS_ARM} IS_ARM)
-
- if (${IS_ARM} STREQUAL "arm")
- string(SUBSTRING ${ARCH} 0 5 ARM_TEST)
- string(TOLOWER ${ARM_TEST} ARM_TEST)
-
- if (${ARM_TEST} STREQUAL "armv6")
- set(ARM6 1)
- else()
- set(ARM6 0)
- endif()
-
- if (${ARM_TEST} STREQUAL "armv7")
- set(ARM7 1)
- else()
- set(ARM7 0)
- endif()
- endif()
+ string(SUBSTRING ${ARCH} 0 3 IS_ARM)
+ string(TOLOWER ${IS_ARM} IS_ARM)
+
+ if (${IS_ARM} STREQUAL "arm")
+ string(SUBSTRING ${ARCH} 0 5 ARM_TEST)
+ string(TOLOWER ${ARM_TEST} ARM_TEST)
+
+ if (${ARM_TEST} STREQUAL "armv6")
+ set(ARM6 1)
+ else()
+ set(ARM6 0)
+ endif()
+
+ if (${ARM_TEST} STREQUAL "armv7")
+ set(ARM7 1)
+ else()
+ set(ARM7 0)
+ endif()
+ endif()
endif()
if(WIN32 OR ARM7 OR ARM6)
- set(CMAKE_C_FLAGS_RELEASE "-O2 -DNDEBUG")
- set(CMAKE_CXX_FLAGS_RELEASE "-O2 -DNDEBUG")
+ set(CMAKE_C_FLAGS_RELEASE "-O2 -DNDEBUG")
+ set(CMAKE_CXX_FLAGS_RELEASE "-O2 -DNDEBUG")
endif()
# set this to 0 if per-block checkpoint needs to be disabled
@@ -137,7 +137,7 @@ elseif(CMAKE_SYSTEM_NAME MATCHES "DragonFly.*|FreeBSD")
endif()
# TODO: check bsdi, NetBSD, OpenBSD, to see if they need the same FreeBSD changes
-#
+#
# elseif(CMAKE_SYSTEM_NAME MATCHES "kNetBSD.*|NetBSD.*")
# set(NETBSD TRUE)
# elseif(CMAKE_SYSTEM_NAME MATCHES "kOpenBSD.*|OpenBSD.*")
@@ -194,27 +194,27 @@ endif()
set(BERKELEY_DB 0)
if (DATABASE STREQUAL "lmdb")
set(BLOCKCHAIN_DB DB_LMDB)
-
+
# temporarily allow mingw to compile with berkeley_db,
# regardless if building static or not
if(NOT STATIC OR MINGW)
- find_package(BerkeleyDB)
-
- if(NOT BERKELEY_DB_LIBRARIES)
- message(STATUS "BerkeleyDB not found and has been disabled.")
- else()
- message(STATUS "Found BerkeleyDB include (db.h) in ${BERKELEY_DB_INCLUDE_DIR}")
- if(BERKELEY_DB_LIBRARIES)
- message(STATUS "Found BerkeleyDB shared library")
- set(BDB_STATIC false CACHE BOOL "BDB Static flag")
- set(BDB_INCLUDE ${BERKELEY_DB_INCLUDE_DIR} CACHE STRING "BDB include path")
- set(BDB_LIBRARY ${BERKELEY_DB_LIBRARIES} CACHE STRING "BDB library name")
- set(BDB_LIBRARY_DIRS "" CACHE STRING "BDB Library dirs")
- set(BERKELEY_DB 1)
- else()
- message(STATUS "Found BerkeleyDB includes, but could not find BerkeleyDB library. Please make sure you have installed libdb and libdb-dev or the equivalent")
- endif()
- endif()
+ find_package(BerkeleyDB)
+
+ if(NOT BERKELEY_DB_LIBRARIES)
+ message(STATUS "BerkeleyDB not found and has been disabled.")
+ else()
+ message(STATUS "Found BerkeleyDB include (db.h) in ${BERKELEY_DB_INCLUDE_DIR}")
+ if(BERKELEY_DB_LIBRARIES)
+ message(STATUS "Found BerkeleyDB shared library")
+ set(BDB_STATIC false CACHE BOOL "BDB Static flag")
+ set(BDB_INCLUDE ${BERKELEY_DB_INCLUDE_DIR} CACHE STRING "BDB include path")
+ set(BDB_LIBRARY ${BERKELEY_DB_LIBRARIES} CACHE STRING "BDB library name")
+ set(BDB_LIBRARY_DIRS "" CACHE STRING "BDB Library dirs")
+ set(BERKELEY_DB 1)
+ else()
+ message(STATUS "Found BerkeleyDB includes, but could not find BerkeleyDB library. Please make sure you have installed libdb and libdb-dev or the equivalent")
+ endif()
+ endif()
endif()
if (BERKELEY_DB AND (ARCH_WIDTH STREQUAL "32" OR ARM6 OR ARM7))
@@ -234,7 +234,7 @@ endif()
if(BERKELEY_DB)
add_definitions("-DBERKELEY_DB")
endif()
-
+
add_definitions("-DBLOCKCHAIN_DB=${BLOCKCHAIN_DB}")
if (UNIX AND NOT APPLE)
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 34c52919e..c3fecadae 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -1,21 +1,21 @@
# Copyright (c) 2014-2015, The Monero Project
-#
+#
# All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
-#
+#
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
-#
+#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
+#
# Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
if (WIN32 OR STATIC)
@@ -105,5 +105,5 @@ add_subdirectory(daemon)
add_subdirectory(blockchain_utilities)
if(PER_BLOCK_CHECKPOINT)
- add_subdirectory(blocks)
+ add_subdirectory(blocks)
endif()
diff --git a/src/blockchain_db/berkeleydb/db_bdb.h b/src/blockchain_db/berkeleydb/db_bdb.h
index c79d8b26f..ce3da91e8 100644
--- a/src/blockchain_db/berkeleydb/db_bdb.h
+++ b/src/blockchain_db/berkeleydb/db_bdb.h
@@ -1,20 +1,20 @@
// Copyright (c) 2014, The Monero Project
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -99,18 +99,18 @@ private:
template <typename T>
class bdb_safe_buffer
{
- // limit the number of buffers to 8
- const size_t MaxAllowedBuffers = 8;
+ // limit the number of buffers to 8
+ const size_t MaxAllowedBuffers = 8;
public:
bdb_safe_buffer(size_t num_buffers, size_t count)
{
- if(num_buffers > MaxAllowedBuffers)
- num_buffers = MaxAllowedBuffers;
-
- set_count(num_buffers);
- for (size_t i = 0; i < num_buffers; i++)
- m_buffers.push_back((T) malloc(sizeof(T) * count));
- m_buffer_count = count;
+ if(num_buffers > MaxAllowedBuffers)
+ num_buffers = MaxAllowedBuffers;
+
+ set_count(num_buffers);
+ for (size_t i = 0; i < num_buffers; i++)
+ m_buffers.push_back((T) malloc(sizeof(T) * count));
+ m_buffer_count = count;
}
~bdb_safe_buffer()
diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h
index 193a34386..702de18b5 100644
--- a/src/blockchain_db/blockchain_db.h
+++ b/src/blockchain_db/blockchain_db.h
@@ -1,21 +1,21 @@
// Copyright (c) 2014, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -85,7 +85,7 @@
* size_t get_block_size(height)
* difficulty get_block_cumulative_difficulty(height)
* uint64_t get_block_already_generated_coins(height)
- * uint64_t get_block_timestamp(height)
+ * uint64_t get_block_timestamp(height)
* uint64_t get_top_block_timestamp()
* hash get_block_hash_from_height(height)
* blocks get_blocks_range(height1, height2)
@@ -139,12 +139,12 @@ typedef std::pair<crypto::hash, uint64_t> tx_out_index;
#pragma pack(push, 1)
struct output_data_t
{
- crypto::public_key pubkey;
- uint64_t unlock_time;
- uint64_t height;
+ crypto::public_key pubkey;
+ uint64_t unlock_time;
+ uint64_t height;
};
#pragma pack(pop)
-
+
/***********************************
* Exception Definitions
***********************************/
@@ -300,7 +300,7 @@ private:
/*********************************************************************
* private concrete members
- *********************************************************************/
+ *********************************************************************/
// private version of pop_block, for undoing if an add_block goes tits up
void pop_block();
@@ -479,7 +479,7 @@ public:
virtual tx_out_index get_output_tx_and_index(const uint64_t& amount, const uint64_t& index) = 0;
virtual void get_output_tx_and_index(const uint64_t& amount, const std::vector<uint64_t> &offsets, std::vector<tx_out_index> &indices) = 0;
virtual void get_output_key(const uint64_t &amount, const std::vector<uint64_t> &offsets, std::vector<output_data_t> &outputs) = 0;
-
+
virtual bool can_thread_bulk_indices() const = 0;
// return a vector of indices corresponding to the global output index for
diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp
index f76fee87a..4a4550179 100644
--- a/src/blockchain_db/lmdb/db_lmdb.cpp
+++ b/src/blockchain_db/lmdb/db_lmdb.cpp
@@ -1,20 +1,20 @@
// Copyright (c) 2014, The Monero Project
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -72,19 +72,19 @@ struct lmdb_cur
done = false;
}
- ~lmdb_cur()
- {
- close();
- }
+ ~lmdb_cur()
+ {
+ close();
+ }
- operator MDB_cursor*()
- {
- return m_cur;
- }
- operator MDB_cursor**()
- {
- return &m_cur;
- }
+ operator MDB_cursor*()
+ {
+ return m_cur;
+ }
+ operator MDB_cursor**()
+ {
+ return &m_cur;
+ }
void close()
{
@@ -103,8 +103,8 @@ private:
template<typename T>
struct MDB_val_copy: public MDB_val
{
- MDB_val_copy(const T &t) :
- t_copy(t)
+ MDB_val_copy(const T &t) :
+ t_copy(t)
{
mv_size = sizeof (T);
mv_data = &t_copy;
@@ -116,8 +116,8 @@ private:
template<>
struct MDB_val_copy<cryptonote::blobdata>: public MDB_val
{
- MDB_val_copy(const cryptonote::blobdata &bd) :
- data(new char[bd.size()])
+ MDB_val_copy(const cryptonote::blobdata &bd) :
+ data(new char[bd.size()])
{
memcpy(data.get(), bd.data(), bd.size());
mv_size = bd.size();
@@ -130,8 +130,8 @@ private:
template<>
struct MDB_val_copy<const char*>: public MDB_val
{
- MDB_val_copy(const char *s) :
- data(strdup(s))
+ MDB_val_copy(const char *s) :
+ data(strdup(s))
{
mv_size = strlen(s) + 1; // include the NUL, makes it easier for compares
mv_data = data.get();
@@ -160,16 +160,16 @@ auto compare_uint8 = [](const MDB_val *a, const MDB_val *b)
int compare_hash32(const MDB_val *a, const MDB_val *b)
{
- uint32_t *va = (uint32_t*) a->mv_data;
- uint32_t *vb = (uint32_t*) b->mv_data;
- for (int n = 7; n >= 0; n--)
- {
- if (va[n] == vb[n])
- continue;
- return va[n] < vb[n] ? -1 : 1;
- }
+ uint32_t *va = (uint32_t*) a->mv_data;
+ uint32_t *vb = (uint32_t*) b->mv_data;
+ for (int n = 7; n >= 0; n--)
+ {
+ if (va[n] == vb[n])
+ continue;
+ return va[n] < vb[n] ? -1 : 1;
+ }
- return 0;
+ return 0;
}
int compare_string(const MDB_val *a, const MDB_val *b)
@@ -313,18 +313,18 @@ void BlockchainLMDB::do_resize(uint64_t increase_size)
// check disk capacity
try
{
- boost::filesystem::path path(m_folder);
- boost::filesystem::space_info si = boost::filesystem::space(path);
- if(si.available < add_size)
- {
- LOG_PRINT_RED_L0("!! WARNING: Insufficient free space to extend database !!: " << si.available / 1LL << 20L);
- return;
- }
+ boost::filesystem::path path(m_folder);
+ boost::filesystem::space_info si = boost::filesystem::space(path);
+ if(si.available < add_size)
+ {
+ LOG_PRINT_RED_L0("!! WARNING: Insufficient free space to extend database !!: " << si.available / 1LL << 20L);
+ return;
+ }
}
catch(...)
{
- // print something but proceed.
- LOG_PRINT_YELLOW("Unable to query free disk space.", LOG_LEVEL_0);
+ // print something but proceed.
+ LOG_PRINT_YELLOW("Unable to query free disk space.", LOG_LEVEL_0);
}
MDB_envinfo mei;
@@ -417,7 +417,7 @@ bool BlockchainLMDB::need_resize(uint64_t threshold_size) const
}
return false;
#else
- return false;
+ return false;
#endif
}
@@ -505,7 +505,7 @@ uint64_t BlockchainLMDB::get_estimated_batch_size(uint64_t batch_num_blocks) con
}
void BlockchainLMDB::add_block(const block& blk, const size_t& block_size, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated,
- const crypto::hash& blk_hash)
+ const crypto::hash& blk_hash)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
@@ -685,15 +685,15 @@ void BlockchainLMDB::add_output(const crypto::hash& tx_hash, const tx_out& tx_ou
if (tx_output.target.type() == typeid(txout_to_key))
{
- output_data_t od;
- od.pubkey = boost::get < txout_to_key > (tx_output.target).key;
- od.unlock_time = unlock_time;
- od.height = m_height;
+ output_data_t od;
+ od.pubkey = boost::get < txout_to_key > (tx_output.target).key;
+ od.unlock_time = unlock_time;
+ od.height = m_height;
- MDB_val_copy<output_data_t> data(od);
- //MDB_val_copy<crypto::public_key> val_pubkey(boost::get<txout_to_key>(tx_output.target).key);
- if (mdb_put(*m_write_txn, m_output_keys, &k, &data, 0))
- throw0(DB_ERROR("Failed to add output pubkey to db transaction"));
+ MDB_val_copy<output_data_t> data(od);
+ //MDB_val_copy<crypto::public_key> val_pubkey(boost::get<txout_to_key>(tx_output.target).key);
+ if (mdb_put(*m_write_txn, m_output_keys, &k, &data, 0))
+ throw0(DB_ERROR("Failed to add output pubkey to db transaction"));
}
m_num_outputs++;
@@ -891,17 +891,17 @@ tx_out BlockchainLMDB::output_from_blob(const blobdata& blob) const
return o;
}
-uint64_t BlockchainLMDB::get_output_global_index(const uint64_t& amount, const uint64_t& index)
+uint64_t BlockchainLMDB::get_output_global_index(const uint64_t& amount, const uint64_t& index)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
- std::vector <uint64_t> offsets;
- std::vector <uint64_t> global_indices;
- offsets.push_back(index);
- get_output_global_indices(amount, offsets, global_indices);
- if (!global_indices.size())
+ std::vector <uint64_t> offsets;
+ std::vector <uint64_t> global_indices;
+ offsets.push_back(index);
+ get_output_global_indices(amount, offsets, global_indices);
+ if (!global_indices.size())
throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found"));
- return global_indices[0];
+ return global_indices[0];
}
void BlockchainLMDB::check_open() const
@@ -956,7 +956,7 @@ void BlockchainLMDB::open(const std::string& filename, const int mdb_flags)
// check for existing LMDB files in base directory
boost::filesystem::path old_files = direc.parent_path();
- if (boost::filesystem::exists(old_files / "data.mdb") || boost::filesystem::exists(old_files / "lock.mdb"))
+ if (boost::filesystem::exists(old_files / "data.mdb") || boost::filesystem::exists(old_files / "lock.mdb"))
{
LOG_PRINT_L0("Found existing LMDB files in " << old_files.string());
LOG_PRINT_L0("Move data.mdb and/or lock.mdb to " << filename << ", or delete them, and then restart");
@@ -1022,7 +1022,7 @@ void BlockchainLMDB::open(const std::string& filename, const int mdb_flags)
lmdb_db_open(txn, LMDB_OUTPUT_TXS, MDB_INTEGERKEY | MDB_CREATE, m_output_txs, "Failed to open db handle for m_output_txs");
lmdb_db_open(txn, LMDB_OUTPUT_INDICES, MDB_INTEGERKEY | MDB_CREATE, m_output_indices, "Failed to open db handle for m_output_indices");
- lmdb_db_open(txn, LMDB_OUTPUT_AMOUNTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_output_amounts, "Failed to open db handle for m_output_amounts");
+ lmdb_db_open(txn, LMDB_OUTPUT_AMOUNTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_output_amounts, "Failed to open db handle for m_output_amounts");
lmdb_db_open(txn, LMDB_OUTPUT_KEYS, MDB_INTEGERKEY | MDB_CREATE, m_output_keys, "Failed to open db handle for m_output_keys");
lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_CREATE, m_spent_keys, "Failed to open db handle for m_spent_keys");
@@ -1057,26 +1057,26 @@ void BlockchainLMDB::open(const std::string& filename, const int mdb_flags)
bool compatible = true;
- // ND: This "new" version of the lmdb database is incompatible with
- // the previous version. Ensure that the output_keys database is
- // sizeof(output_data_t) in length. Otherwise, inform user and
- // terminate.
- if(m_height > 0)
- {
- MDB_val_copy<uint64_t> k(0);
- MDB_val v;
- auto get_result = mdb_get(txn, m_output_keys, &k, &v);
- if(get_result != MDB_SUCCESS)
- {
- txn.abort();
- m_open = false;
- return;
- }
-
- // LOG_PRINT_L0("Output keys size: " << v.mv_size);
- if(v.mv_size != sizeof(output_data_t))
- compatible = false;
- }
+ // ND: This "new" version of the lmdb database is incompatible with
+ // the previous version. Ensure that the output_keys database is
+ // sizeof(output_data_t) in length. Otherwise, inform user and
+ // terminate.
+ if(m_height > 0)
+ {
+ MDB_val_copy<uint64_t> k(0);
+ MDB_val v;
+ auto get_result = mdb_get(txn, m_output_keys, &k, &v);
+ if(get_result != MDB_SUCCESS)
+ {
+ txn.abort();
+ m_open = false;
+ return;
+ }
+
+ // LOG_PRINT_L0("Output keys size: " << v.mv_size);
+ if(v.mv_size != sizeof(output_data_t))
+ compatible = false;
+ }
MDB_val_copy<const char*> k("version");
MDB_val v;
@@ -1721,11 +1721,11 @@ output_data_t BlockchainLMDB::get_output_key(const uint64_t &global_index) const
output_data_t BlockchainLMDB::get_output_key(const uint64_t& amount, const uint64_t& index)
{
- LOG_PRINT_L3("BlockchainLMDB::" << __func__);
- check_open();
+ LOG_PRINT_L3("BlockchainLMDB::" << __func__);
+ check_open();
- uint64_t glob_index = get_output_global_index(amount, index);
- return get_output_key(glob_index);
+ uint64_t glob_index = get_output_global_index(amount, index);
+ return get_output_key(glob_index);
}
tx_out_index BlockchainLMDB::get_output_tx_and_index_from_global(const uint64_t& index) const
@@ -1760,14 +1760,14 @@ tx_out_index BlockchainLMDB::get_output_tx_and_index_from_global(const uint64_t&
tx_out_index BlockchainLMDB::get_output_tx_and_index(const uint64_t& amount, const uint64_t& index)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
- std::vector < uint64_t > offsets;
- std::vector<tx_out_index> indices;
- offsets.push_back(index);
- get_output_tx_and_index(amount, offsets, indices);
- if (!indices.size())
+ std::vector < uint64_t > offsets;
+ std::vector<tx_out_index> indices;
+ offsets.push_back(index);
+ get_output_tx_and_index(amount, offsets, indices);
+ if (!indices.size())
throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found"));
- return indices[0];
+ return indices[0];
}
std::vector<uint64_t> BlockchainLMDB::get_tx_output_indices(const crypto::hash& h) const
@@ -2152,7 +2152,7 @@ void BlockchainLMDB::set_batch_transactions(bool batch_transactions)
}
uint64_t BlockchainLMDB::add_block(const block& blk, const size_t& block_size, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated,
- const std::vector<transaction>& txs)
+ const std::vector<transaction>& txs)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
@@ -2235,192 +2235,192 @@ void BlockchainLMDB::pop_block(block& blk, std::vector<transaction>& txs)
}
void BlockchainLMDB::get_output_tx_and_index_from_global(const std::vector<uint64_t> &global_indices,
- std::vector<tx_out_index> &tx_out_indices) const
+ std::vector<tx_out_index> &tx_out_indices) const
{
- LOG_PRINT_L3("BlockchainLMDB::" << __func__);
- check_open();
- tx_out_indices.clear();
+ LOG_PRINT_L3("BlockchainLMDB::" << __func__);
+ check_open();
+ tx_out_indices.clear();
- TXN_PREFIX_RDONLY();
+ TXN_PREFIX_RDONLY();
- for (const uint64_t &index : global_indices)
- {
- MDB_val_copy<uint64_t> k(index);
- MDB_val v;
+ for (const uint64_t &index : global_indices)
+ {
+ MDB_val_copy<uint64_t> k(index);
+ MDB_val v;
- auto get_result = mdb_get(*txn_ptr, m_output_txs, &k, &v);
- if (get_result == MDB_NOTFOUND)
- throw1(OUTPUT_DNE("output with given index not in db"));
- else if (get_result)
- throw0(DB_ERROR("DB error attempting to fetch output tx hash"));
+ auto get_result = mdb_get(*txn_ptr, m_output_txs, &k, &v);
+ if (get_result == MDB_NOTFOUND)
+ throw1(OUTPUT_DNE("output with given index not in db"));
+ else if (get_result)
+ throw0(DB_ERROR("DB error attempting to fetch output tx hash"));
- crypto::hash tx_hash = *(const crypto::hash*) v.mv_data;
+ crypto::hash tx_hash = *(const crypto::hash*) v.mv_data;
- get_result = mdb_get(*txn_ptr, m_output_indices, &k, &v);
- if (get_result == MDB_NOTFOUND)
- throw1(OUTPUT_DNE("output with given index not in db"));
- else if (get_result)
- throw0(DB_ERROR("DB error attempting to fetch output tx index"));
+ get_result = mdb_get(*txn_ptr, m_output_indices, &k, &v);
+ if (get_result == MDB_NOTFOUND)
+ throw1(OUTPUT_DNE("output with given index not in db"));
+ else if (get_result)
+ throw0(DB_ERROR("DB error attempting to fetch output tx index"));
- auto result = tx_out_index(tx_hash, *(const uint64_t *) v.mv_data);
- tx_out_indices.push_back(result);
- }
+ auto result = tx_out_index(tx_hash, *(const uint64_t *) v.mv_data);
+ tx_out_indices.push_back(result);
+ }
- TXN_POSTFIX_SUCCESS();
+ TXN_POSTFIX_SUCCESS();
}
void BlockchainLMDB::get_output_global_indices(const uint64_t& amount, const std::vector<uint64_t> &offsets,
- std::vector<uint64_t> &global_indices)
-{
- LOG_PRINT_L3("BlockchainLMDB::" << __func__);
- TIME_MEASURE_START(txx);
- check_open();
- global_indices.clear();
-
- uint64_t max = 0;
- for (const uint64_t &index : offsets)
- {
- if (index > max)
- max = index;
- }
-
- TXN_PREFIX_RDONLY();
-
- lmdb_cur cur(*txn_ptr, m_output_amounts);
-
- MDB_val_copy<uint64_t> k(amount);
- MDB_val v;
- auto result = mdb_cursor_get(cur, &k, &v, MDB_SET);
- if (result == MDB_NOTFOUND)
- throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found"));
- else if (result)
- throw0(DB_ERROR("DB error attempting to get an output"));
-
- size_t num_elems = 0;
- mdb_cursor_count(cur, &num_elems);
- if (max <= 1 && num_elems <= max)
- throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but output not found"));
-
- uint64_t t_dbmul = 0;
- uint64_t t_dbscan = 0;
- if (max <= 1)
- {
- for (const uint64_t& index : offsets)
- {
- mdb_cursor_get(cur, &k, &v, MDB_FIRST_DUP);
- for (uint64_t i = 0; i < index; ++i)
- {
- mdb_cursor_get(cur, &k, &v, MDB_NEXT_DUP);
- }
-
- mdb_cursor_get(cur, &k, &v, MDB_GET_CURRENT);
- uint64_t glob_index = *(const uint64_t*) v.mv_data;
- LOG_PRINT_L3("Amount: " << amount << " M0->v: " << glob_index);
- global_indices.push_back(glob_index);
- }
- }
- else
- {
- uint32_t curcount = 0;
- uint32_t blockstart = 0;
- for (const uint64_t& index : offsets)
- {
- if (index >= num_elems)
- {
- LOG_PRINT_L1("Index: " << index << " Elems: " << num_elems << " partial results found for get_output_tx_and_index");
- break;
- }
- while (index >= curcount)
- {
- TIME_MEASURE_START(db1);
- if (mdb_cursor_get(cur, &k, &v, curcount == 0 ? MDB_GET_MULTIPLE : MDB_NEXT_MULTIPLE) != 0)
- {
- // allow partial results
- result = false;
- break;
- }
-
- int count = v.mv_size / sizeof(uint64_t);
-
- blockstart = curcount;
- curcount += count;
- TIME_MEASURE_FINISH(db1);
- t_dbmul += db1;
- }
-
- LOG_PRINT_L3("Records returned: " << curcount << " Index: " << index);
- TIME_MEASURE_START(db2);
- uint64_t actual_index = index - blockstart;
- uint64_t glob_index = ((const uint64_t*) v.mv_data)[actual_index];
-
- LOG_PRINT_L3("Amount: " << amount << " M1->v: " << glob_index);
- global_indices.push_back(glob_index);
-
- TIME_MEASURE_FINISH(db2);
- t_dbscan += db2;
-
- }
- }
-
- cur.close();
- TXN_POSTFIX_SUCCESS();
-
- TIME_MEASURE_FINISH(txx);
- LOG_PRINT_L3("txx: " << txx << " db1: " << t_dbmul << " db2: " << t_dbscan);
+ std::vector<uint64_t> &global_indices)
+{
+ LOG_PRINT_L3("BlockchainLMDB::" << __func__);
+ TIME_MEASURE_START(txx);
+ check_open();
+ global_indices.clear();
+
+ uint64_t max = 0;
+ for (const uint64_t &index : offsets)
+ {
+ if (index > max)
+ max = index;
+ }
+
+ TXN_PREFIX_RDONLY();
+
+ lmdb_cur cur(*txn_ptr, m_output_amounts);
+
+ MDB_val_copy<uint64_t> k(amount);
+ MDB_val v;
+ auto result = mdb_cursor_get(cur, &k, &v, MDB_SET);
+ if (result == MDB_NOTFOUND)
+ throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found"));
+ else if (result)
+ throw0(DB_ERROR("DB error attempting to get an output"));
+
+ size_t num_elems = 0;
+ mdb_cursor_count(cur, &num_elems);
+ if (max <= 1 && num_elems <= max)
+ throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but output not found"));
+
+ uint64_t t_dbmul = 0;
+ uint64_t t_dbscan = 0;
+ if (max <= 1)
+ {
+ for (const uint64_t& index : offsets)
+ {
+ mdb_cursor_get(cur, &k, &v, MDB_FIRST_DUP);
+ for (uint64_t i = 0; i < index; ++i)
+ {
+ mdb_cursor_get(cur, &k, &v, MDB_NEXT_DUP);
+ }
+
+ mdb_cursor_get(cur, &k, &v, MDB_GET_CURRENT);
+ uint64_t glob_index = *(const uint64_t*) v.mv_data;
+ LOG_PRINT_L3("Amount: " << amount << " M0->v: " << glob_index);
+ global_indices.push_back(glob_index);
+ }
+ }
+ else
+ {
+ uint32_t curcount = 0;
+ uint32_t blockstart = 0;
+ for (const uint64_t& index : offsets)
+ {
+ if (index >= num_elems)
+ {
+ LOG_PRINT_L1("Index: " << index << " Elems: " << num_elems << " partial results found for get_output_tx_and_index");
+ break;
+ }
+ while (index >= curcount)
+ {
+ TIME_MEASURE_START(db1);
+ if (mdb_cursor_get(cur, &k, &v, curcount == 0 ? MDB_GET_MULTIPLE : MDB_NEXT_MULTIPLE) != 0)
+ {
+ // allow partial results
+ result = false;
+ break;
+ }
+
+ int count = v.mv_size / sizeof(uint64_t);
+
+ blockstart = curcount;
+ curcount += count;
+ TIME_MEASURE_FINISH(db1);
+ t_dbmul += db1;
+ }
+
+ LOG_PRINT_L3("Records returned: " << curcount << " Index: " << index);
+ TIME_MEASURE_START(db2);
+ uint64_t actual_index = index - blockstart;
+ uint64_t glob_index = ((const uint64_t*) v.mv_data)[actual_index];
+
+ LOG_PRINT_L3("Amount: " << amount << " M1->v: " << glob_index);
+ global_indices.push_back(glob_index);
+
+ TIME_MEASURE_FINISH(db2);
+ t_dbscan += db2;
+
+ }
+ }
+
+ cur.close();
+ TXN_POSTFIX_SUCCESS();
+
+ TIME_MEASURE_FINISH(txx);
+ LOG_PRINT_L3("txx: " << txx << " db1: " << t_dbmul << " db2: " << t_dbscan);
}
void BlockchainLMDB::get_output_key(const uint64_t &amount, const std::vector<uint64_t> &offsets, std::vector<output_data_t> &outputs)
{
- LOG_PRINT_L3("BlockchainLMDB::" << __func__);
- TIME_MEASURE_START(db3);
- check_open();
- outputs.clear();
+ LOG_PRINT_L3("BlockchainLMDB::" << __func__);
+ TIME_MEASURE_START(db3);
+ check_open();
+ outputs.clear();
- std::vector <uint64_t> global_indices;
- get_output_global_indices(amount, offsets, global_indices);
+ std::vector <uint64_t> global_indices;
+ get_output_global_indices(amount, offsets, global_indices);
- if (global_indices.size() > 0)
- {
- TXN_PREFIX_RDONLY();
+ if (global_indices.size() > 0)
+ {
+ TXN_PREFIX_RDONLY();
- for (const uint64_t &index : global_indices)
- {
- MDB_val_copy<uint64_t> k(index);
- MDB_val v;
+ for (const uint64_t &index : global_indices)
+ {
+ MDB_val_copy<uint64_t> k(index);
+ MDB_val v;
- auto get_result = mdb_get(*txn_ptr, m_output_keys, &k, &v);
- if (get_result == MDB_NOTFOUND)
- throw1(OUTPUT_DNE("Attempting to get output pubkey by global index, but key does not exist"));
- else if (get_result)
- throw0(DB_ERROR("Error attempting to retrieve an output pubkey from the db"));
+ auto get_result = mdb_get(*txn_ptr, m_output_keys, &k, &v);
+ if (get_result == MDB_NOTFOUND)
+ throw1(OUTPUT_DNE("Attempting to get output pubkey by global index, but key does not exist"));
+ else if (get_result)
+ throw0(DB_ERROR("Error attempting to retrieve an output pubkey from the db"));
- output_data_t data = *(const output_data_t *) v.mv_data;
- outputs.push_back(data);
- }
+ output_data_t data = *(const output_data_t *) v.mv_data;
+ outputs.push_back(data);
+ }
- TXN_POSTFIX_SUCCESS();
- }
+ TXN_POSTFIX_SUCCESS();
+ }
- TIME_MEASURE_FINISH(db3);
- LOG_PRINT_L3("db3: " << db3);
+ TIME_MEASURE_FINISH(db3);
+ LOG_PRINT_L3("db3: " << db3);
}
void BlockchainLMDB::get_output_tx_and_index(const uint64_t& amount, const std::vector<uint64_t> &offsets, std::vector<tx_out_index> &indices)
{
- LOG_PRINT_L3("BlockchainLMDB::" << __func__);
- check_open();
- indices.clear();
+ LOG_PRINT_L3("BlockchainLMDB::" << __func__);
+ check_open();
+ indices.clear();
- std::vector <uint64_t> global_indices;
- get_output_global_indices(amount, offsets, global_indices);
+ std::vector <uint64_t> global_indices;
+ get_output_global_indices(amount, offsets, global_indices);
- TIME_MEASURE_START(db3);
- if(global_indices.size() > 0)
- {
- get_output_tx_and_index_from_global(global_indices, indices);
- }
- TIME_MEASURE_FINISH(db3);
- LOG_PRINT_L3("db3: " << db3);
+ TIME_MEASURE_START(db3);
+ if(global_indices.size() > 0)
+ {
+ get_output_tx_and_index_from_global(global_indices, indices);
+ }
+ TIME_MEASURE_FINISH(db3);
+ LOG_PRINT_L3("db3: " << db3);
}
void BlockchainLMDB::set_hard_fork_starting_height(uint8_t version, uint64_t height)
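[Editor's note, not part of the diff: the reindented compare_hash32 earlier in this file orders 32-byte hashes by treating them as eight 32-bit words, compared from the most significant word down. That signature is exactly what LMDB expects for a custom key comparator; a minimal, hypothetical sketch of wiring one up (the txn and dbi names are illustrative, not taken from the diff) could look like the following.]

#include <lmdb.h>
#include <stdint.h>

// Same shape as compare_hash32 in the hunk above: compare two 32-byte
// hashes as eight 32-bit words, most significant word first.
static int compare_hash32_sketch(const MDB_val *a, const MDB_val *b)
{
    const uint32_t *va = (const uint32_t *) a->mv_data;
    const uint32_t *vb = (const uint32_t *) b->mv_data;
    for (int n = 7; n >= 0; n--)
    {
        if (va[n] != vb[n])
            return va[n] < vb[n] ? -1 : 1;
    }
    return 0;
}

// Hypothetical registration: LMDB then uses the comparator for all key
// ordering and lookups in the given database handle.
static void register_hash_comparator(MDB_txn *txn, MDB_dbi dbi)
{
    mdb_set_compare(txn, dbi, compare_hash32_sketch);
}
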
diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h
index 9e45e26cc..d1d3b942d 100644
--- a/src/blockchain_db/lmdb/db_lmdb.h
+++ b/src/blockchain_db/lmdb/db_lmdb.h
@@ -1,20 +1,20 @@
// Copyright (c) 2014, The Monero Project
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -165,7 +165,7 @@ public:
virtual tx_out_index get_output_tx_and_index_from_global(const uint64_t& index) const;
virtual void get_output_tx_and_index_from_global(const std::vector<uint64_t> &global_indices,
- std::vector<tx_out_index> &tx_out_indices) const;
+ std::vector<tx_out_index> &tx_out_indices) const;
virtual tx_out_index get_output_tx_and_index(const uint64_t& amount, const uint64_t& index);
virtual void get_output_tx_and_index(const uint64_t& amount, const std::vector<uint64_t> &offsets, std::vector<tx_out_index> &indices);
diff --git a/src/blocks/blockexports.c b/src/blocks/blockexports.c
index 3fa3d4d07..26adaad62 100644
--- a/src/blocks/blockexports.c
+++ b/src/blocks/blockexports.c
@@ -49,18 +49,18 @@ extern const unsigned char _binary_testnet_blocks_end[];
const unsigned char *get_blocks_dat_start(int testnet)
{
- if (testnet)
- return _binary_testnet_blocks_start;
- else
- return _binary_blocks_start;
+ if (testnet)
+ return _binary_testnet_blocks_start;
+ else
+ return _binary_blocks_start;
}
size_t get_blocks_dat_size(int testnet)
{
- if (testnet)
- return (size_t) (_binary_testnet_blocks_end - _binary_testnet_blocks_start);
- else
- return (size_t) (_binary_blocks_end - _binary_blocks_start);
+ if (testnet)
+ return (size_t) (_binary_testnet_blocks_end - _binary_testnet_blocks_start);
+ else
+ return (size_t) (_binary_blocks_end - _binary_blocks_start);
}
#endif
diff --git a/src/common/dns_utils.cpp b/src/common/dns_utils.cpp
index 2ae5d9624..5bb64d4e5 100644
--- a/src/common/dns_utils.cpp
+++ b/src/common/dns_utils.cpp
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -86,7 +86,7 @@ get_builtin_cert(void)
static const char*
get_builtin_ds(void)
{
- return
+ return
". IN DS 19036 8 2 49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5\n";
}
@@ -183,30 +183,51 @@ struct DNSResolverData
DNSResolver::DNSResolver() : m_data(new DNSResolverData())
{
+ int use_dns_public = 0;
+ const char* dns_public_addr = "8.8.4.4";
+ if (auto res = getenv("DNS_PUBLIC"))
+ {
+ std::string dns_public(res);
+ // TODO: could allow parsing of IP and protocol: e.g. DNS_PUBLIC=tcp:8.8.8.8
+ if (dns_public == "tcp")
+ {
+ LOG_PRINT_L0("Using public DNS server: " << dns_public_addr << " (TCP)");
+ use_dns_public = 1;
+ }
+ }
+
// init libunbound context
m_data->m_ub_context = ub_ctx_create();
- // look for "/etc/resolv.conf" and "/etc/hosts" or platform equivalent
- ub_ctx_resolvconf(m_data->m_ub_context, NULL);
- ub_ctx_hosts(m_data->m_ub_context, NULL);
-
- #ifdef DEVELOPER_LIBUNBOUND_OLD
- #pragma message "Using the work around for old libunbound"
- { // work around for bug https://www.nlnetlabs.nl/bugs-script/show_bug.cgi?id=515 needed for it to compile on e.g. Debian 7
- char * ds_copy = NULL; // this will be the writable copy of string that bugged version of libunbound requires
- try {
- char * ds_copy = strdup( ::get_builtin_ds() );
- ub_ctx_add_ta(m_data->m_ub_context, ds_copy);
- } catch(...) { // probably not needed but to work correctly in every case...
- if (ds_copy) { free(ds_copy); ds_copy=NULL; } // for the strdup
- throw ;
- }
- if (ds_copy) { free(ds_copy); ds_copy=NULL; } // for the strdup
- }
- #else
- // normal version for fixed libunbound
- ub_ctx_add_ta(m_data->m_ub_context, ::get_builtin_ds() );
- #endif
+ if (use_dns_public)
+ {
+ ub_ctx_set_fwd(m_data->m_ub_context, dns_public_addr);
+ ub_ctx_set_option(m_data->m_ub_context, "do-udp:", "no");
+ ub_ctx_set_option(m_data->m_ub_context, "do-tcp:", "yes");
+ }
+ else {
+ // look for "/etc/resolv.conf" and "/etc/hosts" or platform equivalent
+ ub_ctx_resolvconf(m_data->m_ub_context, NULL);
+ ub_ctx_hosts(m_data->m_ub_context, NULL);
+ }
+
+ #ifdef DEVELOPER_LIBUNBOUND_OLD
+ #pragma message "Using the work around for old libunbound"
+ { // work around for bug https://www.nlnetlabs.nl/bugs-script/show_bug.cgi?id=515 needed for it to compile on e.g. Debian 7
+ char * ds_copy = NULL; // this will be the writable copy of string that bugged version of libunbound requires
+ try {
+ char * ds_copy = strdup( ::get_builtin_ds() );
+ ub_ctx_add_ta(m_data->m_ub_context, ds_copy);
+ } catch(...) { // probably not needed but to work correctly in every case...
+ if (ds_copy) { free(ds_copy); ds_copy=NULL; } // for the strdup
+ throw ;
+ }
+ if (ds_copy) { free(ds_copy); ds_copy=NULL; } // for the strdup
+ }
+ #else
+ // normal version for fixed libunbound
+ ub_ctx_add_ta(m_data->m_ub_context, ::get_builtin_ds() );
+ #endif
}
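[Editor's note, not part of the diff: the dns_utils.cpp hunk above switches libunbound to a forwarded TCP resolver when the process is started with DNS_PUBLIC=tcp in its environment, and its TODO suggests a richer form such as DNS_PUBLIC=tcp:8.8.8.8 that also carries the server address. A minimal sketch of that parsing, with hypothetical names not taken from the diff, might look like the following; its result would feed straight into the ub_ctx_set_fwd call shown in the hunk.]

#include <cstdlib>
#include <string>

// Hypothetical helper for the TODO above: accept either "tcp" (use the
// built-in default server) or "tcp:<ip>" (use the given server).
// Returns true if public TCP DNS should be used, filling 'addr'.
static bool parse_dns_public(std::string &addr)
{
    const char *env = std::getenv("DNS_PUBLIC");
    if (!env)
        return false;
    std::string value(env);
    if (value == "tcp")
    {
        addr = "8.8.4.4";       // same default the constructor above uses
        return true;
    }
    if (value.rfind("tcp:", 0) == 0 && value.size() > 4)
    {
        addr = value.substr(4); // everything after "tcp:" is the server IP
        return true;
    }
    return false;
}
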
diff --git a/src/crypto/aesb.c b/src/crypto/aesb.c
index 128c523ea..5d57b8af4 100644
--- a/src/crypto/aesb.c
+++ b/src/crypto/aesb.c
@@ -25,11 +25,11 @@ extern "C"
{
#endif
-#define TABLE_ALIGN 32
-#define WPOLY 0x011b
-#define N_COLS 4
-#define AES_BLOCK_SIZE 16
-#define RC_LENGTH (5 * (AES_BLOCK_SIZE / 4 - 2))
+#define TABLE_ALIGN 32
+#define WPOLY 0x011b
+#define N_COLS 4
+#define AES_BLOCK_SIZE 16
+#define RC_LENGTH (5 * (AES_BLOCK_SIZE / 4 - 2))
#if defined(_MSC_VER)
#define ALIGN __declspec(align(TABLE_ALIGN))
@@ -61,45 +61,45 @@ extern "C"
#define fwd_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(f,n),fwd_var,rf1,c))
#define sb_data(w) {\
- w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), w(0xc5),\
- w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), w(0xab), w(0x76),\
- w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), w(0x59), w(0x47), w(0xf0),\
- w(0xad), w(0xd4), w(0xa2), w(0xaf), w(0x9c), w(0xa4), w(0x72), w(0xc0),\
- w(0xb7), w(0xfd), w(0x93), w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc),\
- w(0x34), w(0xa5), w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15),\
- w(0x04), w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a),\
- w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), w(0x75),\
- w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), w(0x5a), w(0xa0),\
- w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), w(0xe3), w(0x2f), w(0x84),\
- w(0x53), w(0xd1), w(0x00), w(0xed), w(0x20), w(0xfc), w(0xb1), w(0x5b),\
- w(0x6a), w(0xcb), w(0xbe), w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf),\
- w(0xd0), w(0xef), w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85),\
- w(0x45), w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8),\
- w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), w(0xf5),\
- w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), w(0xf3), w(0xd2),\
- w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), w(0x97), w(0x44), w(0x17),\
- w(0xc4), w(0xa7), w(0x7e), w(0x3d), w(0x64), w(0x5d), w(0x19), w(0x73),\
- w(0x60), w(0x81), w(0x4f), w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88),\
- w(0x46), w(0xee), w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb),\
- w(0xe0), w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c),\
- w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), w(0x79),\
- w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), w(0x4e), w(0xa9),\
- w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), w(0x7a), w(0xae), w(0x08),\
- w(0xba), w(0x78), w(0x25), w(0x2e), w(0x1c), w(0xa6), w(0xb4), w(0xc6),\
- w(0xe8), w(0xdd), w(0x74), w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a),\
- w(0x70), w(0x3e), w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e),\
- w(0x61), w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e),\
- w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), w(0x94),\
- w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), w(0x28), w(0xdf),\
- w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), w(0xe6), w(0x42), w(0x68),\
- w(0x41), w(0x99), w(0x2d), w(0x0f), w(0xb0), w(0x54), w(0xbb), w(0x16) }
+ w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), w(0xc5),\
+ w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), w(0xab), w(0x76),\
+ w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), w(0x59), w(0x47), w(0xf0),\
+ w(0xad), w(0xd4), w(0xa2), w(0xaf), w(0x9c), w(0xa4), w(0x72), w(0xc0),\
+ w(0xb7), w(0xfd), w(0x93), w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc),\
+ w(0x34), w(0xa5), w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15),\
+ w(0x04), w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a),\
+ w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), w(0x75),\
+ w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), w(0x5a), w(0xa0),\
+ w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), w(0xe3), w(0x2f), w(0x84),\
+ w(0x53), w(0xd1), w(0x00), w(0xed), w(0x20), w(0xfc), w(0xb1), w(0x5b),\
+ w(0x6a), w(0xcb), w(0xbe), w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf),\
+ w(0xd0), w(0xef), w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85),\
+ w(0x45), w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8),\
+ w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), w(0xf5),\
+ w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), w(0xf3), w(0xd2),\
+ w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), w(0x97), w(0x44), w(0x17),\
+ w(0xc4), w(0xa7), w(0x7e), w(0x3d), w(0x64), w(0x5d), w(0x19), w(0x73),\
+ w(0x60), w(0x81), w(0x4f), w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88),\
+ w(0x46), w(0xee), w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb),\
+ w(0xe0), w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c),\
+ w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), w(0x79),\
+ w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), w(0x4e), w(0xa9),\
+ w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), w(0x7a), w(0xae), w(0x08),\
+ w(0xba), w(0x78), w(0x25), w(0x2e), w(0x1c), w(0xa6), w(0xb4), w(0xc6),\
+ w(0xe8), w(0xdd), w(0x74), w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a),\
+ w(0x70), w(0x3e), w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e),\
+ w(0x61), w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e),\
+ w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), w(0x94),\
+ w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), w(0x28), w(0xdf),\
+ w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), w(0xe6), w(0x42), w(0x68),\
+ w(0x41), w(0x99), w(0x2d), w(0x0f), w(0xb0), w(0x54), w(0xbb), w(0x16) }
#define rc_data(w) {\
- w(0x01), w(0x02), w(0x04), w(0x08), w(0x10),w(0x20), w(0x40), w(0x80),\
- w(0x1b), w(0x36) }
+ w(0x01), w(0x02), w(0x04), w(0x08), w(0x10),w(0x20), w(0x40), w(0x80),\
+ w(0x1b), w(0x36) }
#define bytes2word(b0, b1, b2, b3) (((uint32_t)(b3) << 24) | \
- ((uint32_t)(b2) << 16) | ((uint32_t)(b1) << 8) | (b0))
+ ((uint32_t)(b2) << 16) | ((uint32_t)(b1) << 8) | (b0))
#define h0(x) (x)
#define w0(p) bytes2word(p, 0, 0, 0)
@@ -133,10 +133,10 @@ extern "C"
#define d_4(t,n,b,e,f,g,h) ALIGN const t n[4][256] = { b(e), b(f), b(g), b(h) }
#define four_tables(x,tab,vf,rf,c) \
- (tab[0][bval(vf(x,0,c),rf(0,c))] \
- ^ tab[1][bval(vf(x,1,c),rf(1,c))] \
- ^ tab[2][bval(vf(x,2,c),rf(2,c))] \
- ^ tab[3][bval(vf(x,3,c),rf(3,c))])
+ (tab[0][bval(vf(x,0,c),rf(0,c))] \
+ ^ tab[1][bval(vf(x,1,c),rf(1,c))] \
+ ^ tab[2][bval(vf(x,2,c),rf(2,c))] \
+ ^ tab[3][bval(vf(x,3,c),rf(3,c))])
d_4(uint32_t, t_dec(f,n), sb_data, u0, u1, u2, u3);
@@ -149,34 +149,34 @@ d_4(uint32_t, t_dec(f,n), sb_data, u0, u1, u2, u3);
#endif
STATIC INLINE void aesb_single_round(const uint8_t *in, uint8_t *out, uint8_t *expandedKey)
-{
- uint32_t b0[4], b1[4];
- const uint32_t *kp = (uint32_t *) expandedKey;
- state_in(b0, in);
+{
+ uint32_t b0[4], b1[4];
+ const uint32_t *kp = (uint32_t *) expandedKey;
+ state_in(b0, in);
- round(fwd_rnd, b1, b0, kp);
+ round(fwd_rnd, b1, b0, kp);
- state_out(out, b1);
+ state_out(out, b1);
}
STATIC INLINE void aesb_pseudo_round(const uint8_t *in, uint8_t *out, uint8_t *expandedKey)
-{
- uint32_t b0[4], b1[4];
- const uint32_t *kp = (uint32_t *) expandedKey;
- state_in(b0, in);
-
- round(fwd_rnd, b1, b0, kp);
- round(fwd_rnd, b0, b1, kp + 1 * N_COLS);
- round(fwd_rnd, b1, b0, kp + 2 * N_COLS);
- round(fwd_rnd, b0, b1, kp + 3 * N_COLS);
- round(fwd_rnd, b1, b0, kp + 4 * N_COLS);
- round(fwd_rnd, b0, b1, kp + 5 * N_COLS);
- round(fwd_rnd, b1, b0, kp + 6 * N_COLS);
- round(fwd_rnd, b0, b1, kp + 7 * N_COLS);
- round(fwd_rnd, b1, b0, kp + 8 * N_COLS);
- round(fwd_rnd, b0, b1, kp + 9 * N_COLS);
-
- state_out(out, b0);
+{
+ uint32_t b0[4], b1[4];
+ const uint32_t *kp = (uint32_t *) expandedKey;
+ state_in(b0, in);
+
+ round(fwd_rnd, b1, b0, kp);
+ round(fwd_rnd, b0, b1, kp + 1 * N_COLS);
+ round(fwd_rnd, b1, b0, kp + 2 * N_COLS);
+ round(fwd_rnd, b0, b1, kp + 3 * N_COLS);
+ round(fwd_rnd, b1, b0, kp + 4 * N_COLS);
+ round(fwd_rnd, b0, b1, kp + 5 * N_COLS);
+ round(fwd_rnd, b1, b0, kp + 6 * N_COLS);
+ round(fwd_rnd, b0, b1, kp + 7 * N_COLS);
+ round(fwd_rnd, b1, b0, kp + 8 * N_COLS);
+ round(fwd_rnd, b0, b1, kp + 9 * N_COLS);
+
+ state_out(out, b0);
}
diff --git a/src/crypto/slow-hash.c b/src/crypto/slow-hash.c
index 2a746de3d..51ad2f40b 100644
--- a/src/crypto/slow-hash.c
+++ b/src/crypto/slow-hash.c
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#include <assert.h>
@@ -104,33 +104,33 @@
#endif
#define pre_aes() \
- j = state_index(a); \
- _c = _mm_load_si128(R128(&hp_state[j])); \
- _a = _mm_load_si128(R128(a)); \
+ j = state_index(a); \
+ _c = _mm_load_si128(R128(&hp_state[j])); \
+ _a = _mm_load_si128(R128(a)); \
/*
* An SSE-optimized implementation of the second half of CryptoNight step 3.
- * After using AES to mix a scratchpad value into _c (done by the caller),
- * this macro xors it with _b and stores the result back to the same index (j) that it
+ * After using AES to mix a scratchpad value into _c (done by the caller),
+ * this macro xors it with _b and stores the result back to the same index (j) that it
* loaded the scratchpad value from. It then performs a second random memory
* read/write from the scratchpad, but this time mixes the values using a 64
* bit multiply.
* This code is based upon an optimized implementation by dga.
*/
#define post_aes() \
- _mm_store_si128(R128(c), _c); \
- _b = _mm_xor_si128(_b, _c); \
- _mm_store_si128(R128(&hp_state[j]), _b); \
- j = state_index(c); \
- p = U64(&hp_state[j]); \
- b[0] = p[0]; b[1] = p[1]; \
- __mul(); \
- a[0] += hi; a[1] += lo; \
- p = U64(&hp_state[j]); \
- p[0] = a[0]; p[1] = a[1]; \
- a[0] ^= b[0]; a[1] ^= b[1]; \
- _b = _c; \
-
+ _mm_store_si128(R128(c), _c); \
+ _b = _mm_xor_si128(_b, _c); \
+ _mm_store_si128(R128(&hp_state[j]), _b); \
+ j = state_index(c); \
+ p = U64(&hp_state[j]); \
+ b[0] = p[0]; b[1] = p[1]; \
+ __mul(); \
+ a[0] += hi; a[1] += lo; \
+ p = U64(&hp_state[j]); \
+ p[0] = a[0]; p[1] = a[1]; \
+ a[0] ^= b[0]; a[1] ^= b[1]; \
+ _b = _c; \
+
#if defined(_MSC_VER)
#define THREADV __declspec(thread)
#else
@@ -226,14 +226,14 @@ STATIC INLINE void aes_256_assist2(__m128i* t1, __m128i * t3)
*t3 = _mm_xor_si128(*t3, t2);
}
-/**
+/**
* @brief expands 'key' into a form it can be used for AES encryption.
- *
+ *
* This is an SSE-optimized implementation of AES key schedule generation. It
* expands the key into multiple round keys, each of which is used in one round
* of the AES encryption used to fill (and later, extract randomness from)
* the large 2MB buffer. Note that CryptoNight does not use a completely
- * standard AES encryption for its buffer expansion, so do not copy this
+ * standard AES encryption for its buffer expansion, so do not copy this
* function outside of Monero without caution! This version uses the hardware
* AESKEYGENASSIST instruction to speed key generation, and thus requires
* CPU AES support.
@@ -402,7 +402,7 @@ BOOL SetLockPagesPrivilege(HANDLE hProcess, BOOL bEnable)
/**
* @brief allocate the 2MB scratch buffer using OS support for huge pages, if available
*
- * This function tries to allocate the 2MB scratch buffer using a single
+ * This function tries to allocate the 2MB scratch buffer using a single
* 2MB "huge page" (instead of the usual 4KB page sizes) to reduce TLB misses
* during the random accesses to the scratch buffer. This is one of the
* important speed optimizations needed to make CryptoNight faster.
@@ -423,7 +423,7 @@ void slow_hash_allocate_state(void)
#else
#if defined(__APPLE__) || defined(__FreeBSD__)
hp_state = mmap(0, MEMORY, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON, 0, 0);
+ MAP_PRIVATE | MAP_ANON, 0, 0);
#else
hp_state = mmap(0, MEMORY, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, 0, 0);
@@ -562,7 +562,7 @@ void cn_slow_hash(const void *data, size_t length, char *hash)
*/
_b = _mm_load_si128(R128(b));
- // Two independent versions, one with AES, one without, to ensure that
+ // Two independent versions, one with AES, one without, to ensure that
// the useAes test is only performed once, not every iteration.
if(useAes)
{
@@ -665,59 +665,59 @@ static void (*const extra_hashes[4])(const void *, size_t, char *) = {
STATIC INLINE void ___mul128(uint32_t *a, uint32_t *b, uint32_t *h, uint32_t *l)
{
- // ND: 64x64 multiplication for ARM7
- __asm__ __volatile__
- (
- // lo hi
- "umull %[r0], %[r1], %[b], %[d]\n\t" // bd [r0 = bd.lo]
- "umull %[r2], %[r3], %[b], %[c]\n\t" // bc
- "umull %[b], %[c], %[a], %[c]\n\t" // ac
- "adds %[r1], %[r1], %[r2]\n\t" // r1 = bd.hi + bc.lo
- "adcs %[r2], %[r3], %[b]\n\t" // r2 = ac.lo + bc.hi + carry
- "adc %[r3], %[c], #0\n\t" // r3 = ac.hi + carry
- "umull %[b], %[a], %[a], %[d]\n\t" // ad
- "adds %[r1], %[r1], %[b]\n\t" // r1 = bd.hi + bc.lo + ad.lo
- "adcs %[r2], %[r2], %[a]\n\t" // r2 = ac.lo + bc.hi + ad.hi + carry
- "adc %[r3], %[r3], #0\n\t" // r3 = ac.hi + carry
- : [r0]"=&r"(l[0]), [r1]"=&r"(l[1]), [r2]"=&r"(h[0]), [r3]"=&r"(h[1])
- : [a]"r"(a[1]), [b]"r"(a[0]), [c]"r"(b[1]), [d]"r"(b[0])
- : "cc"
- );
+ // ND: 64x64 multiplication for ARM7
+ __asm__ __volatile__
+ (
+ // lo hi
+ "umull %[r0], %[r1], %[b], %[d]\n\t" // bd [r0 = bd.lo]
+ "umull %[r2], %[r3], %[b], %[c]\n\t" // bc
+ "umull %[b], %[c], %[a], %[c]\n\t" // ac
+ "adds %[r1], %[r1], %[r2]\n\t" // r1 = bd.hi + bc.lo
+ "adcs %[r2], %[r3], %[b]\n\t" // r2 = ac.lo + bc.hi + carry
+ "adc %[r3], %[c], #0\n\t" // r3 = ac.hi + carry
+ "umull %[b], %[a], %[a], %[d]\n\t" // ad
+ "adds %[r1], %[r1], %[b]\n\t" // r1 = bd.hi + bc.lo + ad.lo
+ "adcs %[r2], %[r2], %[a]\n\t" // r2 = ac.lo + bc.hi + ad.hi + carry
+ "adc %[r3], %[r3], #0\n\t" // r3 = ac.hi + carry
+ : [r0]"=&r"(l[0]), [r1]"=&r"(l[1]), [r2]"=&r"(h[0]), [r3]"=&r"(h[1])
+ : [a]"r"(a[1]), [b]"r"(a[0]), [c]"r"(b[1]), [d]"r"(b[0])
+ : "cc"
+ );
}
STATIC INLINE void mul(const uint8_t* a, const uint8_t* b, uint8_t* res)
{
- ___mul128((uint32_t *) a, (uint32_t *) b, (uint32_t *) (res + 0), (uint32_t *) (res + 8));
+ ___mul128((uint32_t *) a, (uint32_t *) b, (uint32_t *) (res + 0), (uint32_t *) (res + 8));
}
STATIC INLINE void sum_half_blocks(uint8_t* a, const uint8_t* b)
{
- uint64_t a0, a1, b0, b1;
- a0 = U64(a)[0];
- a1 = U64(a)[1];
- b0 = U64(b)[0];
- b1 = U64(b)[1];
- a0 += b0;
- a1 += b1;
- U64(a)[0] = a0;
- U64(a)[1] = a1;
+ uint64_t a0, a1, b0, b1;
+ a0 = U64(a)[0];
+ a1 = U64(a)[1];
+ b0 = U64(b)[0];
+ b1 = U64(b)[1];
+ a0 += b0;
+ a1 += b1;
+ U64(a)[0] = a0;
+ U64(a)[1] = a1;
}
STATIC INLINE void swap_blocks(uint8_t *a, uint8_t *b)
{
- uint64_t t[2];
- U64(t)[0] = U64(a)[0];
- U64(t)[1] = U64(a)[1];
- U64(a)[0] = U64(b)[0];
- U64(a)[1] = U64(b)[1];
- U64(b)[0] = U64(t)[0];
- U64(b)[1] = U64(t)[1];
+ uint64_t t[2];
+ U64(t)[0] = U64(a)[0];
+ U64(t)[1] = U64(a)[1];
+ U64(a)[0] = U64(b)[0];
+ U64(a)[1] = U64(b)[1];
+ U64(b)[0] = U64(t)[0];
+ U64(b)[1] = U64(t)[1];
}
STATIC INLINE void xor_blocks(uint8_t* a, const uint8_t* b)
{
- U64(a)[0] ^= U64(b)[0];
- U64(a)[1] ^= U64(b)[1];
+ U64(a)[0] ^= U64(b)[0];
+ U64(a)[1] ^= U64(b)[1];
}
#pragma pack(push, 1)
@@ -774,25 +774,25 @@ void cn_slow_hash(const void *data, size_t length, char *hash)
for(i = 0; i < ITER / 2; i++)
{
- #define MASK ((uint32_t)(((MEMORY / AES_BLOCK_SIZE) - 1) << 4))
- #define state_index(x) ((*(uint32_t *) x) & MASK)
+ #define MASK ((uint32_t)(((MEMORY / AES_BLOCK_SIZE) - 1) << 4))
+ #define state_index(x) ((*(uint32_t *) x) & MASK)
- // Iteration 1
- p = &long_state[state_index(a)];
- aesb_single_round(p, p, a);
+ // Iteration 1
+ p = &long_state[state_index(a)];
+ aesb_single_round(p, p, a);
- xor_blocks(b, p);
- swap_blocks(b, p);
- swap_blocks(a, b);
+ xor_blocks(b, p);
+ swap_blocks(b, p);
+ swap_blocks(a, b);
- // Iteration 2
- p = &long_state[state_index(a)];
+ // Iteration 2
+ p = &long_state[state_index(a)];
- mul(a, p, d);
- sum_half_blocks(b, d);
- swap_blocks(b, p);
- xor_blocks(b, p);
- swap_blocks(a, b);
+ mul(a, p, d);
+ sum_half_blocks(b, d);
+ swap_blocks(b, p);
+ xor_blocks(b, p);
+ swap_blocks(a, b);
}
memcpy(text, state.init, INIT_SIZE_BYTE);
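To make the MASK and state_index macros in the hunk above concrete: they take the first 32-bit word of the running state and truncate it to a 16-byte-aligned offset inside the 2MB scratchpad. A sketch with the constants spelled out (MEMORY = 2MB and AES_BLOCK_SIZE = 16 are assumptions carried over from the surrounding file; names are illustrative):

#include <cstdint>
#include <cstring>

static const uint32_t MEMORY_BYTES    = 1u << 21;  // 2 MiB scratchpad (assumption)
static const uint32_t AES_BLOCK_BYTES = 16;

// Equivalent of state_index(): low 32 bits of the block, masked down to a
// 16-byte-aligned offset in [0, MEMORY_BYTES - 16].
static uint32_t scratchpad_offset(const uint8_t block[16])
{
    const uint32_t mask = ((MEMORY_BYTES / AES_BLOCK_BYTES) - 1) << 4;  // 0x1FFFF0
    uint32_t word;
    std::memcpy(&word, block, sizeof word);
    return word & mask;
}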
@@ -874,13 +874,13 @@ static void copy_block(uint8_t* dst, const uint8_t* src) {
}
static void swap_blocks(uint8_t *a, uint8_t *b){
- uint64_t t[2];
- U64(t)[0] = U64(a)[0];
- U64(t)[1] = U64(a)[1];
- U64(a)[0] = U64(b)[0];
- U64(a)[1] = U64(b)[1];
- U64(b)[0] = U64(t)[0];
- U64(b)[1] = U64(t)[1];
+ uint64_t t[2];
+ U64(t)[0] = U64(a)[0];
+ U64(t)[1] = U64(a)[1];
+ U64(a)[0] = U64(b)[0];
+ U64(a)[1] = U64(b)[1];
+ U64(b)[0] = U64(t)[0];
+ U64(b)[1] = U64(t)[1];
}
static void xor_blocks(uint8_t* a, const uint8_t* b) {
@@ -916,11 +916,11 @@ void cn_slow_hash(const void *data, size_t length, char *hash) {
memcpy(text, state.init, INIT_SIZE_BYTE);
memcpy(aes_key, state.hs.b, AES_KEY_SIZE);
aes_ctx = (oaes_ctx *) oaes_alloc();
-
+
oaes_key_import_data(aes_ctx, aes_key, AES_KEY_SIZE);
for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
- for (j = 0; j < INIT_SIZE_BLK; j++) {
- aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
+ for (j = 0; j < INIT_SIZE_BLK; j++) {
+ aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
}
memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
}
@@ -938,7 +938,7 @@ void cn_slow_hash(const void *data, size_t length, char *hash) {
/* Iteration 1 */
j = e2i(a, MEMORY / AES_BLOCK_SIZE);
copy_block(c, &long_state[j * AES_BLOCK_SIZE]);
- aesb_single_round(c, c, a);
+ aesb_single_round(c, c, a);
xor_blocks(b, c);
swap_blocks(b, c);
copy_block(&long_state[j * AES_BLOCK_SIZE], c);
@@ -961,7 +961,7 @@ void cn_slow_hash(const void *data, size_t length, char *hash) {
for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
for (j = 0; j < INIT_SIZE_BLK; j++) {
xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
- aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
+ aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
}
}
memcpy(state.init, text, INIT_SIZE_BYTE);
diff --git a/src/cryptonote_core/CMakeLists.txt b/src/cryptonote_core/CMakeLists.txt
index 7b0a5017d..4aaad88a2 100644
--- a/src/cryptonote_core/CMakeLists.txt
+++ b/src/cryptonote_core/CMakeLists.txt
@@ -65,9 +65,9 @@ set(cryptonote_core_private_headers
hardfork.h)
if(PER_BLOCK_CHECKPOINT)
- set(Blocks "blocks")
+ set(Blocks "blocks")
else()
- set(Blocks "")
+ set(Blocks "")
endif()
bitmonero_private_headers(cryptonote_core
@@ -86,7 +86,7 @@ target_link_libraries(cryptonote_core
${Boost_PROGRAM_OPTIONS_LIBRARY}
${Boost_SERIALIZATION_LIBRARY}
LINK_PRIVATE
- ${Blocks}
+ ${Blocks}
${Boost_FILESYSTEM_LIBRARY}
${Boost_SYSTEM_LIBRARY}
${Boost_THREAD_LIBRARY}
diff --git a/src/cryptonote_core/blockchain.cpp b/src/cryptonote_core/blockchain.cpp
index 31945d166..bf7294375 100644
--- a/src/cryptonote_core/blockchain.cpp
+++ b/src/cryptonote_core/blockchain.cpp
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#include <algorithm>
@@ -99,24 +99,24 @@ static const uint64_t testnet_hard_fork_version_1_till = 624633;
//------------------------------------------------------------------
Blockchain::Blockchain(tx_memory_pool& tx_pool) :
-m_db(), m_tx_pool(tx_pool), m_timestamps_and_difficulties_height(0), m_current_block_cumul_sz_limit(0), m_is_in_checkpoint_zone(false),
-m_is_blockchain_storing(false), m_enforce_dns_checkpoints(false), m_max_prepare_blocks_threads(4), m_db_blocks_per_sync(1), m_db_sync_mode(db_async), m_fast_sync(true)
+ m_db(), m_tx_pool(tx_pool), m_timestamps_and_difficulties_height(0), m_current_block_cumul_sz_limit(0), m_is_in_checkpoint_zone(false),
+ m_is_blockchain_storing(false), m_enforce_dns_checkpoints(false), m_max_prepare_blocks_threads(4), m_db_blocks_per_sync(1), m_db_sync_mode(db_async), m_fast_sync(true)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
+ LOG_PRINT_L3("Blockchain::" << __func__);
}
//------------------------------------------------------------------
bool Blockchain::have_tx(const crypto::hash &id) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- return m_db->tx_exists(id);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ return m_db->tx_exists(id);
}
//------------------------------------------------------------------
bool Blockchain::have_tx_keyimg_as_spent(const crypto::key_image &key_im) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- return m_db->has_key_image(key_im);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ return m_db->has_key_image(key_im);
}
//------------------------------------------------------------------
// This function makes sure that each "input" in an input (mixins) exists
@@ -125,305 +125,305 @@ bool Blockchain::have_tx_keyimg_as_spent(const crypto::key_image &key_im) const
template <class visitor_t>
bool Blockchain::scan_outputkeys_for_indexes(const txin_to_key& tx_in_to_key, visitor_t &vis, const crypto::hash &tx_prefix_hash, uint64_t* pmax_related_block_height) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
-
- // ND: Disable locking and make method private.
- //CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
- // verify that the input has key offsets (that it exists properly, really)
- if(!tx_in_to_key.key_offsets.size())
- return false;
+ // ND: Disable locking and make method private.
+ //CRITICAL_REGION_LOCAL(m_blockchain_lock);
- // cryptonote_format_utils uses relative offsets for indexing to the global
- // outputs list. that is to say that absolute offset #2 is absolute offset
- // #1 plus relative offset #2.
- // TODO: Investigate if this is necessary / why this is done.
- std::vector<uint64_t> absolute_offsets = relative_output_offsets_to_absolute(tx_in_to_key.key_offsets);
- std::vector<output_data_t> outputs;
+ // verify that the input has key offsets (that it exists properly, really)
+ if(!tx_in_to_key.key_offsets.size())
+ return false;
- bool found = false;
- auto it = m_scan_table.find(tx_prefix_hash);
- if (it != m_scan_table.end())
+ // cryptonote_format_utils uses relative offsets for indexing to the global
+ // outputs list. that is to say that absolute offset #2 is absolute offset
+ // #1 plus relative offset #2.
+ // TODO: Investigate if this is necessary / why this is done.
+ std::vector<uint64_t> absolute_offsets = relative_output_offsets_to_absolute(tx_in_to_key.key_offsets);
+ std::vector<output_data_t> outputs;
+
+ bool found = false;
+ auto it = m_scan_table.find(tx_prefix_hash);
+ if (it != m_scan_table.end())
+ {
+ auto its = it->second.find(tx_in_to_key.k_image);
+ if (its != it->second.end())
+ {
+ outputs = its->second;
+ found = true;
+ }
+ }
+
+ if (!found)
+ {
+ m_db->get_output_key(tx_in_to_key.amount, absolute_offsets, outputs);
+ }
+ else
+ {
+ // check for partial results and add the rest if needed;
+ if (outputs.size() < absolute_offsets.size() && outputs.size() > 0)
+ {
+ LOG_PRINT_L1("Additional outputs needed: " << absolute_offsets.size() - outputs.size());
+ std::vector < uint64_t > add_offsets;
+ std::vector<output_data_t> add_outputs;
+ for (size_t i = outputs.size(); i < absolute_offsets.size(); i++)
+ add_offsets.push_back(absolute_offsets[i]);
+ m_db->get_output_key(tx_in_to_key.amount, add_offsets, add_outputs);
+ outputs.insert(outputs.end(), add_outputs.begin(), add_outputs.end());
+ }
+ }
+
+ size_t count = 0;
+ for (const uint64_t& i : absolute_offsets)
+ {
+ try
{
- auto its = it->second.find(tx_in_to_key.k_image);
- if (its != it->second.end())
- {
- outputs = its->second;
- found = true;
- }
- }
+ output_data_t output_index;
+ try
+ {
+ // get tx hash and output index for output
+ if (count < outputs.size())
+ output_index = outputs.at(count);
+ else
+ output_index = m_db->get_output_key(tx_in_to_key.amount, i);
- if (!found)
- {
- m_db->get_output_key(tx_in_to_key.amount, absolute_offsets, outputs);
- }
- else
- {
- // check for partial results and add the rest if needed;
- if (outputs.size() < absolute_offsets.size() && outputs.size() > 0)
+ // call to the passed boost visitor to grab the public key for the output
+ if (!vis.handle_output(output_index.unlock_time, output_index.pubkey))
{
- LOG_PRINT_L1("Additional outputs needed: " << absolute_offsets.size() - outputs.size());
- std::vector < uint64_t > add_offsets;
- std::vector<output_data_t> add_outputs;
- for (size_t i = outputs.size(); i < absolute_offsets.size(); i++)
- add_offsets.push_back(absolute_offsets[i]);
- m_db->get_output_key(tx_in_to_key.amount, add_offsets, add_outputs);
- outputs.insert(outputs.end(), add_outputs.begin(), add_outputs.end());
+ LOG_PRINT_L0("Failed to handle_output for output no = " << count << ", with absolute offset " << i);
+ return false;
}
- }
-
- size_t count = 0;
- for (const uint64_t& i : absolute_offsets)
- {
- try
- {
- output_data_t output_index;
- try
- {
- // get tx hash and output index for output
- if (count < outputs.size())
- output_index = outputs.at(count);
- else
- output_index = m_db->get_output_key(tx_in_to_key.amount, i);
-
- // call to the passed boost visitor to grab the public key for the output
- if (!vis.handle_output(output_index.unlock_time, output_index.pubkey))
- {
- LOG_PRINT_L0("Failed to handle_output for output no = " << count << ", with absolute offset " << i);
- return false;
- }
- }
- catch (...)
- {
- LOG_PRINT_L0("Output does not exist! amount = " << tx_in_to_key.amount << ", absolute_offset = " << i);
- return false;
- }
-
- // if on last output and pmax_related_block_height not null pointer
- if(++count == absolute_offsets.size() && pmax_related_block_height)
- {
- // set *pmax_related_block_height to tx block height for this output
- auto h = output_index.height;
- if(*pmax_related_block_height < h)
- {
- *pmax_related_block_height = h;
- }
- }
+ }
+ catch (...)
+ {
+ LOG_PRINT_L0("Output does not exist! amount = " << tx_in_to_key.amount << ", absolute_offset = " << i);
+ return false;
+ }
- }
- catch (const OUTPUT_DNE& e)
- {
- LOG_PRINT_L0("Output does not exist: " << e.what());
- return false;
- }
- catch (const TX_DNE& e)
+ // if on last output and pmax_related_block_height not null pointer
+ if(++count == absolute_offsets.size() && pmax_related_block_height)
+ {
+ // set *pmax_related_block_height to tx block height for this output
+ auto h = output_index.height;
+ if(*pmax_related_block_height < h)
{
- LOG_PRINT_L0("Transaction does not exist: " << e.what());
- return false;
+ *pmax_related_block_height = h;
}
+ }
}
+ catch (const OUTPUT_DNE& e)
+ {
+ LOG_PRINT_L0("Output does not exist: " << e.what());
+ return false;
+ }
+ catch (const TX_DNE& e)
+ {
+ LOG_PRINT_L0("Transaction does not exist: " << e.what());
+ return false;
+ }
- return true;
+ }
+
+ return true;
}
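The "relative offsets" mentioned in the comments above are stored as deltas, so recovering the absolute indexes into the global output list is a running sum. A minimal sketch of that conversion (the real helper, relative_output_offsets_to_absolute, lives in cryptonote_format_utils):

#include <cstddef>
#include <cstdint>
#include <vector>

static std::vector<uint64_t> relative_to_absolute(const std::vector<uint64_t> &rel)
{
    std::vector<uint64_t> abs_offsets(rel);
    for (std::size_t i = 1; i < abs_offsets.size(); ++i)
        abs_offsets[i] += abs_offsets[i - 1];  // offset #i = offset #(i-1) + delta #i
    return abs_offsets;
}

For example, the relative offsets {5, 2, 9} decode to the absolute offsets {5, 7, 16}.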
//------------------------------------------------------------------
uint64_t Blockchain::get_current_blockchain_height() const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- return m_db->height();
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ return m_db->height();
}
//------------------------------------------------------------------
//FIXME: possibly move this into the constructor, to avoid accidentally
// dereferencing a null BlockchainDB pointer
bool Blockchain::init(BlockchainDB* db, const bool testnet, const bool fakechain)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
-
- if (db == nullptr)
- {
- LOG_ERROR("Attempted to init Blockchain with null DB");
- return false;
- }
- if (!db->is_open())
- {
- LOG_ERROR("Attempted to init Blockchain with unopened DB");
- return false;
- }
-
- m_db = db;
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- if (testnet) {
- m_hardfork = new HardFork(*db, 1, testnet_hard_fork_version_1_till);
- for (size_t n = 0; n < sizeof(testnet_hard_forks) / sizeof(testnet_hard_forks[0]); ++n)
- m_hardfork->add_fork(testnet_hard_forks[n].version, testnet_hard_forks[n].height, testnet_hard_forks[n].threshold, testnet_hard_forks[n].time);
- }
- else
- {
- m_hardfork = new HardFork(*db, 1, mainnet_hard_fork_version_1_till);
- for (size_t n = 0; n < sizeof(mainnet_hard_forks) / sizeof(mainnet_hard_forks[0]); ++n)
- m_hardfork->add_fork(mainnet_hard_forks[n].version, mainnet_hard_forks[n].height, mainnet_hard_forks[n].threshold, mainnet_hard_forks[n].time);
- }
- m_hardfork->init();
-
- // if the blockchain is new, add the genesis block
- // this feels kinda kludgy to do it this way, but can be looked at later.
- // TODO: add function to create and store genesis block,
- // taking testnet into account
- if(!m_db->height())
+ if (db == nullptr)
+ {
+ LOG_ERROR("Attempted to init Blockchain with null DB");
+ return false;
+ }
+ if (!db->is_open())
+ {
+ LOG_ERROR("Attempted to init Blockchain with unopened DB");
+ return false;
+ }
+
+ m_db = db;
+
+ if (testnet) {
+ m_hardfork = new HardFork(*db, 1, testnet_hard_fork_version_1_till);
+ for (size_t n = 0; n < sizeof(testnet_hard_forks) / sizeof(testnet_hard_forks[0]); ++n)
+ m_hardfork->add_fork(testnet_hard_forks[n].version, testnet_hard_forks[n].height, testnet_hard_forks[n].threshold, testnet_hard_forks[n].time);
+ }
+ else
+ {
+ m_hardfork = new HardFork(*db, 1, mainnet_hard_fork_version_1_till);
+ for (size_t n = 0; n < sizeof(mainnet_hard_forks) / sizeof(mainnet_hard_forks[0]); ++n)
+ m_hardfork->add_fork(mainnet_hard_forks[n].version, mainnet_hard_forks[n].height, mainnet_hard_forks[n].threshold, mainnet_hard_forks[n].time);
+ }
+ m_hardfork->init();
+
+ // if the blockchain is new, add the genesis block
+ // this feels kinda kludgy to do it this way, but can be looked at later.
+ // TODO: add function to create and store genesis block,
+ // taking testnet into account
+ if(!m_db->height())
+ {
+ LOG_PRINT_L0("Blockchain not loaded, generating genesis block.");
+ block bl = boost::value_initialized<block>();
+ block_verification_context bvc = boost::value_initialized<block_verification_context>();
+ if (testnet)
{
- LOG_PRINT_L0("Blockchain not loaded, generating genesis block.");
- block bl = boost::value_initialized<block>();
- block_verification_context bvc = boost::value_initialized<block_verification_context>();
- if (testnet)
- {
- generate_genesis_block(bl, config::testnet::GENESIS_TX, config::testnet::GENESIS_NONCE);
- }
- else
- {
- generate_genesis_block(bl, config::GENESIS_TX, config::GENESIS_NONCE);
- }
- add_new_block(bl, bvc);
- CHECK_AND_ASSERT_MES(!bvc.m_verifivation_failed, false, "Failed to add genesis block to blockchain");
+ generate_genesis_block(bl, config::testnet::GENESIS_TX, config::testnet::GENESIS_NONCE);
}
- // TODO: if blockchain load successful, verify blockchain against both
- // hard-coded and runtime-loaded (and enforced) checkpoints.
else
{
+ generate_genesis_block(bl, config::GENESIS_TX, config::GENESIS_NONCE);
}
+ add_new_block(bl, bvc);
+ CHECK_AND_ASSERT_MES(!bvc.m_verifivation_failed, false, "Failed to add genesis block to blockchain");
+ }
+ // TODO: if blockchain load successful, verify blockchain against both
+ // hard-coded and runtime-loaded (and enforced) checkpoints.
+ else
+ {
+ }
- if (!fakechain)
- {
- // ensure we fixup anything we found and fix in the future
- m_db->fixup();
- }
+ if (!fakechain)
+ {
+ // ensure we fixup anything we found and fix in the future
+ m_db->fixup();
+ }
- // check how far behind we are
- uint64_t top_block_timestamp = m_db->get_top_block_timestamp();
- uint64_t timestamp_diff = time(NULL) - top_block_timestamp;
+ // check how far behind we are
+ uint64_t top_block_timestamp = m_db->get_top_block_timestamp();
+ uint64_t timestamp_diff = time(NULL) - top_block_timestamp;
- // genesis block has no timestamp, could probably change it to have timestamp of 1341378000...
- if(!top_block_timestamp)
- timestamp_diff = time(NULL) - 1341378000;
+ // genesis block has no timestamp, could probably change it to have timestamp of 1341378000...
+ if(!top_block_timestamp)
+ timestamp_diff = time(NULL) - 1341378000;
- // create general purpose async service queue
+ // create general purpose async service queue
- m_async_work_idle = std::unique_ptr < boost::asio::io_service::work > (new boost::asio::io_service::work(m_async_service));
- // we only need 1
- m_async_pool.create_thread(boost::bind(&boost::asio::io_service::run, &m_async_service));
+ m_async_work_idle = std::unique_ptr < boost::asio::io_service::work > (new boost::asio::io_service::work(m_async_service));
+ // we only need 1
+ m_async_pool.create_thread(boost::bind(&boost::asio::io_service::run, &m_async_service));
#if defined(PER_BLOCK_CHECKPOINT)
- if (!fakechain && m_fast_sync && get_blocks_dat_start(testnet) != nullptr)
- {
- if (get_blocks_dat_size(testnet) > 4)
+ if (!fakechain && m_fast_sync && get_blocks_dat_start(testnet) != nullptr)
+ {
+ if (get_blocks_dat_size(testnet) > 4)
+ {
+ const unsigned char *p = get_blocks_dat_start(testnet);
+ const uint32_t nblocks = *p | ((*(p+1))<<8) | ((*(p+2))<<16) | ((*(p+3))<<24);
+ const size_t size_needed = 4 + nblocks * sizeof(crypto::hash);
+ if(nblocks > 0 && nblocks > m_db->height() && get_blocks_dat_size(testnet) >= size_needed)
+ {
+ LOG_PRINT_L0("Loading precomputed blocks: " << nblocks);
+ p += sizeof(uint32_t);
+ for (uint32_t i = 0; i < nblocks; i++)
+ {
+ crypto::hash hash;
+ memcpy(hash.data, p, sizeof(hash.data));
+ p += sizeof(hash.data);
+ m_blocks_hash_check.push_back(hash);
+ }
+
+ // FIXME: clear tx_pool because the process might have been
+ // terminated and caused it to store txs kept by blocks.
+ // The core will not call check_tx_inputs(..) for these
+ // transactions in this case. Consequently, the sanity check
+ // for tx hashes will fail in handle_block_to_main_chain(..)
+ std::list<transaction> txs;
+ m_tx_pool.get_transactions(txs);
+
+ size_t blob_size;
+ uint64_t fee;
+ bool relayed;
+ transaction pool_tx;
+ for(const transaction &tx : txs)
{
- const unsigned char *p = get_blocks_dat_start(testnet);
- const uint32_t nblocks = *p | ((*(p+1))<<8) | ((*(p+2))<<16) | ((*(p+3))<<24);
- const size_t size_needed = 4 + nblocks * sizeof(crypto::hash);
- if(nblocks > 0 && nblocks > m_db->height() && get_blocks_dat_size(testnet) >= size_needed)
- {
- LOG_PRINT_L0("Loading precomputed blocks: " << nblocks);
- p += sizeof(uint32_t);
- for (uint32_t i = 0; i < nblocks; i++)
- {
- crypto::hash hash;
- memcpy(hash.data, p, sizeof(hash.data));
- p += sizeof(hash.data);
- m_blocks_hash_check.push_back(hash);
- }
-
- // FIXME: clear tx_pool because the process might have been
- // terminated and caused it to store txs kept by blocks.
- // The core will not call check_tx_inputs(..) for these
- // transactions in this case. Consequently, the sanity check
- // for tx hashes will fail in handle_block_to_main_chain(..)
- std::list<transaction> txs;
- m_tx_pool.get_transactions(txs);
-
- size_t blob_size;
- uint64_t fee;
- bool relayed;
- transaction pool_tx;
- for(const transaction &tx : txs)
- {
- crypto::hash tx_hash = get_transaction_hash(tx);
- m_tx_pool.take_tx(tx_hash, pool_tx, blob_size, fee, relayed);
- }
- }
+ crypto::hash tx_hash = get_transaction_hash(tx);
+ m_tx_pool.take_tx(tx_hash, pool_tx, blob_size, fee, relayed);
}
+ }
}
+ }
#endif
- LOG_PRINT_GREEN("Blockchain initialized. last block: " << m_db->height() - 1 << ", " << epee::misc_utils::get_time_interval_string(timestamp_diff) << " time ago, current difficulty: " << get_difficulty_for_next_block(), LOG_LEVEL_0);
+ LOG_PRINT_GREEN("Blockchain initialized. last block: " << m_db->height() - 1 << ", " << epee::misc_utils::get_time_interval_string(timestamp_diff) << " time ago, current difficulty: " << get_difficulty_for_next_block(), LOG_LEVEL_0);
- return true;
+ return true;
}
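For reference, the blocks.dat blob consumed by the PER_BLOCK_CHECKPOINT branch above is laid out as a little-endian 32-bit block count followed by that many 32-byte block hashes. A hedged parsing sketch with illustrative names (not the project's API):

#include <cstdint>
#include <cstring>
#include <array>
#include <vector>

static bool parse_precomputed_hashes(const unsigned char *p, std::size_t len,
                                     std::vector<std::array<std::uint8_t, 32>> &hashes)
{
    if (len < 4)
        return false;
    const std::uint32_t nblocks =
        p[0] | (p[1] << 8) | (p[2] << 16) | (std::uint32_t(p[3]) << 24);
    if (len < 4 + std::size_t(nblocks) * 32)
        return false;  // truncated or inconsistent blob
    p += 4;
    hashes.resize(nblocks);
    for (std::uint32_t i = 0; i < nblocks; ++i, p += 32)
        std::memcpy(hashes[i].data(), p, 32);
    return true;
}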
//------------------------------------------------------------------
bool Blockchain::store_blockchain()
{
- LOG_PRINT_YELLOW("Blockchain::" << __func__, LOG_LEVEL_3);
- // lock because the rpc_thread command handler also calls this
- CRITICAL_REGION_LOCAL(m_db->m_synchronization_lock);
-
- TIME_MEASURE_START(save);
- // TODO: make sure sync(if this throws that it is not simply ignored higher
- // up the call stack
- try
- {
- m_db->sync();
- }
- catch (const std::exception& e)
- {
- LOG_PRINT_L0(std::string("Error syncing blockchain db: ") + e.what() + "-- shutting down now to prevent issues!");
- throw;
- }
- catch (...)
- {
- LOG_PRINT_L0("There was an issue storing the blockchain, shutting down now to prevent issues!");
- throw;
- }
-
- TIME_MEASURE_FINISH(save);
- if(m_show_time_stats)
- LOG_PRINT_L0("Blockchain stored OK, took: " << save << " ms");
- return true;
+ LOG_PRINT_YELLOW("Blockchain::" << __func__, LOG_LEVEL_3);
+ // lock because the rpc_thread command handler also calls this
+ CRITICAL_REGION_LOCAL(m_db->m_synchronization_lock);
+
+ TIME_MEASURE_START(save);
+ // TODO: make sure sync(if this throws that it is not simply ignored higher
+ // up the call stack
+ try
+ {
+ m_db->sync();
+ }
+ catch (const std::exception& e)
+ {
+ LOG_PRINT_L0(std::string("Error syncing blockchain db: ") + e.what() + "-- shutting down now to prevent issues!");
+ throw;
+ }
+ catch (...)
+ {
+ LOG_PRINT_L0("There was an issue storing the blockchain, shutting down now to prevent issues!");
+ throw;
+ }
+
+ TIME_MEASURE_FINISH(save);
+ if(m_show_time_stats)
+ LOG_PRINT_L0("Blockchain stored OK, took: " << save << " ms");
+ return true;
}
//------------------------------------------------------------------
bool Blockchain::deinit()
{
- LOG_PRINT_L3("Blockchain::" << __func__);
+ LOG_PRINT_L3("Blockchain::" << __func__);
- LOG_PRINT_L0("Closing IO Service.")
+ LOG_PRINT_L0("Closing IO Service.")
// stop async service
m_async_work_idle.reset();
- m_async_pool.join_all();
- m_async_service.stop();
-
- // as this should be called if handling a SIGSEGV, need to check
- // if m_db is a NULL pointer (and thus may have caused the illegal
- // memory operation), otherwise we may cause a loop.
- if (m_db == NULL)
- {
- throw new DB_ERROR("The db pointer is null in Blockchain, the blockchain may be corrupt!");
- }
-
- try
- {
- m_db->close();
- }
- catch (const std::exception& e)
- {
- LOG_PRINT_L0(std::string("Error closing blockchain db: ") + e.what());
- }
- catch (...)
- {
- LOG_PRINT_L0("There was an issue closing/storing the blockchain, shutting down now to prevent issues!");
- }
-
- delete m_hardfork;
- delete m_db;
- return true;
+ m_async_pool.join_all();
+ m_async_service.stop();
+
+ // as this should be called if handling a SIGSEGV, need to check
+ // if m_db is a NULL pointer (and thus may have caused the illegal
+ // memory operation), otherwise we may cause a loop.
+ if (m_db == NULL)
+ {
+ throw new DB_ERROR("The db pointer is null in Blockchain, the blockchain may be corrupt!");
+ }
+
+ try
+ {
+ m_db->close();
+ }
+ catch (const std::exception& e)
+ {
+ LOG_PRINT_L0(std::string("Error closing blockchain db: ") + e.what());
+ }
+ catch (...)
+ {
+ LOG_PRINT_L0("There was an issue closing/storing the blockchain, shutting down now to prevent issues!");
+ }
+
+ delete m_hardfork;
+ delete m_db;
+ return true;
}
//------------------------------------------------------------------
// This function tells BlockchainDB to remove the top block from the
@@ -431,82 +431,82 @@ bool Blockchain::deinit()
// from it to the tx_pool
block Blockchain::pop_block_from_blockchain()
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
-
- m_timestamps_and_difficulties_height = 0;
-
- block popped_block;
- std::vector<transaction> popped_txs;
-
- try
- {
- m_db->pop_block(popped_block, popped_txs);
- }
- // anything that could cause this to throw is likely catastrophic,
- // so we re-throw
- catch (const std::exception& e)
- {
- LOG_ERROR("Error popping block from blockchain: " << e.what());
- throw;
- }
- catch (...)
- {
- LOG_ERROR("Error popping block from blockchain, throwing!");
- throw;
- }
-
- // return transactions from popped block to the tx_pool
- for (transaction& tx : popped_txs)
- {
- if (!is_coinbase(tx))
- {
- cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc);
- // We assume that if they were in a block, the transactions are already
- // known to the network as a whole. However, if we had mined that block,
- // that might not be always true. Unlikely though, and always relaying
- // these again might cause a spike of traffic as many nodes re-relay
- // all the transactions in a popped block when a reorg happens.
- bool r = m_tx_pool.add_tx(tx, tvc, true, true);
- if (!r)
- {
- LOG_ERROR("Error returning transaction to tx_pool");
- }
- }
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+
+ m_timestamps_and_difficulties_height = 0;
+
+ block popped_block;
+ std::vector<transaction> popped_txs;
+
+ try
+ {
+ m_db->pop_block(popped_block, popped_txs);
+ }
+ // anything that could cause this to throw is likely catastrophic,
+ // so we re-throw
+ catch (const std::exception& e)
+ {
+ LOG_ERROR("Error popping block from blockchain: " << e.what());
+ throw;
+ }
+ catch (...)
+ {
+ LOG_ERROR("Error popping block from blockchain, throwing!");
+ throw;
+ }
+
+ // return transactions from popped block to the tx_pool
+ for (transaction& tx : popped_txs)
+ {
+ if (!is_coinbase(tx))
+ {
+ cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc);
+ // We assume that if they were in a block, the transactions are already
+ // known to the network as a whole. However, if we had mined that block,
+ // that might not be always true. Unlikely though, and always relaying
+ // these again might cause a spike of traffic as many nodes re-relay
+ // all the transactions in a popped block when a reorg happens.
+ bool r = m_tx_pool.add_tx(tx, tvc, true, true);
+ if (!r)
+ {
+ LOG_ERROR("Error returning transaction to tx_pool");
+ }
}
+ }
- return popped_block;
+ return popped_block;
}
//------------------------------------------------------------------
bool Blockchain::reset_and_set_genesis_block(const block& b)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- m_transactions.clear();
- m_blocks.clear();
- m_blocks_index.clear();
- m_alternative_chains.clear();
- m_outputs.clear();
- m_db->reset();
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ m_transactions.clear();
+ m_blocks.clear();
+ m_blocks_index.clear();
+ m_alternative_chains.clear();
+ m_outputs.clear();
+ m_db->reset();
- block_verification_context bvc = boost::value_initialized<block_verification_context>();
- add_new_block(b, bvc);
- return bvc.m_added_to_main_chain && !bvc.m_verifivation_failed;
+ block_verification_context bvc = boost::value_initialized<block_verification_context>();
+ add_new_block(b, bvc);
+ return bvc.m_added_to_main_chain && !bvc.m_verifivation_failed;
}
//------------------------------------------------------------------
crypto::hash Blockchain::get_tail_id(uint64_t& height) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- height = m_db->height() - 1;
- return get_tail_id();
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ height = m_db->height() - 1;
+ return get_tail_id();
}
//------------------------------------------------------------------
crypto::hash Blockchain::get_tail_id() const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- return m_db->top_block_hash();
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ return m_db->top_block_hash();
}
//------------------------------------------------------------------
/*TODO: this function was...poorly written. As such, I'm not entirely
@@ -523,120 +523,120 @@ crypto::hash Blockchain::get_tail_id() const
*/
bool Blockchain::get_short_chain_history(std::list<crypto::hash>& ids) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- uint64_t i = 0;
- uint64_t current_multiplier = 1;
- uint64_t sz = m_db->height();
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ uint64_t i = 0;
+ uint64_t current_multiplier = 1;
+ uint64_t sz = m_db->height();
- if(!sz)
- return true;
+ if(!sz)
+ return true;
- bool genesis_included = false;
- uint64_t current_back_offset = 1;
- while(current_back_offset < sz)
- {
- ids.push_back(m_db->get_block_hash_from_height(sz - current_back_offset));
+ bool genesis_included = false;
+ uint64_t current_back_offset = 1;
+ while(current_back_offset < sz)
+ {
+ ids.push_back(m_db->get_block_hash_from_height(sz - current_back_offset));
- if(sz-current_back_offset == 0)
- {
- genesis_included = true;
- }
- if(i < 10)
- {
- ++current_back_offset;
- }
- else
- {
- current_multiplier *= 2;
- current_back_offset += current_multiplier;
- }
- ++i;
+ if(sz-current_back_offset == 0)
+ {
+ genesis_included = true;
}
-
- if (!genesis_included)
+ if(i < 10)
+ {
+ ++current_back_offset;
+ }
+ else
{
- ids.push_back(m_db->get_block_hash_from_height(0));
+ current_multiplier *= 2;
+ current_back_offset += current_multiplier;
}
+ ++i;
+ }
- return true;
+ if (!genesis_included)
+ {
+ ids.push_back(m_db->get_block_hash_from_height(0));
+ }
+
+ return true;
}
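The schedule this function walks can be summarized as: the ten most recent blocks one by one, then exponentially widening gaps, always ending with the genesis block. A sketch that produces just the heights (illustrative, mirroring the loop above):

#include <cstdint>
#include <vector>

static std::vector<std::uint64_t> short_history_heights(std::uint64_t chain_height)
{
    std::vector<std::uint64_t> heights;
    if (chain_height == 0)
        return heights;
    std::uint64_t step = 1, back = 1, i = 0;
    while (back < chain_height)
    {
        heights.push_back(chain_height - back);
        if (i < 10)
            ++back;       // last ten blocks, one by one
        else
        {
            step *= 2;    // then double the gap each time
            back += step;
        }
        ++i;
    }
    if (heights.empty() || heights.back() != 0)
        heights.push_back(0);  // genesis is always included
    return heights;
}

This sparse history is what lets a syncing peer locate the last common block with another node using O(log n) hashes instead of shipping the whole chain.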
//------------------------------------------------------------------
crypto::hash Blockchain::get_block_id_by_height(uint64_t height) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- try
- {
- return m_db->get_block_hash_from_height(height);
- }
- catch (const BLOCK_DNE& e)
- {
- }
- catch (const std::exception& e)
- {
- LOG_PRINT_L0(std::string("Something went wrong fetching block hash by height: ") + e.what());
- throw;
- }
- catch (...)
- {
- LOG_PRINT_L0(std::string("Something went wrong fetching block hash by height"));
- throw;
- }
- return null_hash;
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ try
+ {
+ return m_db->get_block_hash_from_height(height);
+ }
+ catch (const BLOCK_DNE& e)
+ {
+ }
+ catch (const std::exception& e)
+ {
+ LOG_PRINT_L0(std::string("Something went wrong fetching block hash by height: ") + e.what());
+ throw;
+ }
+ catch (...)
+ {
+ LOG_PRINT_L0(std::string("Something went wrong fetching block hash by height"));
+ throw;
+ }
+ return null_hash;
}
//------------------------------------------------------------------
bool Blockchain::get_block_by_hash(const crypto::hash &h, block &blk) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- // try to find block in main chain
- try
- {
- blk = m_db->get_block(h);
- return true;
- }
- // try to find block in alternative chain
- catch (const BLOCK_DNE& e)
- {
- blocks_ext_by_hash::const_iterator it_alt = m_alternative_chains.find(h);
- if (m_alternative_chains.end() != it_alt)
- {
- blk = it_alt->second.bl;
- return true;
- }
- }
- catch (const std::exception& e)
- {
- LOG_PRINT_L0(std::string("Something went wrong fetching block by hash: ") + e.what());
- throw;
- }
- catch (...)
- {
- LOG_PRINT_L0(std::string("Something went wrong fetching block hash by hash"));
- throw;
- }
-
- return false;
+ // try to find block in main chain
+ try
+ {
+ blk = m_db->get_block(h);
+ return true;
+ }
+ // try to find block in alternative chain
+ catch (const BLOCK_DNE& e)
+ {
+ blocks_ext_by_hash::const_iterator it_alt = m_alternative_chains.find(h);
+ if (m_alternative_chains.end() != it_alt)
+ {
+ blk = it_alt->second.bl;
+ return true;
+ }
+ }
+ catch (const std::exception& e)
+ {
+ LOG_PRINT_L0(std::string("Something went wrong fetching block by hash: ") + e.what());
+ throw;
+ }
+ catch (...)
+ {
+ LOG_PRINT_L0(std::string("Something went wrong fetching block hash by hash"));
+ throw;
+ }
+
+ return false;
}
//------------------------------------------------------------------
//FIXME: this function does not seem to be called from anywhere, but
// if it ever is, should probably change std::list for std::vector
void Blockchain::get_all_known_block_ids(std::list<crypto::hash> &main, std::list<crypto::hash> &alt, std::list<crypto::hash> &invalid) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- for (auto& a : m_db->get_hashes_range(0, m_db->height() - 1))
- {
- main.push_back(a);
- }
+ for (auto& a : m_db->get_hashes_range(0, m_db->height() - 1))
+ {
+ main.push_back(a);
+ }
- BOOST_FOREACH(const blocks_ext_by_hash::value_type &v, m_alternative_chains)
+ BOOST_FOREACH(const blocks_ext_by_hash::value_type &v, m_alternative_chains)
alt.push_back(v.first);
- BOOST_FOREACH(const blocks_ext_by_hash::value_type &v, m_invalid_blocks)
+ BOOST_FOREACH(const blocks_ext_by_hash::value_type &v, m_invalid_blocks)
invalid.push_back(v.first);
}
//------------------------------------------------------------------
@@ -646,51 +646,51 @@ void Blockchain::get_all_known_block_ids(std::list<crypto::hash> &main, std::lis
// less blocks than desired if there aren't enough.
difficulty_type Blockchain::get_difficulty_for_next_block()
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- std::vector<uint64_t> timestamps;
- std::vector<difficulty_type> difficulties;
- auto height = m_db->height();
- // ND: Speedup
- // 1. Keep a list of the last 735 (or less) blocks that is used to compute difficulty,
- // then when the next block difficulty is queried, push the latest height data and
- // pop the oldest one from the list. This only requires 1x read per height instead
- // of doing 735 (DIFFICULTY_BLOCKS_COUNT).
- if (m_timestamps_and_difficulties_height != 0 && ((height - m_timestamps_and_difficulties_height) == 1))
- {
- uint64_t index = height - 1;
- m_timestamps.push_back(m_db->get_block_timestamp(index));
- m_difficulties.push_back(m_db->get_block_cumulative_difficulty(index));
-
- while (m_timestamps.size() > DIFFICULTY_BLOCKS_COUNT)
- m_timestamps.erase(m_timestamps.begin());
- while (m_difficulties.size() > DIFFICULTY_BLOCKS_COUNT)
- m_difficulties.erase(m_difficulties.begin());
-
- m_timestamps_and_difficulties_height = height;
- timestamps = m_timestamps;
- difficulties = m_difficulties;
- }
- else
- {
- size_t offset = height - std::min < size_t > (height, static_cast<size_t>(DIFFICULTY_BLOCKS_COUNT));
- if (offset == 0)
- ++offset;
-
- timestamps.clear();
- difficulties.clear();
- for (; offset < height; offset++)
- {
- timestamps.push_back(m_db->get_block_timestamp(offset));
- difficulties.push_back(m_db->get_block_cumulative_difficulty(offset));
- }
-
- m_timestamps_and_difficulties_height = height;
- m_timestamps = timestamps;
- m_difficulties = difficulties;
- }
- size_t target = get_current_hard_fork_version() < 2 ? DIFFICULTY_TARGET_V1 : DIFFICULTY_TARGET;
- return next_difficulty(timestamps, difficulties, target);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ std::vector<uint64_t> timestamps;
+ std::vector<difficulty_type> difficulties;
+ auto height = m_db->height();
+ // ND: Speedup
+ // 1. Keep a list of the last 735 (or less) blocks that is used to compute difficulty,
+ // then when the next block difficulty is queried, push the latest height data and
+ // pop the oldest one from the list. This only requires 1x read per height instead
+ // of doing 735 (DIFFICULTY_BLOCKS_COUNT).
+ if (m_timestamps_and_difficulties_height != 0 && ((height - m_timestamps_and_difficulties_height) == 1))
+ {
+ uint64_t index = height - 1;
+ m_timestamps.push_back(m_db->get_block_timestamp(index));
+ m_difficulties.push_back(m_db->get_block_cumulative_difficulty(index));
+
+ while (m_timestamps.size() > DIFFICULTY_BLOCKS_COUNT)
+ m_timestamps.erase(m_timestamps.begin());
+ while (m_difficulties.size() > DIFFICULTY_BLOCKS_COUNT)
+ m_difficulties.erase(m_difficulties.begin());
+
+ m_timestamps_and_difficulties_height = height;
+ timestamps = m_timestamps;
+ difficulties = m_difficulties;
+ }
+ else
+ {
+ size_t offset = height - std::min < size_t > (height, static_cast<size_t>(DIFFICULTY_BLOCKS_COUNT));
+ if (offset == 0)
+ ++offset;
+
+ timestamps.clear();
+ difficulties.clear();
+ for (; offset < height; offset++)
+ {
+ timestamps.push_back(m_db->get_block_timestamp(offset));
+ difficulties.push_back(m_db->get_block_cumulative_difficulty(offset));
+ }
+
+ m_timestamps_and_difficulties_height = height;
+ m_timestamps = timestamps;
+ m_difficulties = difficulties;
+ }
+ size_t target = get_current_hard_fork_version() < 2 ? DIFFICULTY_TARGET_V1 : DIFFICULTY_TARGET;
+ return next_difficulty(timestamps, difficulties, target);
}
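The "ND: Speedup" comment above boils down to a rolling window: when exactly one block has been added since the last query, append the newest timestamp/difficulty and drop the oldest instead of re-reading all DIFFICULTY_BLOCKS_COUNT rows. A minimal sketch of that caching idea, where WINDOW and the fetch_* callables are stand-ins (assumptions, not the class's real members):

#include <cstddef>
#include <cstdint>
#include <deque>

struct difficulty_window_cache
{
    static constexpr std::size_t WINDOW = 735;  // DIFFICULTY_BLOCKS_COUNT (assumption)

    std::uint64_t cached_height = 0;
    std::deque<std::uint64_t> timestamps;
    std::deque<std::uint64_t> cumulative_difficulties;

    // fetch_ts/fetch_diff stand in for the per-height database reads.
    template <class FetchTs, class FetchDiff>
    void update(std::uint64_t height, FetchTs fetch_ts, FetchDiff fetch_diff)
    {
        if (cached_height != 0 && height == cached_height + 1)
        {
            // incremental path: one read instead of a full window
            timestamps.push_back(fetch_ts(height - 1));
            cumulative_difficulties.push_back(fetch_diff(height - 1));
            if (timestamps.size() > WINDOW) timestamps.pop_front();
            if (cumulative_difficulties.size() > WINDOW) cumulative_difficulties.pop_front();
        }
        else
        {
            // cold path: rebuild the whole window, skipping the genesis block
            timestamps.clear();
            cumulative_difficulties.clear();
            std::uint64_t start = height > WINDOW ? height - WINDOW : 1;
            for (std::uint64_t h = start; h < height; ++h)
            {
                timestamps.push_back(fetch_ts(h));
                cumulative_difficulties.push_back(fetch_diff(h));
            }
        }
        cached_height = height;
    }
};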
//------------------------------------------------------------------
// This function removes blocks from the blockchain until it gets to the
@@ -698,193 +698,193 @@ difficulty_type Blockchain::get_difficulty_for_next_block()
// that had been removed.
bool Blockchain::rollback_blockchain_switching(std::list<block>& original_chain, uint64_t rollback_height)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- m_timestamps_and_difficulties_height = 0;
+ m_timestamps_and_difficulties_height = 0;
- // remove blocks from blockchain until we get back to where we should be.
- while (m_db->height() != rollback_height)
- {
- pop_block_from_blockchain();
- }
+ // remove blocks from blockchain until we get back to where we should be.
+ while (m_db->height() != rollback_height)
+ {
+ pop_block_from_blockchain();
+ }
- //return back original chain
- for (auto& bl : original_chain)
- {
- block_verification_context bvc = boost::value_initialized<block_verification_context>();
- bool r = handle_block_to_main_chain(bl, bvc);
- CHECK_AND_ASSERT_MES(r && bvc.m_added_to_main_chain, false, "PANIC! failed to add (again) block while chain switching during the rollback!");
- }
+ //return back original chain
+ for (auto& bl : original_chain)
+ {
+ block_verification_context bvc = boost::value_initialized<block_verification_context>();
+ bool r = handle_block_to_main_chain(bl, bvc);
+ CHECK_AND_ASSERT_MES(r && bvc.m_added_to_main_chain, false, "PANIC! failed to add (again) block while chain switching during the rollback!");
+ }
- m_hardfork->reorganize_from_chain_height(rollback_height);
+ m_hardfork->reorganize_from_chain_height(rollback_height);
- LOG_PRINT_L1("Rollback to height " << rollback_height << " was successful.");
- if (original_chain.size())
- {
- LOG_PRINT_L1("Restoration to previous blockchain successful as well.");
- }
- return true;
+ LOG_PRINT_L1("Rollback to height " << rollback_height << " was successful.");
+ if (original_chain.size())
+ {
+ LOG_PRINT_L1("Restoration to previous blockchain successful as well.");
+ }
+ return true;
}
//------------------------------------------------------------------
// This function attempts to switch to an alternate chain, returning
// boolean based on success therein.
bool Blockchain::switch_to_alternative_blockchain(std::list<blocks_ext_by_hash::iterator>& alt_chain, bool discard_disconnected_chain)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- m_timestamps_and_difficulties_height = 0;
+ m_timestamps_and_difficulties_height = 0;
- // if empty alt chain passed (not sure how that could happen), return false
- CHECK_AND_ASSERT_MES(alt_chain.size(), false, "switch_to_alternative_blockchain: empty chain passed");
-
- // verify that main chain has front of alt chain's parent block
- if (!m_db->block_exists(alt_chain.front()->second.bl.prev_id))
- {
- LOG_ERROR("Attempting to move to an alternate chain, but it doesn't appear to connect to the main chain!");
- return false;
- }
+ // if empty alt chain passed (not sure how that could happen), return false
+ CHECK_AND_ASSERT_MES(alt_chain.size(), false, "switch_to_alternative_blockchain: empty chain passed");
- // pop blocks from the blockchain until the top block is the parent
- // of the front block of the alt chain.
- std::list<block> disconnected_chain;
- while (m_db->top_block_hash() != alt_chain.front()->second.bl.prev_id)
- {
- block b = pop_block_from_blockchain();
- disconnected_chain.push_front(b);
- }
+ // verify that main chain has front of alt chain's parent block
+ if (!m_db->block_exists(alt_chain.front()->second.bl.prev_id))
+ {
+ LOG_ERROR("Attempting to move to an alternate chain, but it doesn't appear to connect to the main chain!");
+ return false;
+ }
+
+ // pop blocks from the blockchain until the top block is the parent
+ // of the front block of the alt chain.
+ std::list<block> disconnected_chain;
+ while (m_db->top_block_hash() != alt_chain.front()->second.bl.prev_id)
+ {
+ block b = pop_block_from_blockchain();
+ disconnected_chain.push_front(b);
+ }
+
+ auto split_height = m_db->height();
+
+ //connecting new alternative chain
+ for(auto alt_ch_iter = alt_chain.begin(); alt_ch_iter != alt_chain.end(); alt_ch_iter++)
+ {
+ auto ch_ent = *alt_ch_iter;
+ block_verification_context bvc = boost::value_initialized<block_verification_context>();
- auto split_height = m_db->height();
+ // add block to main chain
+ bool r = handle_block_to_main_chain(ch_ent->second.bl, bvc);
- //connecting new alternative chain
- for(auto alt_ch_iter = alt_chain.begin(); alt_ch_iter != alt_chain.end(); alt_ch_iter++)
+ // if adding block to main chain failed, rollback to previous state and
+ // return false
+ if(!r || !bvc.m_added_to_main_chain)
{
- auto ch_ent = *alt_ch_iter;
- block_verification_context bvc = boost::value_initialized<block_verification_context>();
-
- // add block to main chain
- bool r = handle_block_to_main_chain(ch_ent->second.bl, bvc);
-
- // if adding block to main chain failed, rollback to previous state and
- // return false
- if(!r || !bvc.m_added_to_main_chain)
- {
- LOG_PRINT_L1("Failed to switch to alternative blockchain");
+ LOG_PRINT_L1("Failed to switch to alternative blockchain");
- // rollback_blockchain_switching should be moved to two different
- // functions: rollback and apply_chain, but for now we pretend it is
- // just the latter (because the rollback was done above).
- rollback_blockchain_switching(disconnected_chain, m_db->height());
+ // rollback_blockchain_switching should be moved to two different
+ // functions: rollback and apply_chain, but for now we pretend it is
+ // just the latter (because the rollback was done above).
+ rollback_blockchain_switching(disconnected_chain, m_db->height());
- // FIXME: Why do we keep invalid blocks around? Possibly in case we hear
- // about them again so we can immediately dismiss them, but needs some
- // looking into.
- add_block_as_invalid(ch_ent->second, get_block_hash(ch_ent->second.bl));
- LOG_PRINT_L1("The block was inserted as invalid while connecting new alternative chain, block_id: " << get_block_hash(ch_ent->second.bl));
- m_alternative_chains.erase(ch_ent);
+ // FIXME: Why do we keep invalid blocks around? Possibly in case we hear
+ // about them again so we can immediately dismiss them, but needs some
+ // looking into.
+ add_block_as_invalid(ch_ent->second, get_block_hash(ch_ent->second.bl));
+ LOG_PRINT_L1("The block was inserted as invalid while connecting new alternative chain, block_id: " << get_block_hash(ch_ent->second.bl));
+ m_alternative_chains.erase(ch_ent);
- for(auto alt_ch_to_orph_iter = ++alt_ch_iter; alt_ch_to_orph_iter != alt_chain.end(); alt_ch_to_orph_iter++)
- {
- add_block_as_invalid((*alt_ch_iter)->second, (*alt_ch_iter)->first);
- m_alternative_chains.erase(*alt_ch_to_orph_iter);
- }
- return false;
- }
- }
-
- // if we're to keep the disconnected blocks, add them as alternates
- if(!discard_disconnected_chain)
- {
- //pushing old chain as alternative chain
- for (auto& old_ch_ent : disconnected_chain)
- {
- block_verification_context bvc = boost::value_initialized<block_verification_context>();
- bool r = handle_alternative_block(old_ch_ent, get_block_hash(old_ch_ent), bvc);
- if(!r)
- {
- LOG_PRINT_L1("Failed to push ex-main chain blocks to alternative chain ");
- // previously this would fail the blockchain switching, but I don't
- // think this is bad enough to warrant that.
- }
- }
+ for(auto alt_ch_to_orph_iter = ++alt_ch_iter; alt_ch_to_orph_iter != alt_chain.end(); alt_ch_to_orph_iter++)
+ {
+ add_block_as_invalid((*alt_ch_iter)->second, (*alt_ch_iter)->first);
+ m_alternative_chains.erase(*alt_ch_to_orph_iter);
+ }
+ return false;
+ }
+ }
+
+ // if we're to keep the disconnected blocks, add them as alternates
+ if(!discard_disconnected_chain)
+ {
+ //pushing old chain as alternative chain
+ for (auto& old_ch_ent : disconnected_chain)
+ {
+ block_verification_context bvc = boost::value_initialized<block_verification_context>();
+ bool r = handle_alternative_block(old_ch_ent, get_block_hash(old_ch_ent), bvc);
+ if(!r)
+ {
+ LOG_PRINT_L1("Failed to push ex-main chain blocks to alternative chain ");
+ // previously this would fail the blockchain switching, but I don't
+ // think this is bad enough to warrant that.
+ }
}
+ }
- //removing alt_chain entries from alternative chain
- BOOST_FOREACH(auto ch_ent, alt_chain)
- {
- m_alternative_chains.erase(ch_ent);
- }
+ //removing alt_chain entries from alternative chain
+ BOOST_FOREACH(auto ch_ent, alt_chain)
+ {
+ m_alternative_chains.erase(ch_ent);
+ }
- m_hardfork->reorganize_from_chain_height(split_height);
+ m_hardfork->reorganize_from_chain_height(split_height);
- LOG_PRINT_GREEN("REORGANIZE SUCCESS! on height: " << split_height << ", new blockchain size: " << m_db->height(), LOG_LEVEL_0);
- return true;
+ LOG_PRINT_GREEN("REORGANIZE SUCCESS! on height: " << split_height << ", new blockchain size: " << m_db->height(), LOG_LEVEL_0);
+ return true;
}
//------------------------------------------------------------------
// This function calculates the difficulty target for the block being added to
// an alternate chain.
difficulty_type Blockchain::get_next_difficulty_for_alternative_chain(const std::list<blocks_ext_by_hash::iterator>& alt_chain, block_extended_info& bei) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- std::vector<uint64_t> timestamps;
- std::vector<difficulty_type> cumulative_difficulties;
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ std::vector<uint64_t> timestamps;
+ std::vector<difficulty_type> cumulative_difficulties;
- // if the alt chain isn't long enough to calculate the difficulty target
- // based on its blocks alone, need to get more blocks from the main chain
- if(alt_chain.size()< DIFFICULTY_BLOCKS_COUNT)
- {
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ // if the alt chain isn't long enough to calculate the difficulty target
+ // based on its blocks alone, need to get more blocks from the main chain
+ if(alt_chain.size()< DIFFICULTY_BLOCKS_COUNT)
+ {
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- // Figure out start and stop offsets for main chain blocks
- size_t main_chain_stop_offset = alt_chain.size() ? alt_chain.front()->second.height : bei.height;
- size_t main_chain_count = DIFFICULTY_BLOCKS_COUNT - std::min(static_cast<size_t>(DIFFICULTY_BLOCKS_COUNT), alt_chain.size());
- main_chain_count = std::min(main_chain_count, main_chain_stop_offset);
- size_t main_chain_start_offset = main_chain_stop_offset - main_chain_count;
+ // Figure out start and stop offsets for main chain blocks
+ size_t main_chain_stop_offset = alt_chain.size() ? alt_chain.front()->second.height : bei.height;
+ size_t main_chain_count = DIFFICULTY_BLOCKS_COUNT - std::min(static_cast<size_t>(DIFFICULTY_BLOCKS_COUNT), alt_chain.size());
+ main_chain_count = std::min(main_chain_count, main_chain_stop_offset);
+ size_t main_chain_start_offset = main_chain_stop_offset - main_chain_count;
- if(!main_chain_start_offset)
- ++main_chain_start_offset; //skip genesis block
+ if(!main_chain_start_offset)
+ ++main_chain_start_offset; //skip genesis block
- // get difficulties and timestamps from relevant main chain blocks
- for(; main_chain_start_offset < main_chain_stop_offset; ++main_chain_start_offset)
- {
- timestamps.push_back(m_db->get_block_timestamp(main_chain_start_offset));
- cumulative_difficulties.push_back(m_db->get_block_cumulative_difficulty(main_chain_start_offset));
- }
+ // get difficulties and timestamps from relevant main chain blocks
+ for(; main_chain_start_offset < main_chain_stop_offset; ++main_chain_start_offset)
+ {
+ timestamps.push_back(m_db->get_block_timestamp(main_chain_start_offset));
+ cumulative_difficulties.push_back(m_db->get_block_cumulative_difficulty(main_chain_start_offset));
+ }
- // make sure we haven't accidentally grabbed too many blocks...maybe don't need this check?
- CHECK_AND_ASSERT_MES((alt_chain.size() + timestamps.size()) <= DIFFICULTY_BLOCKS_COUNT, false, "Internal error, alt_chain.size()[" << alt_chain.size() << "] + vtimestampsec.size()[" << timestamps.size() << "] NOT <= DIFFICULTY_WINDOW[]" << DIFFICULTY_BLOCKS_COUNT);
+ // make sure we haven't accidentally grabbed too many blocks...maybe don't need this check?
+ CHECK_AND_ASSERT_MES((alt_chain.size() + timestamps.size()) <= DIFFICULTY_BLOCKS_COUNT, false, "Internal error, alt_chain.size()[" << alt_chain.size() << "] + vtimestampsec.size()[" << timestamps.size() << "] NOT <= DIFFICULTY_WINDOW[]" << DIFFICULTY_BLOCKS_COUNT);
- for (auto it : alt_chain)
- {
- timestamps.push_back(it->second.bl.timestamp);
- cumulative_difficulties.push_back(it->second.cumulative_difficulty);
- }
+ for (auto it : alt_chain)
+ {
+ timestamps.push_back(it->second.bl.timestamp);
+ cumulative_difficulties.push_back(it->second.cumulative_difficulty);
}
- // if the alt chain is long enough for the difficulty calc, grab difficulties
- // and timestamps from it alone
- else
+ }
+ // if the alt chain is long enough for the difficulty calc, grab difficulties
+ // and timestamps from it alone
+ else
+ {
+ timestamps.resize(static_cast<size_t>(DIFFICULTY_BLOCKS_COUNT));
+ cumulative_difficulties.resize(static_cast<size_t>(DIFFICULTY_BLOCKS_COUNT));
+ size_t count = 0;
+ size_t max_i = timestamps.size()-1;
+ // get difficulties and timestamps from most recent blocks in alt chain
+ BOOST_REVERSE_FOREACH(auto it, alt_chain)
{
- timestamps.resize(static_cast<size_t>(DIFFICULTY_BLOCKS_COUNT));
- cumulative_difficulties.resize(static_cast<size_t>(DIFFICULTY_BLOCKS_COUNT));
- size_t count = 0;
- size_t max_i = timestamps.size()-1;
- // get difficulties and timestamps from most recent blocks in alt chain
- BOOST_REVERSE_FOREACH(auto it, alt_chain)
- {
- timestamps[max_i - count] = it->second.bl.timestamp;
- cumulative_difficulties[max_i - count] = it->second.cumulative_difficulty;
- count++;
- if(count >= DIFFICULTY_BLOCKS_COUNT)
- break;
- }
+ timestamps[max_i - count] = it->second.bl.timestamp;
+ cumulative_difficulties[max_i - count] = it->second.cumulative_difficulty;
+ count++;
+ if(count >= DIFFICULTY_BLOCKS_COUNT)
+ break;
}
+ }
- // FIXME: This will fail if fork activation heights are subject to voting
- size_t target = get_ideal_hard_fork_version(bei.height) < 2 ? DIFFICULTY_TARGET_V1 : DIFFICULTY_TARGET;
+ // FIXME: This will fail if fork activation heights are subject to voting
+ size_t target = get_ideal_hard_fork_version(bei.height) < 2 ? DIFFICULTY_TARGET_V1 : DIFFICULTY_TARGET;
- // calculate the difficulty target for the block and return it
- return next_difficulty(timestamps, cumulative_difficulties, target);
+ // calculate the difficulty target for the block and return it
+ return next_difficulty(timestamps, cumulative_difficulties, target);
}
//------------------------------------------------------------------
// This function does a sanity check on basic things that all miner
@@ -894,94 +894,94 @@ difficulty_type Blockchain::get_next_difficulty_for_alternative_chain(const std:
// a non-overflowing tx amount (dubious necessity on this check)
bool Blockchain::prevalidate_miner_transaction(const block& b, uint64_t height)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CHECK_AND_ASSERT_MES(b.miner_tx.vin.size() == 1, false, "coinbase transaction in the block has no inputs");
- CHECK_AND_ASSERT_MES(b.miner_tx.vin[0].type() == typeid(txin_gen), false, "coinbase transaction in the block has the wrong type");
- if(boost::get<txin_gen>(b.miner_tx.vin[0]).height != height)
- {
- LOG_PRINT_RED_L1("The miner transaction in block has invalid height: " << boost::get<txin_gen>(b.miner_tx.vin[0]).height << ", expected: " << height);
- return false;
- }
- CHECK_AND_ASSERT_MES(b.miner_tx.unlock_time == height + CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW, false, "coinbase transaction transaction has the wrong unlock time=" << b.miner_tx.unlock_time << ", expected " << height + CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW);
-
- //check outs overflow
- //NOTE: not entirely sure this is necessary, given that this function is
- // designed simply to make sure the total amount for a transaction
- // does not overflow a uint64_t, and this transaction *is* a uint64_t...
- if(!check_outs_overflow(b.miner_tx))
- {
- LOG_PRINT_RED_L1("miner transaction has money overflow in block " << get_block_hash(b));
- return false;
- }
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CHECK_AND_ASSERT_MES(b.miner_tx.vin.size() == 1, false, "coinbase transaction in the block has no inputs");
+ CHECK_AND_ASSERT_MES(b.miner_tx.vin[0].type() == typeid(txin_gen), false, "coinbase transaction in the block has the wrong type");
+ if(boost::get<txin_gen>(b.miner_tx.vin[0]).height != height)
+ {
+ LOG_PRINT_RED_L1("The miner transaction in block has invalid height: " << boost::get<txin_gen>(b.miner_tx.vin[0]).height << ", expected: " << height);
+ return false;
+ }
+ CHECK_AND_ASSERT_MES(b.miner_tx.unlock_time == height + CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW, false, "coinbase transaction transaction has the wrong unlock time=" << b.miner_tx.unlock_time << ", expected " << height + CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW);
+
+ //check outs overflow
+ //NOTE: not entirely sure this is necessary, given that this function is
+ // designed simply to make sure the total amount for a transaction
+ // does not overflow a uint64_t, and this transaction *is* a uint64_t...
+ if(!check_outs_overflow(b.miner_tx))
+ {
+ LOG_PRINT_RED_L1("miner transaction has money overflow in block " << get_block_hash(b));
+ return false;
+ }
- return true;
+ return true;
}
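
To make the checks in prevalidate_miner_transaction easier to follow in isolation, here is a hedged sketch of the same three structural rules: a single txin_gen input, a claimed height that matches the block height, and an unlock time of height plus the unlock window. MinerTxSketch and prevalidate_coinbase_sketch are hypothetical stand-ins, not types from the codebase, and the 60-block window is the mainnet value assumed for illustration.

#include <cstdint>
#include <cstddef>
#include <iostream>

// Hypothetical, stripped-down stand-in for the real block/transaction types.
struct MinerTxSketch
{
  uint64_t gen_height;    // height claimed by the single txin_gen input
  uint64_t unlock_time;
  size_t   vin_count;
};

// Mirrors the shape of the checks above: exactly one generation input, the
// claimed height must equal the block height, and the unlock time must be
// height + unlock window (60 on mainnet).
bool prevalidate_coinbase_sketch(const MinerTxSketch& tx, uint64_t height)
{
  const uint64_t UNLOCK_WINDOW = 60; // CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW
  if (tx.vin_count != 1)
    return false;
  if (tx.gen_height != height)
    return false;
  if (tx.unlock_time != height + UNLOCK_WINDOW)
    return false;
  return true;
}

int main()
{
  MinerTxSketch ok{100, 160, 1};
  MinerTxSketch bad{100, 150, 1};   // wrong unlock time
  std::cout << prevalidate_coinbase_sketch(ok, 100) << " "
            << prevalidate_coinbase_sketch(bad, 100) << std::endl; // 1 0
}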
//------------------------------------------------------------------
// This function validates the miner transaction reward
bool Blockchain::validate_miner_transaction(const block& b, size_t cumulative_block_size, uint64_t fee, uint64_t& base_reward, uint64_t already_generated_coins)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- //validate reward
- uint64_t money_in_use = 0;
- BOOST_FOREACH(auto& o, b.miner_tx.vout)
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ //validate reward
+ uint64_t money_in_use = 0;
+ BOOST_FOREACH(auto& o, b.miner_tx.vout)
money_in_use += o.amount;
- std::vector<size_t> last_blocks_sizes;
- get_last_n_blocks_sizes(last_blocks_sizes, CRYPTONOTE_REWARD_BLOCKS_WINDOW);
- if (!get_block_reward(epee::misc_utils::median(last_blocks_sizes), cumulative_block_size, already_generated_coins, base_reward, get_current_hard_fork_version()))
- {
- LOG_PRINT_L1("block size " << cumulative_block_size << " is bigger than allowed for this blockchain");
- return false;
- }
- if(base_reward + fee < money_in_use)
- {
- LOG_PRINT_L1("coinbase transaction spend too much money (" << print_money(money_in_use) << "). Block reward is " << print_money(base_reward + fee) << "(" << print_money(base_reward) << "+" << print_money(fee) << ")");
- return false;
- }
- // From hard fork 2, we allow a miner to claim less block reward than is allowed, in case a miner wants less dust
- if (m_hardfork->get_current_version() < 2)
- {
- if(base_reward + fee != money_in_use)
- {
- LOG_PRINT_L1("coinbase transaction doesn't use full amount of block reward: spent: " << money_in_use << ", block reward " << base_reward + fee << "(" << base_reward << "+" << fee << ")");
- return false;
- }
- }
- else
- {
- // from hard fork 2, since a miner can claim less than the full block reward, we update the base_reward
- // to show the amount of coins that were actually generated, the remainder will be pushed back for later
- // emission. This modifies the emission curve very slightly.
- CHECK_AND_ASSERT_MES(money_in_use - fee <= base_reward, false, "base reward calculation bug");
- base_reward = money_in_use - fee;
- }
- return true;
+ std::vector<size_t> last_blocks_sizes;
+ get_last_n_blocks_sizes(last_blocks_sizes, CRYPTONOTE_REWARD_BLOCKS_WINDOW);
+ if (!get_block_reward(epee::misc_utils::median(last_blocks_sizes), cumulative_block_size, already_generated_coins, base_reward, get_current_hard_fork_version()))
+ {
+ LOG_PRINT_L1("block size " << cumulative_block_size << " is bigger than allowed for this blockchain");
+ return false;
+ }
+ if(base_reward + fee < money_in_use)
+ {
+ LOG_PRINT_L1("coinbase transaction spend too much money (" << print_money(money_in_use) << "). Block reward is " << print_money(base_reward + fee) << "(" << print_money(base_reward) << "+" << print_money(fee) << ")");
+ return false;
+ }
+ // From hard fork 2, we allow a miner to claim less than the full block reward, in case the miner wants less dust
+ if (m_hardfork->get_current_version() < 2)
+ {
+ if(base_reward + fee != money_in_use)
+ {
+ LOG_PRINT_L1("coinbase transaction doesn't use full amount of block reward: spent: " << money_in_use << ", block reward " << base_reward + fee << "(" << base_reward << "+" << fee << ")");
+ return false;
+ }
+ }
+ else
+ {
+ // from hard fork 2, since a miner can claim less than the full block reward, we update the base_reward
+ // to show the amount of coins that were actually generated; the remainder will be pushed back for later
+ // emission. This modifies the emission curve very slightly.
+ CHECK_AND_ASSERT_MES(money_in_use - fee <= base_reward, false, "base reward calculation bug");
+ base_reward = money_in_use - fee;
+ }
+ return true;
}
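
The reward comparison above boils down to a few integer checks once base_reward is known. The following is a simplified sketch under that assumption — validate_reward_sketch is a made-up name, and the real function first derives base_reward from the median block size via get_block_reward().

#include <cstdint>
#include <iostream>

// Sketch of the reward check above. Amounts are in atomic units.
bool validate_reward_sketch(uint64_t money_in_use, uint64_t fee,
                            uint64_t& base_reward, int hf_version)
{
  if (base_reward + fee < money_in_use)
    return false;                               // coinbase claims more than allowed
  if (hf_version < 2)
    return base_reward + fee == money_in_use;   // must claim exactly the reward
  // From v2 a miner may claim less; record what was actually emitted so the
  // remainder is pushed back into later emission.
  base_reward = money_in_use - fee;
  return true;
}

int main()
{
  uint64_t base = 1000;
  bool ok = validate_reward_sketch(900, 100, base, 2);
  std::cout << ok << " new base_reward=" << base << std::endl; // 1 new base_reward=800
}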
//------------------------------------------------------------------
// get the block sizes of the last <count> blocks, starting at <from_height>
// and return by reference <sz>.
void Blockchain::get_last_n_blocks_sizes(std::vector<size_t>& sz, size_t count) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- auto h = m_db->height();
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ auto h = m_db->height();
- // this function is meaningless for an empty blockchain...granted it should never be empty
- if(h == 0)
- return;
+ // this function is meaningless for an empty blockchain...granted it should never be empty
+ if(h == 0)
+ return;
- // add size of last <count> blocks to vector <sz> (or less, if blockchain size < count)
- size_t start_offset = h - std::min<size_t>(h, count);
- for(size_t i = start_offset; i < h; i++)
- {
- sz.push_back(m_db->get_block_size(i));
- }
+ // add size of last <count> blocks to vector <sz> (or less, if blockchain size < count)
+ size_t start_offset = h - std::min<size_t>(h, count);
+ for(size_t i = start_offset; i < h; i++)
+ {
+ sz.push_back(m_db->get_block_size(i));
+ }
}
//------------------------------------------------------------------
uint64_t Blockchain::get_current_cumulative_blocksize_limit() const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- return m_current_block_cumul_sz_limit;
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ return m_current_block_cumul_sz_limit;
}
//------------------------------------------------------------------
//TODO: This function only needed minor modification to work with BlockchainDB,
@@ -997,159 +997,159 @@ uint64_t Blockchain::get_current_cumulative_blocksize_limit() const
// necessary at all.
bool Blockchain::create_block_template(block& b, const account_public_address& miner_address, difficulty_type& diffic, uint64_t& height, const blobdata& ex_nonce)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- size_t median_size;
- uint64_t already_generated_coins;
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ size_t median_size;
+ uint64_t already_generated_coins;
- CRITICAL_REGION_BEGIN(m_blockchain_lock);
- height = m_db->height();
+ CRITICAL_REGION_BEGIN(m_blockchain_lock);
+ height = m_db->height();
- b.major_version = m_hardfork->get_current_version();
- b.minor_version = m_hardfork->get_ideal_version();
- b.prev_id = get_tail_id();
- b.timestamp = time(NULL);
+ b.major_version = m_hardfork->get_current_version();
+ b.minor_version = m_hardfork->get_ideal_version();
+ b.prev_id = get_tail_id();
+ b.timestamp = time(NULL);
- diffic = get_difficulty_for_next_block();
- CHECK_AND_ASSERT_MES(diffic, false, "difficulty owverhead.");
+ diffic = get_difficulty_for_next_block();
+ CHECK_AND_ASSERT_MES(diffic, false, "difficulty overhead.");
- median_size = m_current_block_cumul_sz_limit / 2;
- already_generated_coins = m_db->get_block_already_generated_coins(height - 1);
+ median_size = m_current_block_cumul_sz_limit / 2;
+ already_generated_coins = m_db->get_block_already_generated_coins(height - 1);
- CRITICAL_REGION_END();
+ CRITICAL_REGION_END();
- size_t txs_size;
- uint64_t fee;
- if (!m_tx_pool.fill_block_template(b, median_size, already_generated_coins, txs_size, fee))
- {
- return false;
- }
+ size_t txs_size;
+ uint64_t fee;
+ if (!m_tx_pool.fill_block_template(b, median_size, already_generated_coins, txs_size, fee))
+ {
+ return false;
+ }
#if defined(DEBUG_CREATE_BLOCK_TEMPLATE)
- size_t real_txs_size = 0;
- uint64_t real_fee = 0;
- CRITICAL_REGION_BEGIN(m_tx_pool.m_transactions_lock);
- BOOST_FOREACH(crypto::hash &cur_hash, b.tx_hashes)
- {
- auto cur_res = m_tx_pool.m_transactions.find(cur_hash);
- if (cur_res == m_tx_pool.m_transactions.end())
- {
- LOG_ERROR("Creating block template: error: transaction not found");
- continue;
- }
- tx_memory_pool::tx_details &cur_tx = cur_res->second;
- real_txs_size += cur_tx.blob_size;
- real_fee += cur_tx.fee;
- if (cur_tx.blob_size != get_object_blobsize(cur_tx.tx))
- {
- LOG_ERROR("Creating block template: error: invalid transaction size");
- }
- uint64_t inputs_amount;
- if (!get_inputs_money_amount(cur_tx.tx, inputs_amount))
- {
- LOG_ERROR("Creating block template: error: cannot get inputs amount");
- }
- else if (cur_tx.fee != inputs_amount - get_outs_money_amount(cur_tx.tx))
- {
- LOG_ERROR("Creating block template: error: invalid fee");
- }
- }
- if (txs_size != real_txs_size)
- {
- LOG_ERROR("Creating block template: error: wrongly calculated transaction size");
- }
- if (fee != real_fee)
- {
- LOG_ERROR("Creating block template: error: wrongly calculated fee");
- }
- CRITICAL_REGION_END();
- LOG_PRINT_L1("Creating block template: height " << height <<
- ", median size " << median_size <<
- ", already generated coins " << already_generated_coins <<
- ", transaction size " << txs_size <<
- ", fee " << fee);
+ size_t real_txs_size = 0;
+ uint64_t real_fee = 0;
+ CRITICAL_REGION_BEGIN(m_tx_pool.m_transactions_lock);
+ BOOST_FOREACH(crypto::hash &cur_hash, b.tx_hashes)
+ {
+ auto cur_res = m_tx_pool.m_transactions.find(cur_hash);
+ if (cur_res == m_tx_pool.m_transactions.end())
+ {
+ LOG_ERROR("Creating block template: error: transaction not found");
+ continue;
+ }
+ tx_memory_pool::tx_details &cur_tx = cur_res->second;
+ real_txs_size += cur_tx.blob_size;
+ real_fee += cur_tx.fee;
+ if (cur_tx.blob_size != get_object_blobsize(cur_tx.tx))
+ {
+ LOG_ERROR("Creating block template: error: invalid transaction size");
+ }
+ uint64_t inputs_amount;
+ if (!get_inputs_money_amount(cur_tx.tx, inputs_amount))
+ {
+ LOG_ERROR("Creating block template: error: cannot get inputs amount");
+ }
+ else if (cur_tx.fee != inputs_amount - get_outs_money_amount(cur_tx.tx))
+ {
+ LOG_ERROR("Creating block template: error: invalid fee");
+ }
+ }
+ if (txs_size != real_txs_size)
+ {
+ LOG_ERROR("Creating block template: error: wrongly calculated transaction size");
+ }
+ if (fee != real_fee)
+ {
+ LOG_ERROR("Creating block template: error: wrongly calculated fee");
+ }
+ CRITICAL_REGION_END();
+ LOG_PRINT_L1("Creating block template: height " << height <<
+ ", median size " << median_size <<
+ ", already generated coins " << already_generated_coins <<
+ ", transaction size " << txs_size <<
+ ", fee " << fee);
#endif
- /*
- two-phase miner transaction generation: we don't know exact block size until we prepare block, but we don't know reward until we know
- block size, so first miner transaction generated with fake amount of money, and with phase we know think we know expected block size
- */
- //make blocks coin-base tx looks close to real coinbase tx to get truthful blob size
- bool r = construct_miner_tx(height, median_size, already_generated_coins, txs_size, fee, miner_address, b.miner_tx, ex_nonce, 11, m_hardfork->get_current_version());
- CHECK_AND_ASSERT_MES(r, false, "Failed to construc miner tx, first chance");
- size_t cumulative_size = txs_size + get_object_blobsize(b.miner_tx);
+ /*
+ two-phase miner transaction generation: we don't know the exact block size until we prepare the block, but we don't know the reward until we know
+ the block size, so the miner transaction is first generated with a placeholder amount of money, and in the second phase we know the expected block size
+ */
+ //make the block's coinbase tx look close to a real coinbase tx to get a truthful blob size
+ bool r = construct_miner_tx(height, median_size, already_generated_coins, txs_size, fee, miner_address, b.miner_tx, ex_nonce, 11, m_hardfork->get_current_version());
+ CHECK_AND_ASSERT_MES(r, false, "Failed to construc miner tx, first chance");
+ size_t cumulative_size = txs_size + get_object_blobsize(b.miner_tx);
#if defined(DEBUG_CREATE_BLOCK_TEMPLATE)
- LOG_PRINT_L1("Creating block template: miner tx size " << get_object_blobsize(b.miner_tx) <<
- ", cumulative size " << cumulative_size);
+ LOG_PRINT_L1("Creating block template: miner tx size " << get_object_blobsize(b.miner_tx) <<
+ ", cumulative size " << cumulative_size);
#endif
- for (size_t try_count = 0; try_count != 10; ++try_count)
- {
- r = construct_miner_tx(height, median_size, already_generated_coins, cumulative_size, fee, miner_address, b.miner_tx, ex_nonce, 11, m_hardfork->get_current_version());
+ for (size_t try_count = 0; try_count != 10; ++try_count)
+ {
+ r = construct_miner_tx(height, median_size, already_generated_coins, cumulative_size, fee, miner_address, b.miner_tx, ex_nonce, 11, m_hardfork->get_current_version());
- CHECK_AND_ASSERT_MES(r, false, "Failed to construc miner tx, second chance");
- size_t coinbase_blob_size = get_object_blobsize(b.miner_tx);
- if (coinbase_blob_size > cumulative_size - txs_size)
- {
- cumulative_size = txs_size + coinbase_blob_size;
+ CHECK_AND_ASSERT_MES(r, false, "Failed to construc miner tx, second chance");
+ size_t coinbase_blob_size = get_object_blobsize(b.miner_tx);
+ if (coinbase_blob_size > cumulative_size - txs_size)
+ {
+ cumulative_size = txs_size + coinbase_blob_size;
#if defined(DEBUG_CREATE_BLOCK_TEMPLATE)
- LOG_PRINT_L1("Creating block template: miner tx size " << coinbase_blob_size <<
- ", cumulative size " << cumulative_size << " is greater then before");
+ LOG_PRINT_L1("Creating block template: miner tx size " << coinbase_blob_size <<
+ ", cumulative size " << cumulative_size << " is greater then before");
#endif
- continue;
- }
+ continue;
+ }
- if (coinbase_blob_size < cumulative_size - txs_size)
- {
- size_t delta = cumulative_size - txs_size - coinbase_blob_size;
+ if (coinbase_blob_size < cumulative_size - txs_size)
+ {
+ size_t delta = cumulative_size - txs_size - coinbase_blob_size;
#if defined(DEBUG_CREATE_BLOCK_TEMPLATE)
- LOG_PRINT_L1("Creating block template: miner tx size " << coinbase_blob_size <<
- ", cumulative size " << txs_size + coinbase_blob_size <<
- " is less then before, adding " << delta << " zero bytes");
+ LOG_PRINT_L1("Creating block template: miner tx size " << coinbase_blob_size <<
+ ", cumulative size " << txs_size + coinbase_blob_size <<
+ " is less then before, adding " << delta << " zero bytes");
#endif
- b.miner_tx.extra.insert(b.miner_tx.extra.end(), delta, 0);
- //here could be 1 byte difference, because of extra field counter is varint, and it can become from 1-byte len to 2-bytes len.
- if (cumulative_size != txs_size + get_object_blobsize(b.miner_tx))
- {
- CHECK_AND_ASSERT_MES(cumulative_size + 1 == txs_size + get_object_blobsize(b.miner_tx), false, "unexpected case: cumulative_size=" << cumulative_size << " + 1 is not equal txs_cumulative_size=" << txs_size << " + get_object_blobsize(b.miner_tx)=" << get_object_blobsize(b.miner_tx));
- b.miner_tx.extra.resize(b.miner_tx.extra.size() - 1);
- if (cumulative_size != txs_size + get_object_blobsize(b.miner_tx))
- {
- //fuck, not lucky, -1 makes varint-counter size smaller, in that case we continue to grow with cumulative_size
- LOG_PRINT_RED("Miner tx creation has no luck with delta_extra size = " << delta << " and " << delta - 1 , LOG_LEVEL_2);
- cumulative_size += delta - 1;
- continue;
- }
- LOG_PRINT_GREEN("Setting extra for block: " << b.miner_tx.extra.size() << ", try_count=" << try_count, LOG_LEVEL_1);
- }
- }
- CHECK_AND_ASSERT_MES(cumulative_size == txs_size + get_object_blobsize(b.miner_tx), false, "unexpected case: cumulative_size=" << cumulative_size << " is not equal txs_cumulative_size=" << txs_size << " + get_object_blobsize(b.miner_tx)=" << get_object_blobsize(b.miner_tx));
+ b.miner_tx.extra.insert(b.miner_tx.extra.end(), delta, 0);
+ //there could be a 1 byte difference here, because the extra field counter is a varint and can grow from a 1-byte length to a 2-byte length.
+ if (cumulative_size != txs_size + get_object_blobsize(b.miner_tx))
+ {
+ CHECK_AND_ASSERT_MES(cumulative_size + 1 == txs_size + get_object_blobsize(b.miner_tx), false, "unexpected case: cumulative_size=" << cumulative_size << " + 1 is not equal txs_cumulative_size=" << txs_size << " + get_object_blobsize(b.miner_tx)=" << get_object_blobsize(b.miner_tx));
+ b.miner_tx.extra.resize(b.miner_tx.extra.size() - 1);
+ if (cumulative_size != txs_size + get_object_blobsize(b.miner_tx))
+ {
+ //not lucky: -1 makes the varint counter size smaller, so in that case we continue to grow cumulative_size
+ LOG_PRINT_RED("Miner tx creation has no luck with delta_extra size = " << delta << " and " << delta - 1 , LOG_LEVEL_2);
+ cumulative_size += delta - 1;
+ continue;
+ }
+ LOG_PRINT_GREEN("Setting extra for block: " << b.miner_tx.extra.size() << ", try_count=" << try_count, LOG_LEVEL_1);
+ }
+ }
+ CHECK_AND_ASSERT_MES(cumulative_size == txs_size + get_object_blobsize(b.miner_tx), false, "unexpected case: cumulative_size=" << cumulative_size << " is not equal txs_cumulative_size=" << txs_size << " + get_object_blobsize(b.miner_tx)=" << get_object_blobsize(b.miner_tx));
#if defined(DEBUG_CREATE_BLOCK_TEMPLATE)
- LOG_PRINT_L1("Creating block template: miner tx size " << coinbase_blob_size <<
- ", cumulative size " << cumulative_size << " is now good");
+ LOG_PRINT_L1("Creating block template: miner tx size " << coinbase_blob_size <<
+ ", cumulative size " << cumulative_size << " is now good");
#endif
- return true;
- }
- LOG_ERROR("Failed to create_block_template with " << 10 << " tries");
- return false;
+ return true;
+ }
+ LOG_ERROR("Failed to create_block_template with " << 10 << " tries");
+ return false;
}
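
The padding loop in create_block_template exists because the coinbase blob size depends on the extra field it is being padded with, including a varint length prefix that can grow by one byte. The toy model below reproduces just that feedback loop; ToyCoinbase and its size model are invented for illustration and do not reflect the real serialization format.

#include <cstdint>
#include <cstddef>
#include <iostream>
#include <vector>

// Toy coinbase tx whose blob size depends on its extra field: base bytes plus
// the extra payload plus a varint length prefix that grows from one byte to
// two once the payload reaches 128 bytes. Only meant to show why the padding
// loop above may need the +/-1 byte adjustment.
struct ToyCoinbase
{
  size_t base_size = 70;
  std::vector<uint8_t> extra;
  size_t blob_size() const
  {
    return base_size + extra.size() + (extra.size() < 128 ? 1 : 2);
  }
};

int main()
{
  const size_t txs_size = 1000;
  ToyCoinbase tx;
  // pretend the reward/size negotiation assumed the coinbase would occupy 200 bytes
  size_t cumulative_size = txs_size + 200;
  for (int try_count = 0; try_count != 10; ++try_count)
  {
    size_t coinbase_size = tx.blob_size();
    if (coinbase_size > cumulative_size - txs_size)
    {
      cumulative_size = txs_size + coinbase_size;   // grow the assumption and retry
      continue;
    }
    if (coinbase_size < cumulative_size - txs_size)
    {
      size_t delta = cumulative_size - txs_size - coinbase_size;
      tx.extra.insert(tx.extra.end(), delta, 0);    // pad with zero bytes
      if (cumulative_size != txs_size + tx.blob_size())
        tx.extra.pop_back();                        // varint prefix grew by one byte
    }
    if (cumulative_size == txs_size + tx.blob_size())
    {
      std::cout << "converged: coinbase blob " << tx.blob_size()
                << " bytes on try " << try_count + 1 << std::endl;
      return 0;
    }
  }
  std::cout << "did not converge" << std::endl;
}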
//------------------------------------------------------------------
// for an alternate chain, get the timestamps from the main chain to complete
// the needed number of timestamps for the BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW.
bool Blockchain::complete_timestamps_vector(uint64_t start_top_height, std::vector<uint64_t>& timestamps)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
-
- if(timestamps.size() >= BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)
- return true;
+ LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- size_t need_elements = BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW - timestamps.size();
- CHECK_AND_ASSERT_MES(start_top_height < m_db->height(), false, "internal error: passed start_height not < " << " m_db->height() -- " << start_top_height << " >= " << m_db->height());
- size_t stop_offset = start_top_height > need_elements ? start_top_height - need_elements : 0;
- while (start_top_height != stop_offset)
- {
- timestamps.push_back(m_db->get_block_timestamp(start_top_height));
- --start_top_height;
- }
+ if(timestamps.size() >= BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)
return true;
+
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ size_t need_elements = BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW - timestamps.size();
+ CHECK_AND_ASSERT_MES(start_top_height < m_db->height(), false, "internal error: passed start_height not < " << " m_db->height() -- " << start_top_height << " >= " << m_db->height());
+ size_t stop_offset = start_top_height > need_elements ? start_top_height - need_elements : 0;
+ while (start_top_height != stop_offset)
+ {
+ timestamps.push_back(m_db->get_block_timestamp(start_top_height));
+ --start_top_height;
+ }
+ return true;
}
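
complete_timestamps_vector simply walks backwards from the given height until the timestamp window is full. Below is a self-contained sketch of that walk, with get_timestamp standing in for m_db->get_block_timestamp() and a window of 5 instead of BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW.

#include <cstdint>
#include <cstddef>
#include <iostream>
#include <vector>

// Stand-in for m_db->get_block_timestamp(): pretend block h was mined at
// 1000000 + 120*h seconds.
static uint64_t get_timestamp(uint64_t h) { return 1000000 + 120 * h; }

// Top up `timestamps` with main-chain timestamps, walking backwards from
// start_top_height, until the check window is full (or height 0 is reached).
void complete_timestamps_sketch(uint64_t start_top_height,
                                std::vector<uint64_t>& timestamps,
                                size_t window)
{
  if (timestamps.size() >= window)
    return;
  size_t need = window - timestamps.size();
  uint64_t stop = start_top_height > need ? start_top_height - need : 0;
  while (start_top_height != stop)
  {
    timestamps.push_back(get_timestamp(start_top_height));
    --start_top_height;
  }
}

int main()
{
  std::vector<uint64_t> ts = { 1001200, 1001320 };   // two alt-chain timestamps
  complete_timestamps_sketch(9, ts, 5);              // need three more, from heights 9, 8, 7
  for (uint64_t t : ts) std::cout << t << " ";
  std::cout << std::endl;
}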
//------------------------------------------------------------------
// If a block is to be added and its parent block is not the current
@@ -1160,215 +1160,214 @@ bool Blockchain::complete_timestamps_vector(uint64_t start_top_height, std::vect
// a long forked chain eventually.
bool Blockchain::handle_alternative_block(const block& b, const crypto::hash& id, block_verification_context& bvc)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- m_timestamps_and_difficulties_height = 0;
- uint64_t block_height = get_block_height(b);
- if(0 == block_height)
- {
- LOG_PRINT_L1("Block with id: " << epee::string_tools::pod_to_hex(id) << " (as alternative), but miner tx says height is 0.");
- bvc.m_verifivation_failed = true;
- return false;
- }
- // this basically says if the blockchain is smaller than the first
- // checkpoint then alternate blocks are allowed. Alternatively, if the
- // last checkpoint *before* the end of the current chain is also before
- // the block to be added, then this is fine.
- if (!m_checkpoints.is_alternative_block_allowed(get_current_blockchain_height(), block_height))
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ m_timestamps_and_difficulties_height = 0;
+ uint64_t block_height = get_block_height(b);
+ if(0 == block_height)
+ {
+ LOG_PRINT_L1("Block with id: " << epee::string_tools::pod_to_hex(id) << " (as alternative), but miner tx says height is 0.");
+ bvc.m_verifivation_failed = true;
+ return false;
+ }
+ // this basically says if the blockchain is smaller than the first
+ // checkpoint then alternate blocks are allowed. Alternatively, if the
+ // last checkpoint *before* the end of the current chain is also before
+ // the block to be added, then this is fine.
+ if (!m_checkpoints.is_alternative_block_allowed(get_current_blockchain_height(), block_height))
+ {
+ LOG_PRINT_RED_L1("Block with id: " << id << std::endl << " can't be accepted for alternative chain, block height: " << block_height << std::endl << " blockchain height: " << get_current_blockchain_height());
+ bvc.m_verifivation_failed = true;
+ return false;
+ }
+
+ //block is not related to the head of the main chain
+ //first of all - look in alternative chains container
+ auto it_prev = m_alternative_chains.find(b.prev_id);
+ bool parent_in_main = m_db->block_exists(b.prev_id);
+ if(it_prev != m_alternative_chains.end() || parent_in_main)
+ {
+ //we have new block in alternative chain
+
+ //build alternative subchain, front -> mainchain, back -> alternative head
+ blocks_ext_by_hash::iterator alt_it = it_prev; //m_alternative_chains.find()
+ std::list<blocks_ext_by_hash::iterator> alt_chain;
+ std::vector<uint64_t> timestamps;
+ while(alt_it != m_alternative_chains.end())
{
- LOG_PRINT_RED_L1("Block with id: " << id << std::endl << " can't be accepted for alternative chain, block height: " << block_height << std::endl << " blockchain height: " << get_current_blockchain_height());
- bvc.m_verifivation_failed = true;
- return false;
+ alt_chain.push_front(alt_it);
+ timestamps.push_back(alt_it->second.bl.timestamp);
+ alt_it = m_alternative_chains.find(alt_it->second.bl.prev_id);
}
- //block is not related with head of main chain
- //first of all - look in alternative chains container
- auto it_prev = m_alternative_chains.find(b.prev_id);
- bool parent_in_main = m_db->block_exists(b.prev_id);
- if(it_prev != m_alternative_chains.end() || parent_in_main)
+ // if block to be added connects to known blocks that aren't part of the
+ // main chain -- that is, if we're adding on to an alternate chain
+ if(alt_chain.size())
{
- //we have new block in alternative chain
-
- //build alternative subchain, front -> mainchain, back -> alternative head
- blocks_ext_by_hash::iterator alt_it = it_prev; //m_alternative_chains.find()
- std::list<blocks_ext_by_hash::iterator> alt_chain;
- std::vector<uint64_t> timestamps;
- while(alt_it != m_alternative_chains.end())
- {
- alt_chain.push_front(alt_it);
- timestamps.push_back(alt_it->second.bl.timestamp);
- alt_it = m_alternative_chains.find(alt_it->second.bl.prev_id);
- }
-
- // if block to be added connects to known blocks that aren't part of the
- // main chain -- that is, if we're adding on to an alternate chain
- if(alt_chain.size())
- {
- // make sure alt chain doesn't somehow start past the end of the main chain
- CHECK_AND_ASSERT_MES(m_db->height() > alt_chain.front()->second.height, false, "main blockchain wrong height");
+ // make sure alt chain doesn't somehow start past the end of the main chain
+ CHECK_AND_ASSERT_MES(m_db->height() > alt_chain.front()->second.height, false, "main blockchain wrong height");
- // make sure that the blockchain contains the block that should connect
- // this alternate chain with it.
- if (!m_db->block_exists(alt_chain.front()->second.bl.prev_id))
- {
- LOG_PRINT_L1("alternate chain does not appear to connect to main chain...");
- return false;
- }
+ // make sure that the blockchain contains the block that should connect
+ // this alternate chain with it.
+ if (!m_db->block_exists(alt_chain.front()->second.bl.prev_id))
+ {
+ LOG_PRINT_L1("alternate chain does not appear to connect to main chain...");
+ return false;
+ }
- // make sure block connects correctly to the main chain
- auto h = m_db->get_block_hash_from_height(alt_chain.front()->second.height - 1);
- CHECK_AND_ASSERT_MES(h == alt_chain.front()->second.bl.prev_id, false, "alternative chain has wrong connection to main chain");
- complete_timestamps_vector(m_db->get_block_height(alt_chain.front()->second.bl.prev_id), timestamps);
- }
- // if block not associated with known alternate chain
- else
- {
- // if block parent is not part of main chain or an alternate chain,
- // we ignore it
- CHECK_AND_ASSERT_MES(parent_in_main, false, "internal error: broken imperative condition it_main_prev != m_blocks_index.end()");
+ // make sure block connects correctly to the main chain
+ auto h = m_db->get_block_hash_from_height(alt_chain.front()->second.height - 1);
+ CHECK_AND_ASSERT_MES(h == alt_chain.front()->second.bl.prev_id, false, "alternative chain has wrong connection to main chain");
+ complete_timestamps_vector(m_db->get_block_height(alt_chain.front()->second.bl.prev_id), timestamps);
+ }
+ // if block not associated with known alternate chain
+ else
+ {
+ // if block parent is not part of main chain or an alternate chain,
+ // we ignore it
+ CHECK_AND_ASSERT_MES(parent_in_main, false, "internal error: broken imperative condition it_main_prev != m_blocks_index.end()");
- complete_timestamps_vector(m_db->get_block_height(b.prev_id), timestamps);
- }
+ complete_timestamps_vector(m_db->get_block_height(b.prev_id), timestamps);
+ }
- // verify that the block's timestamp is within the acceptable range
- // (not earlier than the median of the last X blocks)
- if(!check_block_timestamp(timestamps, b))
- {
- LOG_PRINT_RED_L1("Block with id: " << id << std::endl << " for alternative chain, has invalid timestamp: " << b.timestamp);
- bvc.m_verifivation_failed = true;
- return false;
- }
+ // verify that the block's timestamp is within the acceptable range
+ // (not earlier than the median of the last X blocks)
+ if(!check_block_timestamp(timestamps, b))
+ {
+ LOG_PRINT_RED_L1("Block with id: " << id << std::endl << " for alternative chain, has invalid timestamp: " << b.timestamp);
+ bvc.m_verifivation_failed = true;
+ return false;
+ }
- // FIXME: consider moving away from block_extended_info at some point
- block_extended_info bei = boost::value_initialized<block_extended_info>();
- bei.bl = b;
- bei.height = alt_chain.size() ? it_prev->second.height + 1 : m_db->get_block_height(b.prev_id) + 1;
+ // FIXME: consider moving away from block_extended_info at some point
+ block_extended_info bei = boost::value_initialized<block_extended_info>();
+ bei.bl = b;
+ bei.height = alt_chain.size() ? it_prev->second.height + 1 : m_db->get_block_height(b.prev_id) + 1;
- bool is_a_checkpoint;
- if(!m_checkpoints.check_block(bei.height, id, is_a_checkpoint))
- {
- LOG_ERROR("CHECKPOINT VALIDATION FAILED");
- bvc.m_verifivation_failed = true;
- return false;
- }
+ bool is_a_checkpoint;
+ if(!m_checkpoints.check_block(bei.height, id, is_a_checkpoint))
+ {
+ LOG_ERROR("CHECKPOINT VALIDATION FAILED");
+ bvc.m_verifivation_failed = true;
+ return false;
+ }
- // Check the block's hash against the difficulty target for its alt chain
- m_is_in_checkpoint_zone = false;
- difficulty_type current_diff = get_next_difficulty_for_alternative_chain(alt_chain, bei);
- CHECK_AND_ASSERT_MES(current_diff, false, "!!!!!!! DIFFICULTY OVERHEAD !!!!!!!");
- crypto::hash proof_of_work = null_hash;
- get_block_longhash(bei.bl, proof_of_work, bei.height);
- if(!check_hash(proof_of_work, current_diff))
- {
- LOG_PRINT_RED_L1("Block with id: " << id << std::endl << " for alternative chain, does not have enough proof of work: " << proof_of_work << std::endl << " expected difficulty: " << current_diff);
- bvc.m_verifivation_failed = true;
- return false;
- }
+ // Check the block's hash against the difficulty target for its alt chain
+ m_is_in_checkpoint_zone = false;
+ difficulty_type current_diff = get_next_difficulty_for_alternative_chain(alt_chain, bei);
+ CHECK_AND_ASSERT_MES(current_diff, false, "!!!!!!! DIFFICULTY OVERHEAD !!!!!!!");
+ crypto::hash proof_of_work = null_hash;
+ get_block_longhash(bei.bl, proof_of_work, bei.height);
+ if(!check_hash(proof_of_work, current_diff))
+ {
+ LOG_PRINT_RED_L1("Block with id: " << id << std::endl << " for alternative chain, does not have enough proof of work: " << proof_of_work << std::endl << " expected difficulty: " << current_diff);
+ bvc.m_verifivation_failed = true;
+ return false;
+ }
- if(!prevalidate_miner_transaction(b, bei.height))
- {
- LOG_PRINT_RED_L1("Block with id: " << epee::string_tools::pod_to_hex(id) << " (as alternative) has incorrect miner transaction.");
- bvc.m_verifivation_failed = true;
- return false;
+ if(!prevalidate_miner_transaction(b, bei.height))
+ {
+ LOG_PRINT_RED_L1("Block with id: " << epee::string_tools::pod_to_hex(id) << " (as alternative) has incorrect miner transaction.");
+ bvc.m_verifivation_failed = true;
+ return false;
+ }
- }
+ // FIXME:
+ // this brings up an interesting point: consider allowing block
+ // difficulty to be fetched both by height OR by hash, not just by height.
+ difficulty_type main_chain_cumulative_difficulty = m_db->get_block_cumulative_difficulty(m_db->height() - 1);
+ if (alt_chain.size())
+ {
+ bei.cumulative_difficulty = it_prev->second.cumulative_difficulty;
+ }
+ else
+ {
+ // passed-in block's previous block's cumulative difficulty, found on the main chain
+ bei.cumulative_difficulty = m_db->get_block_cumulative_difficulty(m_db->get_block_height(b.prev_id));
+ }
+ bei.cumulative_difficulty += current_diff;
- // FIXME:
- // this brings up an interesting point: consider allowing to get block
- // difficulty both by height OR by hash, not just height.
- difficulty_type main_chain_cumulative_difficulty = m_db->get_block_cumulative_difficulty(m_db->height() - 1);
- if (alt_chain.size())
- {
- bei.cumulative_difficulty = it_prev->second.cumulative_difficulty;
- }
- else
- {
- // passed-in block's previous block's cumulative difficulty, found on the main chain
- bei.cumulative_difficulty = m_db->get_block_cumulative_difficulty(m_db->get_block_height(b.prev_id));
- }
- bei.cumulative_difficulty += current_diff;
+ // add block to alternate blocks storage,
+ // as well as the current "alt chain" container
+ auto i_res = m_alternative_chains.insert(blocks_ext_by_hash::value_type(id, bei));
+ CHECK_AND_ASSERT_MES(i_res.second, false, "insertion of new alternative block returned as it already exists");
+ alt_chain.push_back(i_res.first);
- // add block to alternate blocks storage,
- // as well as the current "alt chain" container
- auto i_res = m_alternative_chains.insert(blocks_ext_by_hash::value_type(id, bei));
- CHECK_AND_ASSERT_MES(i_res.second, false, "insertion of new alternative block returned as it already exist");
- alt_chain.push_back(i_res.first);
+ // FIXME: is it even possible for a checkpoint to show up not on the main chain?
+ if(is_a_checkpoint)
+ {
+ //do reorganize!
+ LOG_PRINT_GREEN("###### REORGANIZE on height: " << alt_chain.front()->second.height << " of " << m_db->height() - 1 << ", checkpoint is found in alternative chain on height " << bei.height, LOG_LEVEL_0);
- // FIXME: is it even possible for a checkpoint to show up not on the main chain?
- if(is_a_checkpoint)
- {
- //do reorganize!
- LOG_PRINT_GREEN("###### REORGANIZE on height: " << alt_chain.front()->second.height << " of " << m_db->height() - 1 << ", checkpoint is found in alternative chain on height " << bei.height, LOG_LEVEL_0);
+ bool r = switch_to_alternative_blockchain(alt_chain, true);
- bool r = switch_to_alternative_blockchain(alt_chain, true);
+ bvc.m_added_to_main_chain = r;
+ bvc.m_verifivation_failed = !r;
- bvc.m_added_to_main_chain = r;
- bvc.m_verifivation_failed = !r;
+ return r;
+ }
+ else if(main_chain_cumulative_difficulty < bei.cumulative_difficulty) //check if cumulative difficulty is bigger than in the main chain
+ {
+ //do reorganize!
+ LOG_PRINT_GREEN("###### REORGANIZE on height: " << alt_chain.front()->second.height << " of " << m_db->height() - 1 << " with cum_difficulty " << m_db->get_block_cumulative_difficulty(m_db->height() - 1) << std::endl << " alternative blockchain size: " << alt_chain.size() << " with cum_difficulty " << bei.cumulative_difficulty, LOG_LEVEL_0);
- return r;
- }
- else if(main_chain_cumulative_difficulty < bei.cumulative_difficulty) //check if difficulty bigger then in main chain
- {
- //do reorganize!
- LOG_PRINT_GREEN("###### REORGANIZE on height: " << alt_chain.front()->second.height << " of " << m_db->height() - 1 << " with cum_difficulty " << m_db->get_block_cumulative_difficulty(m_db->height() - 1) << std::endl << " alternative blockchain size: " << alt_chain.size() << " with cum_difficulty " << bei.cumulative_difficulty, LOG_LEVEL_0);
-
- bool r = switch_to_alternative_blockchain(alt_chain, false);
- if (r)
- bvc.m_added_to_main_chain = true;
- else
- bvc.m_verifivation_failed = true;
- return r;
- }
- else
- {
- LOG_PRINT_BLUE("----- BLOCK ADDED AS ALTERNATIVE ON HEIGHT " << bei.height << std::endl << "id:\t" << id << std::endl << "PoW:\t" << proof_of_work << std::endl << "difficulty:\t" << current_diff, LOG_LEVEL_0);
- return true;
- }
+ bool r = switch_to_alternative_blockchain(alt_chain, false);
+ if (r)
+ bvc.m_added_to_main_chain = true;
+ else
+ bvc.m_verifivation_failed = true;
+ return r;
}
else
{
- //block orphaned
- bvc.m_marked_as_orphaned = true;
- LOG_PRINT_RED_L1("Block recognized as orphaned and rejected, id = " << id);
+ LOG_PRINT_BLUE("----- BLOCK ADDED AS ALTERNATIVE ON HEIGHT " << bei.height << std::endl << "id:\t" << id << std::endl << "PoW:\t" << proof_of_work << std::endl << "difficulty:\t" << current_diff, LOG_LEVEL_0);
+ return true;
}
+ }
+ else
+ {
+ //block orphaned
+ bvc.m_marked_as_orphaned = true;
+ LOG_PRINT_RED_L1("Block recognized as orphaned and rejected, id = " << id);
+ }
- return true;
+ return true;
}
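
Stripped of all the validation above, handle_alternative_block ends with a two-way reorganization decision: a checkpointed alt block always triggers a reorg, otherwise only an alt chain with more cumulative difficulty than the main chain does. A minimal sketch of just that decision, under an invented function name:

#include <cstdint>
#include <iostream>

// Sketch of the reorganization decision made at the end of
// handle_alternative_block(): either the alt block is a checkpoint, or the
// alternative chain now carries more cumulative work than the main chain.
bool should_reorganize_sketch(bool is_a_checkpoint,
                              uint64_t main_cumulative_difficulty,
                              uint64_t alt_cumulative_difficulty)
{
  if (is_a_checkpoint)
    return true;
  return main_cumulative_difficulty < alt_cumulative_difficulty;
}

int main()
{
  std::cout << should_reorganize_sketch(false, 5000, 4900) << " "      // 0: block stays alternative
            << should_reorganize_sketch(false, 5000, 5001) << " "      // 1: heavier alt chain
            << should_reorganize_sketch(true, 5000, 100) << std::endl; // 1: checkpoint wins
}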
//------------------------------------------------------------------
bool Blockchain::get_blocks(uint64_t start_offset, size_t count, std::list<block>& blocks, std::list<transaction>& txs) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- if(start_offset > m_db->height())
- return false;
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ if(start_offset > m_db->height())
+ return false;
- if (!get_blocks(start_offset, count, blocks))
- {
- return false;
- }
+ if (!get_blocks(start_offset, count, blocks))
+ {
+ return false;
+ }
- for(const block& blk : blocks)
- {
- std::list<crypto::hash> missed_ids;
- get_transactions(blk.tx_hashes, txs, missed_ids);
- CHECK_AND_ASSERT_MES(!missed_ids.size(), false, "has missed transactions in own block in main blockchain");
- }
+ for(const block& blk : blocks)
+ {
+ std::list<crypto::hash> missed_ids;
+ get_transactions(blk.tx_hashes, txs, missed_ids);
+ CHECK_AND_ASSERT_MES(!missed_ids.size(), false, "has missed transactions in own block in main blockchain");
+ }
- return true;
+ return true;
}
//------------------------------------------------------------------
bool Blockchain::get_blocks(uint64_t start_offset, size_t count, std::list<block>& blocks) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- if(start_offset > m_db->height())
- return false;
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ if(start_offset > m_db->height())
+ return false;
- for(size_t i = start_offset; i < start_offset + count && i < m_db->height();i++)
- {
- blocks.push_back(m_db->get_block_from_height(i));
- }
- return true;
+ for(size_t i = start_offset; i < start_offset + count && i < m_db->height();i++)
+ {
+ blocks.push_back(m_db->get_block_from_height(i));
+ }
+ return true;
}
//------------------------------------------------------------------
//TODO: This function *looks* like it won't need to be rewritten
@@ -1376,68 +1375,67 @@ bool Blockchain::get_blocks(uint64_t start_offset, size_t count, std::list<block
// but it warrants some looking into later.
bool Blockchain::handle_get_objects(NOTIFY_REQUEST_GET_OBJECTS::request& arg, NOTIFY_RESPONSE_GET_OBJECTS::request& rsp)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- rsp.current_blockchain_height = get_current_blockchain_height();
- std::list<block> blocks;
- get_blocks(arg.blocks, blocks, rsp.missed_ids);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ rsp.current_blockchain_height = get_current_blockchain_height();
+ std::list<block> blocks;
+ get_blocks(arg.blocks, blocks, rsp.missed_ids);
- BOOST_FOREACH(const auto& bl, blocks)
- {
- std::list<crypto::hash> missed_tx_id;
- std::list<transaction> txs;
- get_transactions(bl.tx_hashes, txs, rsp.missed_ids);
- CHECK_AND_ASSERT_MES(!missed_tx_id.size(), false, "Internal error: has missed missed_tx_id.size()=" << missed_tx_id.size()
- << std::endl << "for block id = " << get_block_hash(bl));
- rsp.blocks.push_back(block_complete_entry());
- block_complete_entry& e = rsp.blocks.back();
- //pack block
- e.block = t_serializable_object_to_blob(bl);
- //pack transactions
- BOOST_FOREACH(transaction& tx, txs)
- e.txs.push_back(t_serializable_object_to_blob(tx));
-
- }
- //get another transactions, if need
+ BOOST_FOREACH(const auto& bl, blocks)
+ {
+ std::list<crypto::hash> missed_tx_id;
std::list<transaction> txs;
- get_transactions(arg.txs, txs, rsp.missed_ids);
- //pack aside transactions
- BOOST_FOREACH(const auto& tx, txs)
+ get_transactions(bl.tx_hashes, txs, rsp.missed_ids);
+ CHECK_AND_ASSERT_MES(!missed_tx_id.size(), false, "Internal error: missed transactions, missed_tx_id.size()=" << missed_tx_id.size()
+ << std::endl << "for block id = " << get_block_hash(bl));
+ rsp.blocks.push_back(block_complete_entry());
+ block_complete_entry& e = rsp.blocks.back();
+ //pack block
+ e.block = t_serializable_object_to_blob(bl);
+ //pack transactions
+ BOOST_FOREACH(transaction& tx, txs)
+ e.txs.push_back(t_serializable_object_to_blob(tx));
+ }
+ //get other transactions, if needed
+ std::list<transaction> txs;
+ get_transactions(arg.txs, txs, rsp.missed_ids);
+ //pack aside transactions
+ BOOST_FOREACH(const auto& tx, txs)
rsp.txs.push_back(t_serializable_object_to_blob(tx));
- return true;
+ return true;
}
//------------------------------------------------------------------
bool Blockchain::get_alternative_blocks(std::list<block>& blocks) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- BOOST_FOREACH(const auto& alt_bl, m_alternative_chains)
- {
- blocks.push_back(alt_bl.second.bl);
- }
- return true;
+ BOOST_FOREACH(const auto& alt_bl, m_alternative_chains)
+ {
+ blocks.push_back(alt_bl.second.bl);
+ }
+ return true;
}
//------------------------------------------------------------------
size_t Blockchain::get_alternative_blocks_count() const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- return m_alternative_chains.size();
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ return m_alternative_chains.size();
}
//------------------------------------------------------------------
// This function adds the output specified by <amount, i> to the result_outs container
// unlocked and other such checks should be done by here.
void Blockchain::add_out_to_get_random_outs(COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::outs_for_amount& result_outs, uint64_t amount, size_t i) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::out_entry& oen = *result_outs.outs.insert(result_outs.outs.end(), COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::out_entry());
- oen.global_amount_index = i;
- output_data_t data = m_db->get_output_key(amount, i);
- oen.out_key = data.pubkey;
+ COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::out_entry& oen = *result_outs.outs.insert(result_outs.outs.end(), COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::out_entry());
+ oen.global_amount_index = i;
+ output_data_t data = m_db->get_output_key(amount, i);
+ oen.out_key = data.pubkey;
}
//------------------------------------------------------------------
// This function takes an RPC request for mixins and creates an RPC response
@@ -1446,90 +1444,90 @@ void Blockchain::add_out_to_get_random_outs(COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_A
// in some cases
bool Blockchain::get_random_outs_for_amounts(const COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::request& req, COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::response& res) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+
+ // for each amount that we need to get mixins for, get <n> random outputs
+ // from BlockchainDB where <n> is req.outs_count (number of mixins).
+ for (uint64_t amount : req.amounts)
+ {
+ auto num_outs = m_db->get_num_outputs(amount);
+ // ensure we don't include outputs that aren't yet eligible to be used
+ // outputs are sorted by height
+ while (num_outs > 0)
+ {
+ const tx_out_index toi = m_db->get_output_tx_and_index(amount, num_outs - 1);
+ const uint64_t height = m_db->get_tx_block_height(toi.first);
+ if (height + CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE <= m_db->height())
+ break;
+ --num_outs;
+ }
+
+ // create outs_for_amount struct and populate amount field
+ COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::outs_for_amount& result_outs = *res.outs.insert(res.outs.end(), COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::outs_for_amount());
+ result_outs.amount = amount;
+
+ std::unordered_set<uint64_t> seen_indices;
- // for each amount that we need to get mixins for, get <n> random outputs
- // from BlockchainDB where <n> is req.outs_count (number of mixins).
- for (uint64_t amount : req.amounts)
+ // if there aren't enough outputs to mix with (or just enough),
+ // use all of them. Eventually this should become impossible.
+ if (num_outs <= req.outs_count)
{
- auto num_outs = m_db->get_num_outputs(amount);
- // ensure we don't include outputs that aren't yet eligible to be used
- // outpouts are sorted by height
- while (num_outs > 0)
+ for (uint64_t i = 0; i < num_outs; i++)
+ {
+ // get tx_hash, tx_out_index from DB
+ tx_out_index toi = m_db->get_output_tx_and_index(amount, i);
+
+ // if tx is unlocked, add output to result_outs
+ if (is_tx_spendtime_unlocked(m_db->get_tx_unlock_time(toi.first)))
{
- const tx_out_index toi = m_db->get_output_tx_and_index(amount, num_outs - 1);
- const uint64_t height = m_db->get_tx_block_height(toi.first);
- if (height + CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE <= m_db->height())
- break;
- --num_outs;
+ add_out_to_get_random_outs(result_outs, amount, i);
+ }
+
+ }
+ }
+ else
+ {
+ // while we still need more mixins
+ while (result_outs.outs.size() < req.outs_count)
+ {
+ // if we've gone through every possible output, we've gotten all we can
+ if (seen_indices.size() == num_outs)
+ {
+ break;
}
- // create outs_for_amount struct and populate amount field
- COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::outs_for_amount& result_outs = *res.outs.insert(res.outs.end(), COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::outs_for_amount());
- result_outs.amount = amount;
+ // get a random output index from the DB. If we've already seen it,
+ // return to the top of the loop and try again, otherwise add it to the
+ // list of output indices we've seen.
- std::unordered_set<uint64_t> seen_indices;
+ // triangular distribution over [a,b) with a=0, mode c=b=num_outs
+ uint64_t r = crypto::rand<uint64_t>() % ((uint64_t)1 << 53);
+ double frac = std::sqrt((double)r / ((uint64_t)1 << 53));
+ uint64_t i = (uint64_t)(frac*num_outs);
+ // just in case rounding up to 1 occurs after sqrt
+ if (i == num_outs)
+ --i;
- // if there aren't enough outputs to mix with (or just enough),
- // use all of them. Eventually this should become impossible.
- if (num_outs <= req.outs_count)
+ if (seen_indices.count(i))
{
- for (uint64_t i = 0; i < num_outs; i++)
- {
- // get tx_hash, tx_out_index from DB
- tx_out_index toi = m_db->get_output_tx_and_index(amount, i);
+ continue;
+ }
+ seen_indices.emplace(i);
- // if tx is unlocked, add output to result_outs
- if (is_tx_spendtime_unlocked(m_db->get_tx_unlock_time(toi.first)))
- {
- add_out_to_get_random_outs(result_outs, amount, i);
- }
+ // get tx_hash, tx_out_index from DB
+ tx_out_index toi = m_db->get_output_tx_and_index(amount, i);
- }
- }
- else
+ // if the output's transaction is unlocked, add the output's index to
+ // our list.
+ if (is_tx_spendtime_unlocked(m_db->get_tx_unlock_time(toi.first)))
{
- // while we still need more mixins
- while (result_outs.outs.size() < req.outs_count)
- {
- // if we've gone through every possible output, we've gotten all we can
- if (seen_indices.size() == num_outs)
- {
- break;
- }
-
- // get a random output index from the DB. If we've already seen it,
- // return to the top of the loop and try again, otherwise add it to the
- // list of output indices we've seen.
-
- // triangular distribution over [a,b) with a=0, mode c=b=up_index_limit
- uint64_t r = crypto::rand<uint64_t>() % ((uint64_t)1 << 53);
- double frac = std::sqrt((double)r / ((uint64_t)1 << 53));
- uint64_t i = (uint64_t)(frac*num_outs);
- // just in case rounding up to 1 occurs after sqrt
- if (i == num_outs)
- --i;
-
- if (seen_indices.count(i))
- {
- continue;
- }
- seen_indices.emplace(i);
-
- // get tx_hash, tx_out_index from DB
- tx_out_index toi = m_db->get_output_tx_and_index(amount, i);
-
- // if the output's transaction is unlocked, add the output's index to
- // our list.
- if (is_tx_spendtime_unlocked(m_db->get_tx_unlock_time(toi.first)))
- {
- add_out_to_get_random_outs(result_outs, amount, i);
- }
- }
+ add_out_to_get_random_outs(result_outs, amount, i);
}
+ }
}
- return true;
+ }
+ return true;
}
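
The mixin picker above samples output indices with a triangular distribution (the square root of a uniform draw), which biases selection toward newer, higher-index outputs. The sketch below isolates that trick; std::mt19937_64 replaces crypto::rand<uint64_t>() purely to keep the example self-contained, and the 25%/75% split noted in the comment follows from the triangular CDF.

#include <cstdint>
#include <cmath>
#include <iostream>
#include <random>

// Pick an output index in [0, num_outs) with a triangular distribution whose
// mode is at the newest output, matching the sqrt trick above.
uint64_t pick_output_index(uint64_t num_outs, std::mt19937_64& rng)
{
  const uint64_t M = (uint64_t)1 << 53;
  uint64_t r = rng() % M;
  double frac = std::sqrt((double)r / M);   // inverse CDF of the triangular distribution
  uint64_t i = (uint64_t)(frac * num_outs);
  if (i == num_outs)                        // guard against rounding up after sqrt
    --i;
  return i;
}

int main()
{
  std::mt19937_64 rng(42);
  const uint64_t num_outs = 1000;
  uint64_t low = 0, high = 0;
  for (int n = 0; n < 100000; ++n)
    (pick_output_index(num_outs, rng) < num_outs / 2 ? low : high)++;
  // roughly 25% of picks land in the older half, 75% in the newer half
  std::cout << "older half: " << low << ", newer half: " << high << std::endl;
}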
//------------------------------------------------------------------
// This function takes a list of block hashes from another node
@@ -1537,175 +1535,175 @@ bool Blockchain::get_random_outs_for_amounts(const COMMAND_RPC_GET_RANDOM_OUTPUT
// This is used to see what to send another node that needs to sync.
bool Blockchain::find_blockchain_supplement(const std::list<crypto::hash>& qblock_ids, uint64_t& starter_offset) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- // make sure the request includes at least the genesis block, otherwise
- // how can we expect to sync from the client that the block list came from?
- if(!qblock_ids.size() /*|| !req.m_total_height*/)
+ // make sure the request includes at least the genesis block, otherwise
+ // how can we expect to sync from the client that the block list came from?
+ if(!qblock_ids.size() /*|| !req.m_total_height*/)
+ {
+ LOG_PRINT_L1("Client sent wrong NOTIFY_REQUEST_CHAIN: m_block_ids.size()=" << qblock_ids.size() << /*", m_height=" << req.m_total_height <<*/ ", dropping connection");
+ return false;
+ }
+
+ // make sure that the last block in the request's block list matches
+ // the genesis block
+ auto gen_hash = m_db->get_block_hash_from_height(0);
+ if(qblock_ids.back() != gen_hash)
+ {
+ LOG_PRINT_L1("Client sent wrong NOTIFY_REQUEST_CHAIN: genesis block missmatch: " << std::endl << "id: " << qblock_ids.back() << ", " << std::endl << "expected: " << gen_hash << "," << std::endl << " dropping connection");
+ return false;
+ }
+
+ // Find the first block the foreign chain has that we also have.
+ // Assume qblock_ids is in reverse-chronological order.
+ auto bl_it = qblock_ids.begin();
+ uint64_t split_height = 0;
+ for(; bl_it != qblock_ids.end(); bl_it++)
+ {
+ try
{
- LOG_PRINT_L1("Client sent wrong NOTIFY_REQUEST_CHAIN: m_block_ids.size()=" << qblock_ids.size() << /*", m_height=" << req.m_total_height <<*/ ", dropping connection");
- return false;
+ split_height = m_db->get_block_height(*bl_it);
+ break;
}
-
- // make sure that the last block in the request's block list matches
- // the genesis block
- auto gen_hash = m_db->get_block_hash_from_height(0);
- if(qblock_ids.back() != gen_hash)
+ catch (const BLOCK_DNE& e)
{
- LOG_PRINT_L1("Client sent wrong NOTIFY_REQUEST_CHAIN: genesis block missmatch: " << std::endl << "id: " << qblock_ids.back() << ", " << std::endl << "expected: " << gen_hash << "," << std::endl << " dropping connection");
- return false;
+ continue;
}
-
- // Find the first block the foreign chain has that we also have.
- // Assume qblock_ids is in reverse-chronological order.
- auto bl_it = qblock_ids.begin();
- uint64_t split_height = 0;
- for(; bl_it != qblock_ids.end(); bl_it++)
+ catch (const std::exception& e)
{
- try
- {
- split_height = m_db->get_block_height(*bl_it);
- break;
- }
- catch (const BLOCK_DNE& e)
- {
- continue;
- }
- catch (const std::exception& e)
- {
- LOG_PRINT_L1("Non-critical error trying to find block by hash in BlockchainDB, hash: " << *bl_it);
- return false;
- }
+ LOG_PRINT_L1("Non-critical error trying to find block by hash in BlockchainDB, hash: " << *bl_it);
+ return false;
}
+ }
- // this should be impossible, as we checked that we share the genesis block,
- // but just in case...
- if(bl_it == qblock_ids.end())
- {
- LOG_PRINT_L1("Internal error handling connection, can't find split point");
- return false;
- }
+ // this should be impossible, as we checked that we share the genesis block,
+ // but just in case...
+ if(bl_it == qblock_ids.end())
+ {
+ LOG_PRINT_L1("Internal error handling connection, can't find split point");
+ return false;
+ }
- // if split_height remains 0, we didn't have any but the genesis block in common
- // which is only fine if the blocks just have the genesis block
- if(split_height == 0 && qblock_ids.size() > 1)
- {
- LOG_ERROR("Ours and foreign blockchain have only genesis block in common... o.O");
- return false;
- }
+ // if split_height remains 0, we had nothing but the genesis block in common,
+ // which is only fine if the peer's block list contains just the genesis block
+ if(split_height == 0 && qblock_ids.size() > 1)
+ {
+ LOG_ERROR("Ours and foreign blockchain have only genesis block in common... o.O");
+ return false;
+ }
- //we start to put block ids INCLUDING last known id, just to make other side be sure
- starter_offset = split_height;
- return true;
+ //we start to put block ids INCLUDING the last known id, just to make the other side sure
+ starter_offset = split_height;
+ return true;
}
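
The split-point search above reduces to: walk the peer's reverse-chronological hash list and return the height of the first hash we also have. Here is a hedged sketch with an unordered_map standing in for BlockchainDB and string hashes in place of crypto::hash; find_split_point_sketch is an invented name.

#include <cstdint>
#include <iostream>
#include <list>
#include <string>
#include <unordered_map>

// Stand-in chain index: block hash -> height. In the real code this lookup is
// m_db->get_block_height(), which throws BLOCK_DNE for unknown hashes.
using ChainIndex = std::unordered_map<std::string, uint64_t>;

// Walk the peer's hash list (newest first) and return the height of the first
// block both sides share, i.e. the point from which to start sending blocks.
bool find_split_point_sketch(const ChainIndex& ours,
                             const std::list<std::string>& qblock_ids,
                             uint64_t& starter_offset)
{
  for (const std::string& h : qblock_ids)
  {
    auto it = ours.find(h);
    if (it != ours.end())
    {
      starter_offset = it->second;
      return true;
    }
  }
  return false;   // not even the genesis block in common
}

int main()
{
  ChainIndex ours = { {"genesis", 0}, {"a", 1}, {"b", 2}, {"c", 3} };
  // the peer forked after height 2: it knows "b" but then has "x", "y"
  std::list<std::string> peer = { "y", "x", "b", "a", "genesis" };
  uint64_t split = 0;
  if (find_split_point_sketch(ours, peer, split))
    std::cout << "split at height " << split << std::endl;   // split at height 2
}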
//------------------------------------------------------------------
uint64_t Blockchain::block_difficulty(uint64_t i) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- try
- {
- return m_db->get_block_difficulty(i);
- }
- catch (const BLOCK_DNE& e)
- {
- LOG_PRINT_L0("Attempted to get block difficulty for height above blockchain height");
- }
- return 0;
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ try
+ {
+ return m_db->get_block_difficulty(i);
+ }
+ catch (const BLOCK_DNE& e)
+ {
+ LOG_PRINT_L0("Attempted to get block difficulty for height above blockchain height");
+ }
+ return 0;
}
//------------------------------------------------------------------
template<class t_ids_container, class t_blocks_container, class t_missed_container>
bool Blockchain::get_blocks(const t_ids_container& block_ids, t_blocks_container& blocks, t_missed_container& missed_bs) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- for (const auto& block_hash : block_ids)
+ for (const auto& block_hash : block_ids)
+ {
+ try
{
- try
- {
- blocks.push_back(m_db->get_block(block_hash));
- }
- catch (const BLOCK_DNE& e)
- {
- missed_bs.push_back(block_hash);
- }
- catch (const std::exception& e)
- {
- return false;
- }
+ blocks.push_back(m_db->get_block(block_hash));
}
- return true;
+ catch (const BLOCK_DNE& e)
+ {
+ missed_bs.push_back(block_hash);
+ }
+ catch (const std::exception& e)
+ {
+ return false;
+ }
+ }
+ return true;
}
//------------------------------------------------------------------
template<class t_ids_container, class t_tx_container, class t_missed_container>
bool Blockchain::get_transactions(const t_ids_container& txs_ids, t_tx_container& txs, t_missed_container& missed_txs) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- for (const auto& tx_hash : txs_ids)
+ for (const auto& tx_hash : txs_ids)
+ {
+ try
{
- try
- {
- txs.push_back(m_db->get_tx(tx_hash));
- }
- catch (const TX_DNE& e)
- {
- missed_txs.push_back(tx_hash);
- }
- //FIXME: is this the correct way to handle this?
- catch (const std::exception& e)
- {
- return false;
- }
+ txs.push_back(m_db->get_tx(tx_hash));
}
- return true;
+ catch (const TX_DNE& e)
+ {
+ missed_txs.push_back(tx_hash);
+ }
+ //FIXME: is this the correct way to handle this?
+ catch (const std::exception& e)
+ {
+ return false;
+ }
+ }
+ return true;
}
//------------------------------------------------------------------
void Blockchain::print_blockchain(uint64_t start_index, uint64_t end_index) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- std::stringstream ss;
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- auto h = m_db->height();
- if(start_index > h)
- {
- LOG_PRINT_L1("Wrong starter index set: " << start_index << ", expected max index " << h);
- return;
- }
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ std::stringstream ss;
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ auto h = m_db->height();
+ if(start_index > h)
+ {
+ LOG_PRINT_L1("Wrong starter index set: " << start_index << ", expected max index " << h);
+ return;
+ }
- for(size_t i = start_index; i <= h && i != end_index; i++)
- {
- ss << "height " << i << ", timestamp " << m_db->get_block_timestamp(i) << ", cumul_dif " << m_db->get_block_cumulative_difficulty(i) << ", size " << m_db->get_block_size(i) << "\nid\t\t" << m_db->get_block_hash_from_height(i) << "\ndifficulty\t\t" << m_db->get_block_difficulty(i) << ", nonce " << m_db->get_block_from_height(i).nonce << ", tx_count " << m_db->get_block_from_height(i).tx_hashes.size() << std::endl;
- }
- LOG_PRINT_L1("Current blockchain:" << std::endl << ss.str());
- LOG_PRINT_L0("Blockchain printed with log level 1");
+ for(size_t i = start_index; i <= h && i != end_index; i++)
+ {
+ ss << "height " << i << ", timestamp " << m_db->get_block_timestamp(i) << ", cumul_dif " << m_db->get_block_cumulative_difficulty(i) << ", size " << m_db->get_block_size(i) << "\nid\t\t" << m_db->get_block_hash_from_height(i) << "\ndifficulty\t\t" << m_db->get_block_difficulty(i) << ", nonce " << m_db->get_block_from_height(i).nonce << ", tx_count " << m_db->get_block_from_height(i).tx_hashes.size() << std::endl;
+ }
+ LOG_PRINT_L1("Current blockchain:" << std::endl << ss.str());
+ LOG_PRINT_L0("Blockchain printed with log level 1");
}
//------------------------------------------------------------------
void Blockchain::print_blockchain_index() const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- std::stringstream ss;
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- auto height = m_db->height();
- if (height != 0)
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ std::stringstream ss;
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ auto height = m_db->height();
+ if (height != 0)
+ {
+ for(uint64_t i = 0; i <= height; i++)
{
- for(uint64_t i = 0; i <= height; i++)
- {
- ss << "height: " << i << ", hash: " << m_db->get_block_hash_from_height(i);
- }
+ ss << "height: " << i << ", hash: " << m_db->get_block_hash_from_height(i);
}
+ }
- LOG_PRINT_L0("Current blockchain index:" << std::endl << ss.str());
+ LOG_PRINT_L0("Current blockchain index:" << std::endl << ss.str());
}
//------------------------------------------------------------------
//TODO: remove this function and references to it
void Blockchain::print_blockchain_outs(const std::string& file) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- return;
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ return;
}
//------------------------------------------------------------------
// Find the split point between us and foreign blockchain and return
@@ -1713,22 +1711,22 @@ void Blockchain::print_blockchain_outs(const std::string& file) const
// BLOCKS_IDS_SYNCHRONIZING_DEFAULT_COUNT additional (more recent) hashes.
bool Blockchain::find_blockchain_supplement(const std::list<crypto::hash>& qblock_ids, NOTIFY_RESPONSE_CHAIN_ENTRY::request& resp) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- // if we can't find the split point, return false
- if(!find_blockchain_supplement(qblock_ids, resp.start_height))
- {
- return false;
- }
+ // if we can't find the split point, return false
+ if(!find_blockchain_supplement(qblock_ids, resp.start_height))
+ {
+ return false;
+ }
- resp.total_height = get_current_blockchain_height();
- size_t count = 0;
- for(size_t i = resp.start_height; i < resp.total_height && count < BLOCKS_IDS_SYNCHRONIZING_DEFAULT_COUNT; i++, count++)
- {
- resp.m_block_ids.push_back(m_db->get_block_hash_from_height(i));
- }
- return true;
+ resp.total_height = get_current_blockchain_height();
+ size_t count = 0;
+ for(size_t i = resp.start_height; i < resp.total_height && count < BLOCKS_IDS_SYNCHRONIZING_DEFAULT_COUNT; i++, count++)
+ {
+ resp.m_block_ids.push_back(m_db->get_block_hash_from_height(i));
+ }
+ return true;
}
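A minimal sketch of how the chain-entry response above is filled: starting at the split height, at most a fixed number of block hashes are copied out (the real cap is BLOCKS_IDS_SYNCHRONIZING_DEFAULT_COUNT). Hash and build_chain_entry are illustrative names, not the project's API.

#include <cstddef>
#include <cstdint>
#include <vector>

using Hash = std::uint64_t;   // stand-in for crypto::hash

// Copy at most max_count block hashes starting at the split height.
std::vector<Hash> build_chain_entry(const std::vector<Hash>& block_hashes,
                                    std::size_t start_height, std::size_t max_count)
{
  std::vector<Hash> ids;
  for (std::size_t i = start_height; i < block_hashes.size() && ids.size() < max_count; ++i)
    ids.push_back(block_hashes[i]);
  return ids;
}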
//------------------------------------------------------------------
//FIXME: change argument to std::vector, low priority
@@ -1737,82 +1735,82 @@ bool Blockchain::find_blockchain_supplement(const std::list<crypto::hash>& qbloc
// blocks by reference.
bool Blockchain::find_blockchain_supplement(const uint64_t req_start_block, const std::list<crypto::hash>& qblock_ids, std::list<std::pair<block, std::list<transaction> > >& blocks, uint64_t& total_height, uint64_t& start_height, size_t max_count) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- // if a specific start height has been requested
- if(req_start_block > 0)
+ // if a specific start height has been requested
+ if(req_start_block > 0)
+ {
+ // if requested height is higher than our chain, return false -- we can't help
+ if (req_start_block >= m_db->height())
{
- // if requested height is higher than our chain, return false -- we can't help
- if (req_start_block >= m_db->height())
- {
- return false;
- }
- start_height = req_start_block;
+ return false;
}
- else
+ start_height = req_start_block;
+ }
+ else
+ {
+ if(!find_blockchain_supplement(qblock_ids, start_height))
{
- if(!find_blockchain_supplement(qblock_ids, start_height))
- {
- return false;
- }
+ return false;
}
+ }
- total_height = get_current_blockchain_height();
- size_t count = 0;
- for(size_t i = start_height; i < total_height && count < max_count; i++, count++)
- {
- blocks.resize(blocks.size()+1);
- blocks.back().first = m_db->get_block_from_height(i);
- std::list<crypto::hash> mis;
- get_transactions(blocks.back().first.tx_hashes, blocks.back().second, mis);
- CHECK_AND_ASSERT_MES(!mis.size(), false, "internal error, transaction from block not found");
- }
- return true;
+ total_height = get_current_blockchain_height();
+ size_t count = 0;
+ for(size_t i = start_height; i < total_height && count < max_count; i++, count++)
+ {
+ blocks.resize(blocks.size()+1);
+ blocks.back().first = m_db->get_block_from_height(i);
+ std::list<crypto::hash> mis;
+ get_transactions(blocks.back().first.tx_hashes, blocks.back().second, mis);
+ CHECK_AND_ASSERT_MES(!mis.size(), false, "internal error, transaction from block not found");
+ }
+ return true;
}
//------------------------------------------------------------------
bool Blockchain::add_block_as_invalid(const block& bl, const crypto::hash& h)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- block_extended_info bei = AUTO_VAL_INIT(bei);
- bei.bl = bl;
- return add_block_as_invalid(bei, h);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ block_extended_info bei = AUTO_VAL_INIT(bei);
+ bei.bl = bl;
+ return add_block_as_invalid(bei, h);
}
//------------------------------------------------------------------
bool Blockchain::add_block_as_invalid(const block_extended_info& bei, const crypto::hash& h)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- auto i_res = m_invalid_blocks.insert(std::map<crypto::hash, block_extended_info>::value_type(h, bei));
- CHECK_AND_ASSERT_MES(i_res.second, false, "at insertion invalid by tx returned status existed");
- LOG_PRINT_L1("BLOCK ADDED AS INVALID: " << h << std::endl << ", prev_id=" << bei.bl.prev_id << ", m_invalid_blocks count=" << m_invalid_blocks.size());
- return true;
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ auto i_res = m_invalid_blocks.insert(std::map<crypto::hash, block_extended_info>::value_type(h, bei));
+ CHECK_AND_ASSERT_MES(i_res.second, false, "at insertion invalid by tx returned status existed");
+ LOG_PRINT_L1("BLOCK ADDED AS INVALID: " << h << std::endl << ", prev_id=" << bei.bl.prev_id << ", m_invalid_blocks count=" << m_invalid_blocks.size());
+ return true;
}
//------------------------------------------------------------------
bool Blockchain::have_block(const crypto::hash& id) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- if(m_db->block_exists(id))
- {
- LOG_PRINT_L3("block exists in main chain");
- return true;
- }
+ if(m_db->block_exists(id))
+ {
+ LOG_PRINT_L3("block exists in main chain");
+ return true;
+ }
- if(m_alternative_chains.count(id))
- {
- LOG_PRINT_L3("block found in m_alternative_chains");
- return true;
- }
+ if(m_alternative_chains.count(id))
+ {
+ LOG_PRINT_L3("block found in m_alternative_chains");
+ return true;
+ }
- if(m_invalid_blocks.count(id))
- {
- LOG_PRINT_L3("block found in m_invalid_blocks");
- return true;
- }
+ if(m_invalid_blocks.count(id))
+ {
+ LOG_PRINT_L3("block found in m_invalid_blocks");
+ return true;
+ }
- return false;
+ return false;
}
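A condensed sketch of the three lookups have_block() performs above: main chain first, then alternative chains, then the invalid-block list. The containers here are simplified stand-ins for the DB and the two in-memory maps.

#include <string>
#include <unordered_map>
#include <unordered_set>

using BlockHash = std::string;   // stand-in for crypto::hash

bool have_block(const BlockHash& id,
                const std::unordered_set<BlockHash>& main_chain,
                const std::unordered_map<BlockHash, int>& alternative_chains,
                const std::unordered_map<BlockHash, int>& invalid_blocks)
{
  // same order as the code above: main chain, alt chains, invalid blocks
  return main_chain.count(id) != 0
      || alternative_chains.count(id) != 0
      || invalid_blocks.count(id) != 0;
}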
//------------------------------------------------------------------
bool Blockchain::handle_block_to_main_chain(const block& bl, block_verification_context& bvc)
@@ -1824,9 +1822,9 @@ bool Blockchain::handle_block_to_main_chain(const block& bl, block_verification_
//------------------------------------------------------------------
size_t Blockchain::get_total_transactions() const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- return m_db->get_tx_count();
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ return m_db->get_tx_count();
}
//------------------------------------------------------------------
// This function checks each input in the transaction <tx> to make sure it
@@ -1837,79 +1835,79 @@ size_t Blockchain::get_total_transactions() const
// remove them later if the block fails validation.
bool Blockchain::check_for_double_spend(const transaction& tx, key_images_container& keys_this_block) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- struct add_transaction_input_visitor: public boost::static_visitor<bool>
- {
- key_images_container& m_spent_keys;
- BlockchainDB* m_db;
- add_transaction_input_visitor(key_images_container& spent_keys, BlockchainDB* db) :
- m_spent_keys(spent_keys), m_db(db)
- {
- }
- bool operator()(const txin_to_key& in) const
- {
- const crypto::key_image& ki = in.k_image;
-
- // attempt to insert the newly-spent key into the container of
- // keys spent this block. If this fails, the key was spent already
- // in this block, return false to flag that a double spend was detected.
- //
- // if the insert into the block-wide spent keys container succeeds,
- // check the blockchain-wide spent keys container and make sure the
- // key wasn't used in another block already.
- auto r = m_spent_keys.insert(ki);
- if(!r.second || m_db->has_key_image(ki))
- {
- //double spend detected
- return false;
- }
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ struct add_transaction_input_visitor: public boost::static_visitor<bool>
+ {
+ key_images_container& m_spent_keys;
+ BlockchainDB* m_db;
+ add_transaction_input_visitor(key_images_container& spent_keys, BlockchainDB* db) :
+ m_spent_keys(spent_keys), m_db(db)
+ {
+ }
+ bool operator()(const txin_to_key& in) const
+ {
+ const crypto::key_image& ki = in.k_image;
+
+ // attempt to insert the newly-spent key into the container of
+ // keys spent this block. If this fails, the key was spent already
+ // in this block, return false to flag that a double spend was detected.
+ //
+ // if the insert into the block-wide spent keys container succeeds,
+ // check the blockchain-wide spent keys container and make sure the
+ // key wasn't used in another block already.
+ auto r = m_spent_keys.insert(ki);
+ if(!r.second || m_db->has_key_image(ki))
+ {
+ //double spend detected
+ return false;
+ }
- // if no double-spend detected, return true
- return true;
- }
+ // if no double-spend detected, return true
+ return true;
+ }
- bool operator()(const txin_gen& tx) const
- {
- return true;
- }
- bool operator()(const txin_to_script& tx) const
- {
- return false;
- }
- bool operator()(const txin_to_scripthash& tx) const
- {
- return false;
- }
- };
+ bool operator()(const txin_gen& tx) const
+ {
+ return true;
+ }
+ bool operator()(const txin_to_script& tx) const
+ {
+ return false;
+ }
+ bool operator()(const txin_to_scripthash& tx) const
+ {
+ return false;
+ }
+ };
- for (const txin_v& in : tx.vin)
+ for (const txin_v& in : tx.vin)
+ {
+ if(!boost::apply_visitor(add_transaction_input_visitor(keys_this_block, m_db), in))
{
- if(!boost::apply_visitor(add_transaction_input_visitor(keys_this_block, m_db), in))
- {
- LOG_ERROR("Double spend detected!");
- return false;
- }
+ LOG_ERROR("Double spend detected!");
+ return false;
}
+ }
- return true;
+ return true;
}
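A minimal sketch of the double-spend rule enforced above: a key image may appear at most once in the block being checked and must not already be spent on the chain, and a failed set insertion is what signals the in-block duplicate. KeyImage and both sets are simplified stand-ins for the real containers.

#include <string>
#include <unordered_set>
#include <vector>

using KeyImage = std::string;   // stand-in for crypto::key_image

bool check_for_double_spend(const std::vector<KeyImage>& block_inputs,
                            std::unordered_set<KeyImage>& keys_this_block,
                            const std::unordered_set<KeyImage>& chain_spent_keys)
{
  for (const auto& ki : block_inputs)
  {
    // insert() fails if ki was already used by another input in this block;
    // the second test catches a key image already spent in an earlier block.
    if (!keys_this_block.insert(ki).second || chain_spent_keys.count(ki))
      return false;   // double spend detected
  }
  return true;
}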
//------------------------------------------------------------------
bool Blockchain::get_tx_outputs_gindexs(const crypto::hash& tx_id, std::vector<uint64_t>& indexs) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- if (!m_db->tx_exists(tx_id))
- {
- LOG_PRINT_RED_L1("warning: get_tx_outputs_gindexs failed to find transaction with id = " << tx_id);
- return false;
- }
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ if (!m_db->tx_exists(tx_id))
+ {
+ LOG_PRINT_RED_L1("warning: get_tx_outputs_gindexs failed to find transaction with id = " << tx_id);
+ return false;
+ }
- // get amount output indexes, currently referred to in parts as "output global indices", but they are actually specific to amounts
- indexs = m_db->get_tx_amount_output_indices(tx_id);
- CHECK_AND_ASSERT_MES(indexs.size(), false, "internal error: global indexes for transaction " << tx_id << " is empty");
+ // get amount output indexes, currently referred to in parts as "output global indices", but they are actually specific to amounts
+ indexs = m_db->get_tx_amount_output_indices(tx_id);
+ CHECK_AND_ASSERT_MES(indexs.size(), false, "internal error: global indexes for transaction " << tx_id << " is empty");
- return true;
+ return true;
}
//------------------------------------------------------------------
// This function overloads its sister function with
@@ -1917,71 +1915,71 @@ bool Blockchain::get_tx_outputs_gindexs(const crypto::hash& tx_id, std::vector<u
// as a return-by-reference.
bool Blockchain::check_tx_inputs(const transaction& tx, uint64_t& max_used_block_height, crypto::hash& max_used_block_id, bool kept_by_block)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
#if defined(PER_BLOCK_CHECKPOINT)
- // check if we're doing per-block checkpointing
- if (m_db->height() < m_blocks_hash_check.size() && kept_by_block)
- {
- TIME_MEASURE_START(a);
- m_blocks_txs_check.push_back(get_transaction_hash(tx));
- max_used_block_id = null_hash;
- max_used_block_height = 0;
- TIME_MEASURE_FINISH(a);
- if(m_show_time_stats)
- LOG_PRINT_L0("HASH: " << "-" << " VIN/VOUT: " << tx.vin.size() << "/" << tx.vout.size() << " H: " << 0 << " chcktx: " << a);
- return true;
- }
-#endif
-
+ // check if we're doing per-block checkpointing
+ if (m_db->height() < m_blocks_hash_check.size() && kept_by_block)
+ {
TIME_MEASURE_START(a);
- bool res = check_tx_inputs(tx, &max_used_block_height);
+ m_blocks_txs_check.push_back(get_transaction_hash(tx));
+ max_used_block_id = null_hash;
+ max_used_block_height = 0;
TIME_MEASURE_FINISH(a);
- crypto::hash tx_prefix_hash = get_transaction_prefix_hash(tx);
if(m_show_time_stats)
- LOG_PRINT_L0("HASH: " << "+" << " VIN/VOUT: " << tx.vin.size() << "/" << tx.vout.size() << " H: " << max_used_block_height << " chcktx: " << a + m_fake_scan_time);
+ LOG_PRINT_L0("HASH: " << "-" << " VIN/VOUT: " << tx.vin.size() << "/" << tx.vout.size() << " H: " << 0 << " chcktx: " << a);
+ return true;
+ }
+#endif
- if (!res)
- return false;
+ TIME_MEASURE_START(a);
+ bool res = check_tx_inputs(tx, &max_used_block_height);
+ TIME_MEASURE_FINISH(a);
+ crypto::hash tx_prefix_hash = get_transaction_prefix_hash(tx);
+ if(m_show_time_stats)
+ LOG_PRINT_L0("HASH: " << "+" << " VIN/VOUT: " << tx.vin.size() << "/" << tx.vout.size() << " H: " << max_used_block_height << " chcktx: " << a + m_fake_scan_time);
- // ND: Speedup:
- // 1. keep a list of verified transactions, when the Blockchain tries to check a tx again,
- // verify against list and skip if already verified to be correct.
- m_check_tx_inputs_table.emplace(tx_prefix_hash, std::make_pair(res, max_used_block_height));
+ if (!res)
+ return false;
- CHECK_AND_ASSERT_MES(max_used_block_height < m_db->height(), false, "internal error: max used block index=" << max_used_block_height << " is not less then blockchain size = " << m_db->height());
- max_used_block_id = m_db->get_block_hash_from_height(max_used_block_height);
- return true;
+ // ND: Speedup:
+ // 1. keep a list of verified transactions, when the Blockchain tries to check a tx again,
+ // verify against list and skip if already verified to be correct.
+ m_check_tx_inputs_table.emplace(tx_prefix_hash, std::make_pair(res, max_used_block_height));
+
+  CHECK_AND_ASSERT_MES(max_used_block_height < m_db->height(), false, "internal error: max used block index=" << max_used_block_height << " is not less than blockchain size = " << m_db->height());
+ max_used_block_id = m_db->get_block_hash_from_height(max_used_block_height);
+ return true;
}
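A hedged sketch of the "verify once, remember the result" table this overload maintains, keyed by the tx prefix hash. TxHash and the verifier callback are stand-ins; as in the code above, the cache also records the highest block referenced by the inputs (mirrored here by max_used_height).

#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>
#include <utility>

using TxHash = std::string;   // stand-in for the tx prefix hash

class InputCheckCache
{
public:
  bool check(const TxHash& prefix_hash, std::uint64_t& max_used_height,
             const std::function<bool(std::uint64_t&)>& verify_uncached)
  {
    auto it = m_table.find(prefix_hash);
    if (it != m_table.end())
    {
      if (!it->second.first)
        return false;                         // remembered failure
      max_used_height = it->second.second;    // reuse the cached height
      return true;
    }
    const bool ok = verify_uncached(max_used_height);
    m_table.emplace(prefix_hash, std::make_pair(ok, max_used_height));
    return ok;
  }

private:
  std::unordered_map<TxHash, std::pair<bool, std::uint64_t>> m_table;
};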
//------------------------------------------------------------------
bool Blockchain::check_tx_outputs(const transaction& tx)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- // from hard fork 2, we forbid dust and compound outputs
- if (m_hardfork->get_current_version() >= 2) {
- BOOST_FOREACH(auto &o, tx.vout) {
- if (!is_valid_decomposed_amount(o.amount)) {
- return false;
- }
+ // from hard fork 2, we forbid dust and compound outputs
+ if (m_hardfork->get_current_version() >= 2) {
+ BOOST_FOREACH(auto &o, tx.vout) {
+ if (!is_valid_decomposed_amount(o.amount)) {
+ return false;
}
}
+ }
- return true;
+ return true;
}
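A sketch of the dust/compound-output rule referenced above, under the assumption that a "decomposed" amount is a single non-zero digit times a power of ten (1, 2, ..., 9, 10, 20, ...); the real is_valid_decomposed_amount() checks against a precomputed table, so this arithmetic version is only illustrative.

#include <cstdint>

bool is_decomposed_amount(std::uint64_t amount)
{
  if (amount == 0)
    return false;
  while (amount % 10 == 0)   // strip trailing zeros
    amount /= 10;
  return amount < 10;        // exactly one significant digit remains
}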
//------------------------------------------------------------------
bool Blockchain::have_tx_keyimges_as_spent(const transaction &tx) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- BOOST_FOREACH(const txin_v& in, tx.vin)
- {
- CHECKED_GET_SPECIFIC_VARIANT(in, const txin_to_key, in_to_key, true);
- if(have_tx_keyimg_as_spent(in_to_key.k_image))
- return true;
- }
- return false;
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ BOOST_FOREACH(const txin_v& in, tx.vin)
+ {
+ CHECKED_GET_SPECIFIC_VARIANT(in, const txin_to_key, in_to_key, true);
+ if(have_tx_keyimg_as_spent(in_to_key.k_image))
+ return true;
+ }
+ return false;
}
//------------------------------------------------------------------
// This function validates transaction inputs and their keys. Previously
@@ -1989,87 +1987,87 @@ bool Blockchain::have_tx_keyimges_as_spent(const transaction &tx) const
// own function.
bool Blockchain::check_tx_inputs(const transaction& tx, uint64_t* pmax_used_block_height)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- size_t sig_index = 0;
- if(pmax_used_block_height)
- *pmax_used_block_height = 0;
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ size_t sig_index = 0;
+ if(pmax_used_block_height)
+ *pmax_used_block_height = 0;
- crypto::hash tx_prefix_hash = get_transaction_prefix_hash(tx);
+ crypto::hash tx_prefix_hash = get_transaction_prefix_hash(tx);
- auto its = m_check_tx_inputs_table.find(tx_prefix_hash);
- if (its != m_check_tx_inputs_table.end())
+ auto its = m_check_tx_inputs_table.find(tx_prefix_hash);
+ if (its != m_check_tx_inputs_table.end())
+ {
+ if (!its->second.first)
+ return false;
+ if (pmax_used_block_height)
+ *pmax_used_block_height = its->second.second;
+ return true;
+ }
+
+ // from hard fork 2, we require mixin at least 2 unless one output cannot mix with 2 others
+ // if one output cannot mix with 2 others, we accept at most 1 output that can mix
+ if (m_hardfork->get_current_version() >= 2)
+ {
+ size_t n_unmixable = 0, n_mixable = 0;
+ size_t mixin = std::numeric_limits<size_t>::max();
+ for (const auto& txin : tx.vin)
{
- if (!its->second.first)
- return false;
- if (pmax_used_block_height)
- *pmax_used_block_height = its->second.second;
- return true;
+ // non txin_to_key inputs will be rejected below
+ if (txin.type() == typeid(txin_to_key))
+ {
+ const txin_to_key& in_to_key = boost::get<txin_to_key>(txin);
+ uint64_t n_outputs = m_db->get_num_outputs(in_to_key.amount);
+ // n_outputs includes the output we're considering
+ if (n_outputs <= 2)
+ ++n_unmixable;
+ else
+ ++n_mixable;
+ if (in_to_key.key_offsets.size() - 1 < mixin)
+ mixin = in_to_key.key_offsets.size() - 1;
+ }
}
-
- // from hard fork 2, we require mixin at least 2 unless one output cannot mix with 2 others
- // if one output cannot mix with 2 others, we accept at most 1 output that can mix
- if (m_hardfork->get_current_version() >= 2)
+ if (mixin < 2)
{
- size_t n_unmixable = 0, n_mixable = 0;
- size_t mixin = std::numeric_limits<size_t>::max();
- for (const auto& txin : tx.vin)
- {
- // non txin_to_key inputs will be rejected below
- if (txin.type() == typeid(txin_to_key))
- {
- const txin_to_key& in_to_key = boost::get<txin_to_key>(txin);
- uint64_t n_outputs = m_db->get_num_outputs(in_to_key.amount);
- // n_outputs includes the output we're considering
- if (n_outputs <= 2)
- ++n_unmixable;
- else
- ++n_mixable;
- if (in_to_key.key_offsets.size() - 1 < mixin)
- mixin = in_to_key.key_offsets.size() - 1;
- }
- }
- if (mixin < 2)
- {
- if (n_unmixable == 0)
- {
- LOG_PRINT_L1("Tx " << get_transaction_hash(tx) << " has too low mixin (" << mixin << "), and no unmixable inputs");
- return false;
- }
- if (n_mixable > 1)
- {
- LOG_PRINT_L1("Tx " << get_transaction_hash(tx) << " has too low mixin (" << mixin << "), and more than one mixable input with unmixable inputs");
- return false;
- }
- }
+ if (n_unmixable == 0)
+ {
+ LOG_PRINT_L1("Tx " << get_transaction_hash(tx) << " has too low mixin (" << mixin << "), and no unmixable inputs");
+ return false;
+ }
+ if (n_mixable > 1)
+ {
+ LOG_PRINT_L1("Tx " << get_transaction_hash(tx) << " has too low mixin (" << mixin << "), and more than one mixable input with unmixable inputs");
+ return false;
+ }
}
+ }
- auto it = m_check_txin_table.find(tx_prefix_hash);
- if(it == m_check_txin_table.end())
- {
- m_check_txin_table.emplace(tx_prefix_hash, std::unordered_map<crypto::key_image, bool>());
- it = m_check_txin_table.find(tx_prefix_hash);
- assert(it != m_check_txin_table.end());
- }
+ auto it = m_check_txin_table.find(tx_prefix_hash);
+ if(it == m_check_txin_table.end())
+ {
+ m_check_txin_table.emplace(tx_prefix_hash, std::unordered_map<crypto::key_image, bool>());
+ it = m_check_txin_table.find(tx_prefix_hash);
+ assert(it != m_check_txin_table.end());
+ }
- uint64_t t_t1 = 0;
- std::vector<std::vector<crypto::public_key>> pubkeys(tx.vin.size());
- std::vector < uint64_t > results;
- results.resize(tx.vin.size(), 0);
+ uint64_t t_t1 = 0;
+ std::vector<std::vector<crypto::public_key>> pubkeys(tx.vin.size());
+ std::vector < uint64_t > results;
+ results.resize(tx.vin.size(), 0);
- int threads = std::thread::hardware_concurrency();
+ int threads = std::thread::hardware_concurrency();
- boost::asio::io_service ioservice;
- boost::thread_group threadpool;
+ boost::asio::io_service ioservice;
+ boost::thread_group threadpool;
+
+ std::auto_ptr < boost::asio::io_service::work > work(new boost::asio::io_service::work(ioservice));
+ if(threads > 1)
+ {
+ for (int i = 0; i < threads; i++)
+ {
+ threadpool.create_thread(boost::bind(&boost::asio::io_service::run, &ioservice));
+ }
+ }
- std::auto_ptr < boost::asio::io_service::work > work(new boost::asio::io_service::work(ioservice));
- if(threads > 1)
- {
- for (int i = 0; i < threads; i++)
- {
- threadpool.create_thread(boost::bind(&boost::asio::io_service::run, &ioservice));
- }
- }
-
#define KILL_IOSERVICE() \
if(threads > 1) \
{ \
@@ -2078,119 +2076,119 @@ bool Blockchain::check_tx_inputs(const transaction& tx, uint64_t* pmax_used_bloc
ioservice.stop(); \
} \
- for (const auto& txin : tx.vin)
- {
- // make sure output being spent is of type txin_to_key, rather than
- // e.g. txin_gen, which is only used for miner transactions
- CHECK_AND_ASSERT_MES(txin.type() == typeid(txin_to_key), false, "wrong type id in tx input at Blockchain::check_tx_inputs");
- const txin_to_key& in_to_key = boost::get<txin_to_key>(txin);
+ for (const auto& txin : tx.vin)
+ {
+ // make sure output being spent is of type txin_to_key, rather than
+ // e.g. txin_gen, which is only used for miner transactions
+ CHECK_AND_ASSERT_MES(txin.type() == typeid(txin_to_key), false, "wrong type id in tx input at Blockchain::check_tx_inputs");
+ const txin_to_key& in_to_key = boost::get<txin_to_key>(txin);
- // make sure tx output has key offset(s) (is signed to be used)
- CHECK_AND_ASSERT_MES(in_to_key.key_offsets.size(), false, "empty in_to_key.key_offsets in transaction with id " << get_transaction_hash(tx));
+ // make sure tx output has key offset(s) (is signed to be used)
+ CHECK_AND_ASSERT_MES(in_to_key.key_offsets.size(), false, "empty in_to_key.key_offsets in transaction with id " << get_transaction_hash(tx));
- // basically, make sure number of inputs == number of signatures
- CHECK_AND_ASSERT_MES(sig_index < tx.signatures.size(), false, "wrong transaction: not signature entry for input with index= " << sig_index);
+ // basically, make sure number of inputs == number of signatures
+  CHECK_AND_ASSERT_MES(sig_index < tx.signatures.size(), false, "wrong transaction: no signature entry for input with index= " << sig_index);
#if defined(CACHE_VIN_RESULTS)
- auto itk = it->second.find(in_to_key.k_image);
- if(itk != it->second.end())
- {
- if(!itk->second)
- {
- LOG_PRINT_L1("Failed ring signature for tx " << get_transaction_hash(tx) << " vin key with k_image: " << in_to_key.k_image << " sig_index: " << sig_index);
- return false;
- }
-
- // txin has been verified already, skip
- sig_index++;
- continue;
- }
+ auto itk = it->second.find(in_to_key.k_image);
+ if(itk != it->second.end())
+ {
+ if(!itk->second)
+ {
+ LOG_PRINT_L1("Failed ring signature for tx " << get_transaction_hash(tx) << " vin key with k_image: " << in_to_key.k_image << " sig_index: " << sig_index);
+ return false;
+ }
+
+ // txin has been verified already, skip
+ sig_index++;
+ continue;
+ }
#endif
- // make sure that output being spent matches up correctly with the
- // signature spending it.
- TIME_MEASURE_START(aa);
- if (!check_tx_input(in_to_key, tx_prefix_hash, tx.signatures[sig_index], pubkeys[sig_index], pmax_used_block_height))
- {
- it->second[in_to_key.k_image] = false;
- LOG_PRINT_L1("Failed to check ring signature for tx " << get_transaction_hash(tx) << " vin key with k_image: " << in_to_key.k_image << " sig_index: " << sig_index);
- if (pmax_used_block_height) // a default value of NULL is used when called from Blockchain::handle_block_to_main_chain()
- {
- LOG_PRINT_L1(" *pmax_used_block_height: " << *pmax_used_block_height);
- }
+ // make sure that output being spent matches up correctly with the
+ // signature spending it.
+ TIME_MEASURE_START(aa);
+ if (!check_tx_input(in_to_key, tx_prefix_hash, tx.signatures[sig_index], pubkeys[sig_index], pmax_used_block_height))
+ {
+ it->second[in_to_key.k_image] = false;
+ LOG_PRINT_L1("Failed to check ring signature for tx " << get_transaction_hash(tx) << " vin key with k_image: " << in_to_key.k_image << " sig_index: " << sig_index);
+ if (pmax_used_block_height) // a default value of NULL is used when called from Blockchain::handle_block_to_main_chain()
+ {
+ LOG_PRINT_L1(" *pmax_used_block_height: " << *pmax_used_block_height);
+ }
- KILL_IOSERVICE();
- return false;
- }
+ KILL_IOSERVICE();
+ return false;
+ }
- if (threads > 1)
+ if (threads > 1)
+ {
+ // ND: Speedup
+ // 1. Thread ring signature verification if possible.
+ ioservice.dispatch(boost::bind(&Blockchain::check_ring_signature, this, std::cref(tx_prefix_hash), std::cref(in_to_key.k_image), std::cref(pubkeys[sig_index]), std::cref(tx.signatures[sig_index]), std::ref(results[sig_index])));
+ }
+ else
+ {
+ check_ring_signature(tx_prefix_hash, in_to_key.k_image, pubkeys[sig_index], tx.signatures[sig_index], results[sig_index]);
+ if (!results[sig_index])
+ {
+ it->second[in_to_key.k_image] = false;
+ LOG_PRINT_L1("Failed to check ring signature for tx " << get_transaction_hash(tx) << " vin key with k_image: " << in_to_key.k_image << " sig_index: " << sig_index);
+
+ if (pmax_used_block_height) // a default value of NULL is used when called from Blockchain::handle_block_to_main_chain()
{
- // ND: Speedup
- // 1. Thread ring signature verification if possible.
- ioservice.dispatch(boost::bind(&Blockchain::check_ring_signature, this, std::cref(tx_prefix_hash), std::cref(in_to_key.k_image), std::cref(pubkeys[sig_index]), std::cref(tx.signatures[sig_index]), std::ref(results[sig_index])));
+ LOG_PRINT_L1("*pmax_used_block_height: " << *pmax_used_block_height);
}
- else
- {
- check_ring_signature(tx_prefix_hash, in_to_key.k_image, pubkeys[sig_index], tx.signatures[sig_index], results[sig_index]);
- if (!results[sig_index])
- {
- it->second[in_to_key.k_image] = false;
- LOG_PRINT_L1("Failed to check ring signature for tx " << get_transaction_hash(tx) << " vin key with k_image: " << in_to_key.k_image << " sig_index: " << sig_index);
- if (pmax_used_block_height) // a default value of NULL is used when called from Blockchain::handle_block_to_main_chain()
- {
- LOG_PRINT_L1("*pmax_used_block_height: " << *pmax_used_block_height);
- }
+ KILL_IOSERVICE();
+ return false;
+ }
+ it->second[in_to_key.k_image] = true;
+ }
- KILL_IOSERVICE();
- return false;
- }
- it->second[in_to_key.k_image] = true;
- }
+ sig_index++;
+ }
- sig_index++;
- }
+ KILL_IOSERVICE();
- KILL_IOSERVICE();
+ if (threads > 1)
+ {
+ // save results to table, passed or otherwise
+ bool failed = false;
+ for (size_t i = 0; i < tx.vin.size(); i++)
+ {
+ const txin_to_key& in_to_key = boost::get<txin_to_key>(tx.vin[i]);
+ it->second[in_to_key.k_image] = results[i];
+ if(!failed && !results[i])
+ failed = true;
+ }
- if (threads > 1)
+ if (failed)
{
- // save results to table, passed or otherwise
- bool failed = false;
- for (size_t i = 0; i < tx.vin.size(); i++)
- {
- const txin_to_key& in_to_key = boost::get<txin_to_key>(tx.vin[i]);
- it->second[in_to_key.k_image] = results[i];
- if(!failed && !results[i])
- failed = true;
- }
-
- if (failed)
- {
- LOG_PRINT_L1("Failed to check ring signatures!, t_loop: " << t_t1);
- return false;
- }
+ LOG_PRINT_L1("Failed to check ring signatures!, t_loop: " << t_t1);
+ return false;
}
- LOG_PRINT_L1("t_loop: " << t_t1);
- return true;
+ }
+ LOG_PRINT_L1("t_loop: " << t_t1);
+ return true;
}
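A standalone sketch of the hard fork 2 mixin rule walked through above: every input needs mixin >= 2 unless its amount has too few outputs on the chain to mix with, and at most one mixable input may then accompany the unmixable ones. Input, ring_size and num_outputs_for_amount are illustrative stand-ins for the tx input type and the DB query.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <vector>

struct Input
{
  std::uint64_t amount;
  std::size_t ring_size;   // key_offsets.size() in the real code
};

bool mixin_rule_ok(const std::vector<Input>& inputs,
                   const std::function<std::uint64_t(std::uint64_t)>& num_outputs_for_amount)
{
  std::size_t n_unmixable = 0, n_mixable = 0;
  std::size_t mixin = std::numeric_limits<std::size_t>::max();
  for (const auto& in : inputs)
  {
    // an amount with 2 or fewer outputs on the chain cannot find 2 decoys
    if (num_outputs_for_amount(in.amount) <= 2)
      ++n_unmixable;
    else
      ++n_mixable;
    const std::size_t in_mixin = in.ring_size ? in.ring_size - 1 : 0;
    mixin = std::min(mixin, in_mixin);        // smallest mixin across inputs
  }
  if (mixin >= 2)
    return true;
  if (n_unmixable == 0)
    return false;                             // low mixin with no unmixable excuse
  return n_mixable <= 1;                      // at most one mixable input allowed
}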
//------------------------------------------------------------------
void Blockchain::check_ring_signature(const crypto::hash &tx_prefix_hash, const crypto::key_image &key_image, const std::vector<crypto::public_key> &pubkeys, const std::vector<crypto::signature>& sig, uint64_t &result)
{
- if (m_is_in_checkpoint_zone)
- {
- result = true;
- return;
- }
+ if (m_is_in_checkpoint_zone)
+ {
+ result = true;
+ return;
+ }
- std::vector<const crypto::public_key *> p_output_keys;
- for (auto &key : pubkeys)
- {
- p_output_keys.push_back(&key);
- }
+ std::vector<const crypto::public_key *> p_output_keys;
+ for (auto &key : pubkeys)
+ {
+ p_output_keys.push_back(&key);
+ }
- result = crypto::check_ring_signature(tx_prefix_hash, key_image, p_output_keys, sig.data()) ? 1 : 0;
+ result = crypto::check_ring_signature(tx_prefix_hash, key_image, p_output_keys, sig.data()) ? 1 : 0;
}
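The caller above fans ring-signature checks out over a boost::asio::io_service thread pool, each job writing into its own results slot so no locking is needed. A rough standard-library analogue of that fan-out, for illustration only:

#include <cstddef>
#include <functional>
#include <thread>
#include <vector>

void run_parallel(const std::vector<std::function<void()>>& jobs, unsigned threads)
{
  if (threads <= 1)
  {
    for (const auto& job : jobs)
      job();                                  // fall back to serial execution
    return;
  }
  std::vector<std::thread> pool;
  for (unsigned t = 0; t < threads; ++t)
  {
    pool.emplace_back([&, t]() {
      // strided split: thread t handles jobs t, t+threads, t+2*threads, ...
      for (std::size_t i = t; i < jobs.size(); i += threads)
        jobs[i]();
    });
  }
  for (auto& th : pool)
    th.join();
}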
//------------------------------------------------------------------
@@ -2198,26 +2196,26 @@ void Blockchain::check_ring_signature(const crypto::hash &tx_prefix_hash, const
// a block index or a unix time.
bool Blockchain::is_tx_spendtime_unlocked(uint64_t unlock_time) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- if(unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER)
- {
- // ND: Instead of calling get_current_blockchain_height(), call m_db->height()
- // directly as get_current_blockchain_height() locks the recursive mutex.
- if(m_db->height() + CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_BLOCKS >= unlock_time)
- return true;
- else
- return false;
- }
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ if(unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER)
+ {
+ // ND: Instead of calling get_current_blockchain_height(), call m_db->height()
+ // directly as get_current_blockchain_height() locks the recursive mutex.
+ if(m_db->height() + CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_BLOCKS >= unlock_time)
+ return true;
else
- {
- //interpret as time
- uint64_t current_time = static_cast<uint64_t>(time(NULL));
- if(current_time + (get_current_hard_fork_version() < 2 ? CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_SECONDS_V1 : CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_SECONDS) >= unlock_time)
- return true;
- else
- return false;
- }
- return false;
+ return false;
+ }
+ else
+ {
+ //interpret as time
+ uint64_t current_time = static_cast<uint64_t>(time(NULL));
+ if(current_time + (get_current_hard_fork_version() < 2 ? CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_SECONDS_V1 : CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_SECONDS) >= unlock_time)
+ return true;
+ else
+ return false;
+ }
+ return false;
}
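A compact restatement of the unlock_time rule above: values below CRYPTONOTE_MAX_BLOCK_NUMBER are interpreted as block heights, values at or above it as unix timestamps. The constants are passed in as parameters here because their concrete values are not shown in this diff.

#include <cstdint>
#include <ctime>

bool is_spendtime_unlocked(std::uint64_t unlock_time, std::uint64_t chain_height,
                           std::uint64_t max_block_number,   // CRYPTONOTE_MAX_BLOCK_NUMBER
                           std::uint64_t height_delta,       // ..._ALLOWED_DELTA_BLOCKS
                           std::uint64_t seconds_delta)      // ..._ALLOWED_DELTA_SECONDS(_V1)
{
  if (unlock_time < max_block_number)
    return chain_height + height_delta >= unlock_time;       // interpreted as a height
  const std::uint64_t now = static_cast<std::uint64_t>(std::time(nullptr));
  return now + seconds_delta >= unlock_time;                 // interpreted as a timestamp
}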
//------------------------------------------------------------------
// This function locates all outputs associated with a given input (mixins)
@@ -2225,74 +2223,74 @@ bool Blockchain::is_tx_spendtime_unlocked(uint64_t unlock_time) const
// signature for each input.
bool Blockchain::check_tx_input(const txin_to_key& txin, const crypto::hash& tx_prefix_hash, const std::vector<crypto::signature>& sig, std::vector<crypto::public_key> &output_keys, uint64_t* pmax_related_block_height)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
-
- // ND:
- // 1. Disable locking and make method private.
- //CRITICAL_REGION_LOCAL(m_blockchain_lock);
-
- struct outputs_visitor
- {
- std::vector<crypto::public_key >& m_output_keys;
- const Blockchain& m_bch;
- outputs_visitor(std::vector<crypto::public_key>& output_keys, const Blockchain& bch) :
- m_output_keys(output_keys), m_bch(bch)
- {
- }
- bool handle_output(uint64_t unlock_time, const crypto::public_key &pubkey)
- {
- //check tx unlock time
- if (!m_bch.is_tx_spendtime_unlocked(unlock_time))
- {
- LOG_PRINT_L1("One of outputs for one of inputs has wrong tx.unlock_time = " << unlock_time);
- return false;
- }
+ LOG_PRINT_L3("Blockchain::" << __func__);
- m_output_keys.push_back(pubkey);
- return true;
- }
- };
+ // ND:
+ // 1. Disable locking and make method private.
+ //CRITICAL_REGION_LOCAL(m_blockchain_lock);
- output_keys.clear();
-
- //check ring signature
- outputs_visitor vi(output_keys, *this);
- if (!scan_outputkeys_for_indexes(txin, vi, tx_prefix_hash, pmax_related_block_height))
+ struct outputs_visitor
+ {
+ std::vector<crypto::public_key >& m_output_keys;
+ const Blockchain& m_bch;
+ outputs_visitor(std::vector<crypto::public_key>& output_keys, const Blockchain& bch) :
+ m_output_keys(output_keys), m_bch(bch)
{
- LOG_PRINT_L1("Failed to get output keys for tx with amount = " << print_money(txin.amount) << " and count indexes " << txin.key_offsets.size());
- return false;
}
-
- if(txin.key_offsets.size() != output_keys.size())
+ bool handle_output(uint64_t unlock_time, const crypto::public_key &pubkey)
{
- LOG_PRINT_L1("Output keys for tx with amount = " << txin.amount << " and count indexes " << txin.key_offsets.size() << " returned wrong keys count " << output_keys.size());
+ //check tx unlock time
+ if (!m_bch.is_tx_spendtime_unlocked(unlock_time))
+ {
+ LOG_PRINT_L1("One of outputs for one of inputs has wrong tx.unlock_time = " << unlock_time);
return false;
+ }
+
+ m_output_keys.push_back(pubkey);
+ return true;
}
- CHECK_AND_ASSERT_MES(sig.size() == output_keys.size(), false, "internal error: tx signatures count=" << sig.size() << " mismatch with outputs keys count for inputs=" << output_keys.size());
- return true;
+ };
+
+ output_keys.clear();
+
+ //check ring signature
+ outputs_visitor vi(output_keys, *this);
+ if (!scan_outputkeys_for_indexes(txin, vi, tx_prefix_hash, pmax_related_block_height))
+ {
+ LOG_PRINT_L1("Failed to get output keys for tx with amount = " << print_money(txin.amount) << " and count indexes " << txin.key_offsets.size());
+ return false;
+ }
+
+ if(txin.key_offsets.size() != output_keys.size())
+ {
+ LOG_PRINT_L1("Output keys for tx with amount = " << txin.amount << " and count indexes " << txin.key_offsets.size() << " returned wrong keys count " << output_keys.size());
+ return false;
+ }
+ CHECK_AND_ASSERT_MES(sig.size() == output_keys.size(), false, "internal error: tx signatures count=" << sig.size() << " mismatch with outputs keys count for inputs=" << output_keys.size());
+ return true;
}
//------------------------------------------------------------------
//TODO: Is this intended to do something else? Need to look into the todo there.
uint64_t Blockchain::get_adjusted_time() const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- //TODO: add collecting median time
- return time(NULL);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ //TODO: add collecting median time
+ return time(NULL);
}
//------------------------------------------------------------------
//TODO: revisit, has changed a bit on upstream
bool Blockchain::check_block_timestamp(std::vector<uint64_t>& timestamps, const block& b) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- uint64_t median_ts = epee::misc_utils::median(timestamps);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ uint64_t median_ts = epee::misc_utils::median(timestamps);
- if(b.timestamp < median_ts)
- {
- LOG_PRINT_L1("Timestamp of block with id: " << get_block_hash(b) << ", " << b.timestamp << ", less than median of last " << BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW << " blocks, " << median_ts);
- return false;
- }
+ if(b.timestamp < median_ts)
+ {
+ LOG_PRINT_L1("Timestamp of block with id: " << get_block_hash(b) << ", " << b.timestamp << ", less than median of last " << BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW << " blocks, " << median_ts);
+ return false;
+ }
- return true;
+ return true;
}
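A sketch of the median test above with epee::misc_utils::median replaced by a plain nth_element-based helper; averaging the two middle values for even-sized windows is an assumption about that helper's convention, not something shown in this diff.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

std::uint64_t median_of(std::vector<std::uint64_t> v)
{
  if (v.empty())
    return 0;
  const std::size_t mid = v.size() / 2;
  std::nth_element(v.begin(), v.begin() + mid, v.end());
  std::uint64_t m = v[mid];
  if (v.size() % 2 == 0)
  {
    // even count: also fetch the lower middle element and average the two
    std::nth_element(v.begin(), v.begin() + mid - 1, v.begin() + mid);
    m = (m + v[mid - 1]) / 2;
  }
  return m;
}

// the block timestamp must not be below the median of the previous window
bool timestamp_ok(std::uint64_t block_ts, const std::vector<std::uint64_t>& window)
{
  return block_ts >= median_of(window);
}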
//------------------------------------------------------------------
// This function grabs the timestamps from the most recent <n> blocks,
@@ -2304,30 +2302,30 @@ bool Blockchain::check_block_timestamp(std::vector<uint64_t>& timestamps, const
// false otherwise
bool Blockchain::check_block_timestamp(const block& b) const
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- if(b.timestamp > get_adjusted_time() + CRYPTONOTE_BLOCK_FUTURE_TIME_LIMIT)
- {
- LOG_PRINT_L1("Timestamp of block with id: " << get_block_hash(b) << ", " << b.timestamp << ", bigger than adjusted time + 2 hours");
- return false;
- }
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ if(b.timestamp > get_adjusted_time() + CRYPTONOTE_BLOCK_FUTURE_TIME_LIMIT)
+ {
+ LOG_PRINT_L1("Timestamp of block with id: " << get_block_hash(b) << ", " << b.timestamp << ", bigger than adjusted time + 2 hours");
+ return false;
+ }
- // if not enough blocks, no proper median yet, return true
- if(m_db->height() < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)
- {
- return true;
- }
+ // if not enough blocks, no proper median yet, return true
+ if(m_db->height() < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)
+ {
+ return true;
+ }
- std::vector<uint64_t> timestamps;
- auto h = m_db->height();
+ std::vector<uint64_t> timestamps;
+ auto h = m_db->height();
- // need most recent 60 blocks, get index of first of those
- size_t offset = h - BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW;
- for(;offset < h; ++offset)
- {
- timestamps.push_back(m_db->get_block_timestamp(offset));
- }
+ // need most recent 60 blocks, get index of first of those
+ size_t offset = h - BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW;
+ for(;offset < h; ++offset)
+ {
+ timestamps.push_back(m_db->get_block_timestamp(offset));
+ }
- return check_block_timestamp(timestamps, b);
+ return check_block_timestamp(timestamps, b);
}
//------------------------------------------------------------------
// Needs to validate the block and acquire each transaction from the
@@ -2335,394 +2333,394 @@ bool Blockchain::check_block_timestamp(const block& b) const
// m_db->add_block()
bool Blockchain::handle_block_to_main_chain(const block& bl, const crypto::hash& id, block_verification_context& bvc)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
-
- TIME_MEASURE_START(block_processing_time);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- TIME_MEASURE_START(t1);
-
- // this is a cheap test
- if (!m_hardfork->check(bl))
- {
- LOG_PRINT_L1("Block with id: " << id << std::endl << "has old version: " << (unsigned)bl.major_version << std::endl << "current: " << (unsigned)m_hardfork->get_current_version());
- return false;
- }
-
- if(bl.prev_id != get_tail_id())
- {
- LOG_PRINT_L1("Block with id: " << id << std::endl << "has wrong prev_id: " << bl.prev_id << std::endl << "expected: " << get_tail_id());
- return false;
- }
-
- TIME_MEASURE_FINISH(t1);
- TIME_MEASURE_START(t2);
-
- // make sure block timestamp is not less than the median timestamp
- // of a set number of the most recent blocks.
- if(!check_block_timestamp(bl))
- {
- LOG_PRINT_L1("Block with id: " << id << std::endl << "has invalid timestamp: " << bl.timestamp);
- bvc.m_verifivation_failed = true;
- return false;
- }
-
- TIME_MEASURE_FINISH(t2);
- //check proof of work
- TIME_MEASURE_START(target_calculating_time);
+ LOG_PRINT_L3("Blockchain::" << __func__);
- // get the target difficulty for the block.
- // the calculation can overflow, among other failure cases,
- // so we need to check the return type.
- // FIXME: get_difficulty_for_next_block can also assert, look into
- // changing this to throwing exceptions instead so we can clean up.
- difficulty_type current_diffic = get_difficulty_for_next_block();
- CHECK_AND_ASSERT_MES(current_diffic, false, "!!!!!!!!! difficulty overhead !!!!!!!!!");
+ TIME_MEASURE_START(block_processing_time);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ TIME_MEASURE_START(t1);
- TIME_MEASURE_FINISH(target_calculating_time);
+ // this is a cheap test
+ if (!m_hardfork->check(bl))
+ {
+ LOG_PRINT_L1("Block with id: " << id << std::endl << "has old version: " << (unsigned)bl.major_version << std::endl << "current: " << (unsigned)m_hardfork->get_current_version());
+ return false;
+ }
- TIME_MEASURE_START(longhash_calculating_time);
+ if(bl.prev_id != get_tail_id())
+ {
+ LOG_PRINT_L1("Block with id: " << id << std::endl << "has wrong prev_id: " << bl.prev_id << std::endl << "expected: " << get_tail_id());
+ return false;
+ }
- crypto::hash proof_of_work = null_hash;
+ TIME_MEASURE_FINISH(t1);
+ TIME_MEASURE_START(t2);
- // Formerly the code below contained an if loop with the following condition
- // !m_checkpoints.is_in_checkpoint_zone(get_current_blockchain_height())
- // however, this caused the daemon to not bother checking PoW for blocks
- // before checkpoints, which is very dangerous behaviour. We moved the PoW
- // validation out of the next chunk of code to make sure that we correctly
- // check PoW now.
- // FIXME: height parameter is not used...should it be used or should it not
- // be a parameter?
- // validate proof_of_work versus difficulty target
- bool precomputed = false;
+ // make sure block timestamp is not less than the median timestamp
+ // of a set number of the most recent blocks.
+ if(!check_block_timestamp(bl))
+ {
+ LOG_PRINT_L1("Block with id: " << id << std::endl << "has invalid timestamp: " << bl.timestamp);
+ bvc.m_verifivation_failed = true;
+ return false;
+ }
+
+ TIME_MEASURE_FINISH(t2);
+ //check proof of work
+ TIME_MEASURE_START(target_calculating_time);
+
+ // get the target difficulty for the block.
+ // the calculation can overflow, among other failure cases,
+ // so we need to check the return type.
+ // FIXME: get_difficulty_for_next_block can also assert, look into
+ // changing this to throwing exceptions instead so we can clean up.
+ difficulty_type current_diffic = get_difficulty_for_next_block();
+ CHECK_AND_ASSERT_MES(current_diffic, false, "!!!!!!!!! difficulty overhead !!!!!!!!!");
+
+ TIME_MEASURE_FINISH(target_calculating_time);
+
+ TIME_MEASURE_START(longhash_calculating_time);
+
+ crypto::hash proof_of_work = null_hash;
+
+ // Formerly the code below contained an if loop with the following condition
+ // !m_checkpoints.is_in_checkpoint_zone(get_current_blockchain_height())
+ // however, this caused the daemon to not bother checking PoW for blocks
+ // before checkpoints, which is very dangerous behaviour. We moved the PoW
+ // validation out of the next chunk of code to make sure that we correctly
+ // check PoW now.
+ // FIXME: height parameter is not used...should it be used or should it not
+ // be a parameter?
+ // validate proof_of_work versus difficulty target
+ bool precomputed = false;
#if defined(PER_BLOCK_CHECKPOINT)
- bool fast_check = false;
- if (m_db->height() < m_blocks_hash_check.size())
+ bool fast_check = false;
+ if (m_db->height() < m_blocks_hash_check.size())
+ {
+ auto hash = get_block_hash(bl);
+ if (memcmp(&hash, &m_blocks_hash_check[m_db->height()], sizeof(hash)) != 0)
+ {
+ LOG_PRINT_L1("Block with id is INVALID: " << id);
+ bvc.m_verifivation_failed = true;
+ return false;
+ }
+ fast_check = true;
+ }
+ else
+#endif
+ {
+ auto it = m_blocks_longhash_table.find(id);
+ if (it != m_blocks_longhash_table.end())
{
- auto hash = get_block_hash(bl);
- if (memcmp(&hash, &m_blocks_hash_check[m_db->height()], sizeof(hash)) != 0)
- {
- LOG_PRINT_L1("Block with id is INVALID: " << id);
- bvc.m_verifivation_failed = true;
- return false;
- }
- fast_check = true;
+ precomputed = true;
+ proof_of_work = it->second;
}
else
-#endif
- {
- auto it = m_blocks_longhash_table.find(id);
- if (it != m_blocks_longhash_table.end())
- {
- precomputed = true;
- proof_of_work = it->second;
- }
- else
- proof_of_work = get_block_longhash(bl, m_db->height());
+ proof_of_work = get_block_longhash(bl, m_db->height());
- // validate proof_of_work versus difficulty target
- if(!check_hash(proof_of_work, current_diffic))
- {
- LOG_PRINT_L1("Block with id: " << id << std::endl << "does not have enough proof of work: " << proof_of_work << std::endl << "unexpected difficulty: " << current_diffic);
- bvc.m_verifivation_failed = true;
- return false;
- }
+ // validate proof_of_work versus difficulty target
+ if(!check_hash(proof_of_work, current_diffic))
+ {
+ LOG_PRINT_L1("Block with id: " << id << std::endl << "does not have enough proof of work: " << proof_of_work << std::endl << "unexpected difficulty: " << current_diffic);
+ bvc.m_verifivation_failed = true;
+ return false;
}
+ }
- // If we're at a checkpoint, ensure that our hardcoded checkpoint hash
- // is correct.
- if(m_checkpoints.is_in_checkpoint_zone(get_current_blockchain_height()))
+ // If we're at a checkpoint, ensure that our hardcoded checkpoint hash
+ // is correct.
+ if(m_checkpoints.is_in_checkpoint_zone(get_current_blockchain_height()))
+ {
+ if(!m_checkpoints.check_block(get_current_blockchain_height(), id))
{
- if(!m_checkpoints.check_block(get_current_blockchain_height(), id))
- {
- LOG_ERROR("CHECKPOINT VALIDATION FAILED");
- bvc.m_verifivation_failed = true;
- return false;
- }
+ LOG_ERROR("CHECKPOINT VALIDATION FAILED");
+ bvc.m_verifivation_failed = true;
+ return false;
}
+ }
- TIME_MEASURE_FINISH(longhash_calculating_time);
- if (precomputed)
- longhash_calculating_time += m_fake_pow_calc_time;
+ TIME_MEASURE_FINISH(longhash_calculating_time);
+ if (precomputed)
+ longhash_calculating_time += m_fake_pow_calc_time;
- TIME_MEASURE_START(t3);
+ TIME_MEASURE_START(t3);
- // sanity check basic miner tx properties;
- if(!prevalidate_miner_transaction(bl, m_db->height()))
- {
- LOG_PRINT_L1("Block with id: " << id << " failed to pass prevalidation");
- bvc.m_verifivation_failed = true;
- return false;
- }
+ // sanity check basic miner tx properties;
+ if(!prevalidate_miner_transaction(bl, m_db->height()))
+ {
+ LOG_PRINT_L1("Block with id: " << id << " failed to pass prevalidation");
+ bvc.m_verifivation_failed = true;
+ return false;
+ }
- size_t coinbase_blob_size = get_object_blobsize(bl.miner_tx);
- size_t cumulative_block_size = coinbase_blob_size;
+ size_t coinbase_blob_size = get_object_blobsize(bl.miner_tx);
+ size_t cumulative_block_size = coinbase_blob_size;
- std::vector<transaction> txs;
- key_images_container keys;
+ std::vector<transaction> txs;
+ key_images_container keys;
- uint64_t fee_summary = 0;
- uint64_t t_checktx = 0;
- uint64_t t_exists = 0;
- uint64_t t_pool = 0;
- uint64_t t_dblspnd = 0;
- bool add_tx_to_pool = false;
- TIME_MEASURE_FINISH(t3);
+ uint64_t fee_summary = 0;
+ uint64_t t_checktx = 0;
+ uint64_t t_exists = 0;
+ uint64_t t_pool = 0;
+ uint64_t t_dblspnd = 0;
+ bool add_tx_to_pool = false;
+ TIME_MEASURE_FINISH(t3);
// XXX old code adds miner tx here
- int tx_index = 0;
- // Iterate over the block's transaction hashes, grabbing each
- // from the tx_pool and validating them. Each is then added
- // to txs. Keys spent in each are added to <keys> by the double spend check.
- for (const crypto::hash& tx_id : bl.tx_hashes)
- {
- transaction tx;
- size_t blob_size = 0;
- uint64_t fee = 0;
- bool relayed = false;
- TIME_MEASURE_START(aa);
+ int tx_index = 0;
+ // Iterate over the block's transaction hashes, grabbing each
+ // from the tx_pool and validating them. Each is then added
+ // to txs. Keys spent in each are added to <keys> by the double spend check.
+ for (const crypto::hash& tx_id : bl.tx_hashes)
+ {
+ transaction tx;
+ size_t blob_size = 0;
+ uint64_t fee = 0;
+ bool relayed = false;
+ TIME_MEASURE_START(aa);
// XXX old code does not check whether tx exists
- if (m_db->tx_exists(tx_id))
- {
- LOG_PRINT_L1("Block with id: " << id << " attempting to add transaction already in blockchain with id: " << tx_id);
- bvc.m_verifivation_failed = true;
- break;
- }
+ if (m_db->tx_exists(tx_id))
+ {
+ LOG_PRINT_L1("Block with id: " << id << " attempting to add transaction already in blockchain with id: " << tx_id);
+ bvc.m_verifivation_failed = true;
+ break;
+ }
- TIME_MEASURE_FINISH(aa);
- t_exists += aa;
- TIME_MEASURE_START(bb);
+ TIME_MEASURE_FINISH(aa);
+ t_exists += aa;
+ TIME_MEASURE_START(bb);
- // get transaction with hash <tx_id> from tx_pool
- if(!m_tx_pool.take_tx(tx_id, tx, blob_size, fee, relayed))
- {
- LOG_PRINT_L1("Block with id: " << id << " has at least one unknown transaction with id: " << tx_id);
- bvc.m_verifivation_failed = true;
- break;
- }
+ // get transaction with hash <tx_id> from tx_pool
+ if(!m_tx_pool.take_tx(tx_id, tx, blob_size, fee, relayed))
+ {
+ LOG_PRINT_L1("Block with id: " << id << " has at least one unknown transaction with id: " << tx_id);
+ bvc.m_verifivation_failed = true;
+ break;
+ }
+
+ TIME_MEASURE_FINISH(bb);
+ t_pool += bb;
+ // add the transaction to the temp list of transactions, so we can either
+ // store the list of transactions all at once or return the ones we've
+ // taken from the tx_pool back to it if the block fails verification.
+ txs.push_back(tx);
+ TIME_MEASURE_START(dd);
- TIME_MEASURE_FINISH(bb);
- t_pool += bb;
- // add the transaction to the temp list of transactions, so we can either
- // store the list of transactions all at once or return the ones we've
- // taken from the tx_pool back to it if the block fails verification.
- txs.push_back(tx);
- TIME_MEASURE_START(dd);
-
- // ND: this is not needed, db->add_block() checks for duplicate k_images and fails accordingly.
- // if (!check_for_double_spend(tx, keys))
- // {
- // LOG_PRINT_L0("Double spend detected in transaction (id: " << tx_id);
- // bvc.m_verifivation_failed = true;
- // break;
- // }
-
- TIME_MEASURE_FINISH(dd);
- t_dblspnd += dd;
- TIME_MEASURE_START(cc);
+ // ND: this is not needed, db->add_block() checks for duplicate k_images and fails accordingly.
+ // if (!check_for_double_spend(tx, keys))
+ // {
+ // LOG_PRINT_L0("Double spend detected in transaction (id: " << tx_id);
+ // bvc.m_verifivation_failed = true;
+ // break;
+ // }
+
+ TIME_MEASURE_FINISH(dd);
+ t_dblspnd += dd;
+ TIME_MEASURE_START(cc);
#if defined(PER_BLOCK_CHECKPOINT)
- if (!fast_check)
+ if (!fast_check)
#endif
- {
- // validate that transaction inputs and the keys spending them are correct.
- if(!check_tx_inputs(tx))
- {
- LOG_PRINT_L1("Block with id: " << id << " has at least one transaction (id: " << tx_id << ") with wrong inputs.");
-
- //TODO: why is this done? make sure that keeping invalid blocks makes sense.
- add_block_as_invalid(bl, id);
- LOG_PRINT_L1("Block with id " << id << " added as invalid because of wrong inputs in transactions");
- bvc.m_verifivation_failed = true;
- add_tx_to_pool = true;
- break;
- }
- }
-#if defined(PER_BLOCK_CHECKPOINT)
- else
- {
- // ND: if fast_check is enabled for blocks, there is no need to check
- // the transaction inputs, but do some sanity checks anyway.
- if (memcmp(&m_blocks_txs_check[tx_index++], &tx_id, sizeof(tx_id)) != 0)
- {
- LOG_PRINT_L1("Block with id: " << id << " has at least one transaction (id: " << tx_id << ") with wrong inputs.");
- //TODO: why is this done? make sure that keeping invalid blocks makes sense.
- add_block_as_invalid(bl, id);
- LOG_PRINT_L1("Block with id " << id << " added as invalid because of wrong inputs in transactions");
- bvc.m_verifivation_failed = true;
- add_tx_to_pool = true;
- break;
- }
- }
-#endif
- TIME_MEASURE_FINISH(cc);
- t_checktx += cc;
- fee_summary += fee;
- cumulative_block_size += blob_size;
- }
-
- m_blocks_txs_check.clear();
+ {
+ // validate that transaction inputs and the keys spending them are correct.
+ if(!check_tx_inputs(tx))
+ {
+ LOG_PRINT_L1("Block with id: " << id << " has at least one transaction (id: " << tx_id << ") with wrong inputs.");
- TIME_MEASURE_START(vmt);
- uint64_t base_reward = 0;
- uint64_t already_generated_coins = m_db->height() ? m_db->get_block_already_generated_coins(m_db->height() - 1) : 0;
- if(!validate_miner_transaction(bl, cumulative_block_size, fee_summary, base_reward, already_generated_coins))
+ //TODO: why is this done? make sure that keeping invalid blocks makes sense.
+ add_block_as_invalid(bl, id);
+ LOG_PRINT_L1("Block with id " << id << " added as invalid because of wrong inputs in transactions");
+ bvc.m_verifivation_failed = true;
+ add_tx_to_pool = true;
+ break;
+ }
+ }
+#if defined(PER_BLOCK_CHECKPOINT)
+ else
{
- LOG_PRINT_L1("Block with id: " << id << " has incorrect miner transaction");
+ // ND: if fast_check is enabled for blocks, there is no need to check
+ // the transaction inputs, but do some sanity checks anyway.
+ if (memcmp(&m_blocks_txs_check[tx_index++], &tx_id, sizeof(tx_id)) != 0)
+ {
+ LOG_PRINT_L1("Block with id: " << id << " has at least one transaction (id: " << tx_id << ") with wrong inputs.");
+ //TODO: why is this done? make sure that keeping invalid blocks makes sense.
+ add_block_as_invalid(bl, id);
+ LOG_PRINT_L1("Block with id " << id << " added as invalid because of wrong inputs in transactions");
bvc.m_verifivation_failed = true;
+ add_tx_to_pool = true;
+ break;
+ }
}
-
- TIME_MEASURE_FINISH(vmt);
- size_t block_size;
- difficulty_type cumulative_difficulty;
-
- // populate various metadata about the block to be stored alongside it.
- block_size = cumulative_block_size;
- cumulative_difficulty = current_diffic;
- already_generated_coins = already_generated_coins + base_reward;
- if(m_db->height())
- cumulative_difficulty += m_db->get_block_cumulative_difficulty(m_db->height() - 1);
-
- TIME_MEASURE_FINISH(block_processing_time);
- if(precomputed)
- block_processing_time += m_fake_pow_calc_time;
-
- TIME_MEASURE_START(addblock);
- uint64_t new_height = 0;
- bool add_success = true;
- if (!bvc.m_verifivation_failed)
+#endif
+ TIME_MEASURE_FINISH(cc);
+ t_checktx += cc;
+ fee_summary += fee;
+ cumulative_block_size += blob_size;
+ }
+
+ m_blocks_txs_check.clear();
+
+ TIME_MEASURE_START(vmt);
+ uint64_t base_reward = 0;
+ uint64_t already_generated_coins = m_db->height() ? m_db->get_block_already_generated_coins(m_db->height() - 1) : 0;
+ if(!validate_miner_transaction(bl, cumulative_block_size, fee_summary, base_reward, already_generated_coins))
+ {
+ LOG_PRINT_L1("Block with id: " << id << " has incorrect miner transaction");
+ bvc.m_verifivation_failed = true;
+ }
+
+ TIME_MEASURE_FINISH(vmt);
+ size_t block_size;
+ difficulty_type cumulative_difficulty;
+
+ // populate various metadata about the block to be stored alongside it.
+ block_size = cumulative_block_size;
+ cumulative_difficulty = current_diffic;
+ already_generated_coins = already_generated_coins + base_reward;
+ if(m_db->height())
+ cumulative_difficulty += m_db->get_block_cumulative_difficulty(m_db->height() - 1);
+
+ TIME_MEASURE_FINISH(block_processing_time);
+ if(precomputed)
+ block_processing_time += m_fake_pow_calc_time;
+
+ TIME_MEASURE_START(addblock);
+ uint64_t new_height = 0;
+ bool add_success = true;
+ if (!bvc.m_verifivation_failed)
+ {
+ try
{
- try
- {
- new_height = m_db->add_block(bl, block_size, cumulative_difficulty, already_generated_coins, txs);
- }
- catch (const std::exception& e)
- {
- //TODO: figure out the best way to deal with this failure
- LOG_ERROR("Error adding block with hash: " << id << " to blockchain, what = " << e.what());
- add_success = false;
- }
+ new_height = m_db->add_block(bl, block_size, cumulative_difficulty, already_generated_coins, txs);
}
-
- // if we failed for any reason to verify the block, return taken
- // transactions to the tx_pool.
- if ((bvc.m_verifivation_failed && add_tx_to_pool) || !add_success)
+ catch (const std::exception& e)
{
- // return taken transactions to transaction pool
- for (auto& tx : txs)
- {
- cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc);
- // We assume that if they were in a block, the transactions are already
- // known to the network as a whole. However, if we had mined that block,
- // that might not be always true. Unlikely though, and always relaying
- // these again might cause a spike of traffic as many nodes re-relay
- // all the transactions in a popped block when a reorg happens.
- if (!m_tx_pool.add_tx(tx, tvc, true, true))
- {
- LOG_PRINT_L0("Failed to return taken transaction with hash: " << get_transaction_hash(tx) << " to tx_pool");
- }
- }
- return false;
+ //TODO: figure out the best way to deal with this failure
+ LOG_ERROR("Error adding block with hash: " << id << " to blockchain, what = " << e.what());
+ add_success = false;
+ }
+ }
+
+ // if we failed for any reason to verify the block, return taken
+ // transactions to the tx_pool.
+ if ((bvc.m_verifivation_failed && add_tx_to_pool) || !add_success)
+ {
+ // return taken transactions to transaction pool
+ for (auto& tx : txs)
+ {
+ cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc);
+ // We assume that if they were in a block, the transactions are already
+ // known to the network as a whole. However, if we had mined that block,
+ // that might not be always true. Unlikely though, and always relaying
+ // these again might cause a spike of traffic as many nodes re-relay
+ // all the transactions in a popped block when a reorg happens.
+ if (!m_tx_pool.add_tx(tx, tvc, true, true))
+ {
+ LOG_PRINT_L0("Failed to return taken transaction with hash: " << get_transaction_hash(tx) << " to tx_pool");
+ }
}
+ return false;
+ }
- TIME_MEASURE_FINISH(addblock);
+ TIME_MEASURE_FINISH(addblock);
- // this will not fail since check succeeded above
- m_hardfork->add(bl, new_height - 1);
+ // this will not fail since check succeeded above
+ m_hardfork->add(bl, new_height - 1);
- // do this after updating the hard fork state since the size limit may change due to fork
- update_next_cumulative_size_limit();
+ // do this after updating the hard fork state since the size limit may change due to fork
+ update_next_cumulative_size_limit();
- LOG_PRINT_L1("+++++ BLOCK SUCCESSFULLY ADDED" << std::endl << "id:\t" << id << std::endl << "PoW:\t" << proof_of_work << std::endl << "HEIGHT " << new_height-1 << ", difficulty:\t" << current_diffic << std::endl << "block reward: " << print_money(fee_summary + base_reward) << "(" << print_money(base_reward) << " + " << print_money(fee_summary) << "), coinbase_blob_size: " << coinbase_blob_size << ", cumulative size: " << cumulative_block_size << ", " << block_processing_time << "(" << target_calculating_time << "/" << longhash_calculating_time << ")ms");
- if(m_show_time_stats)
- {
- LOG_PRINT_L0("Height: " << new_height << " blob: " << coinbase_blob_size << " cumm: "
- << cumulative_block_size << " p/t: " << block_processing_time << " ("
- << target_calculating_time << "/" << longhash_calculating_time << "/"
- << t1 << "/" << t2 << "/" << t3 << "/" << t_exists << "/" << t_pool
- << "/" << t_checktx << "/" << t_dblspnd << "/" << vmt << "/" << addblock << ")ms");
- }
+ LOG_PRINT_L1("+++++ BLOCK SUCCESSFULLY ADDED" << std::endl << "id:\t" << id << std::endl << "PoW:\t" << proof_of_work << std::endl << "HEIGHT " << new_height-1 << ", difficulty:\t" << current_diffic << std::endl << "block reward: " << print_money(fee_summary + base_reward) << "(" << print_money(base_reward) << " + " << print_money(fee_summary) << "), coinbase_blob_size: " << coinbase_blob_size << ", cumulative size: " << cumulative_block_size << ", " << block_processing_time << "(" << target_calculating_time << "/" << longhash_calculating_time << ")ms");
+ if(m_show_time_stats)
+ {
+ LOG_PRINT_L0("Height: " << new_height << " blob: " << coinbase_blob_size << " cumm: "
+ << cumulative_block_size << " p/t: " << block_processing_time << " ("
+ << target_calculating_time << "/" << longhash_calculating_time << "/"
+ << t1 << "/" << t2 << "/" << t3 << "/" << t_exists << "/" << t_pool
+ << "/" << t_checktx << "/" << t_dblspnd << "/" << vmt << "/" << addblock << ")ms");
+ }
- bvc.m_added_to_main_chain = true;
- ++m_sync_counter;
+ bvc.m_added_to_main_chain = true;
+ ++m_sync_counter;
- // appears to be a NOP *and* is called elsewhere. wat?
- m_tx_pool.on_blockchain_inc(new_height, id);
+ // appears to be a NOP *and* is called elsewhere. wat?
+ m_tx_pool.on_blockchain_inc(new_height, id);
- return true;
+ return true;
}
//------------------------------------------------------------------
bool Blockchain::update_next_cumulative_size_limit()
{
- uint64_t full_reward_zone = get_current_hard_fork_version() < 2 ? CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE_V1 : CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE;
+ uint64_t full_reward_zone = get_current_hard_fork_version() < 2 ? CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE_V1 : CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE;
- LOG_PRINT_L3("Blockchain::" << __func__);
- std::vector<size_t> sz;
- get_last_n_blocks_sizes(sz, CRYPTONOTE_REWARD_BLOCKS_WINDOW);
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ std::vector<size_t> sz;
+ get_last_n_blocks_sizes(sz, CRYPTONOTE_REWARD_BLOCKS_WINDOW);
- uint64_t median = epee::misc_utils::median(sz);
- if(median <= full_reward_zone)
- median = full_reward_zone;
+ uint64_t median = epee::misc_utils::median(sz);
+ if(median <= full_reward_zone)
+ median = full_reward_zone;
- m_current_block_cumul_sz_limit = median*2;
- return true;
+ m_current_block_cumul_sz_limit = median*2;
+ return true;
}
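
The rule re-indented above is small but load-bearing: update_next_cumulative_size_limit takes the sizes of the last CRYPTONOTE_REWARD_BLOCKS_WINDOW blocks, computes their median, clamps it from below at the full-reward zone for the current hard fork version, and doubles the result. A minimal standalone sketch of that rule follows; the constants and the simplified median (upper middle element instead of epee::misc_utils::median) are illustrative stand-ins, not values taken from this tree.

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Illustrative stand-ins for CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE_V1 / ..._ZONE.
    constexpr uint64_t kFullRewardZoneV1 = 20000;
    constexpr uint64_t kFullRewardZone   = 60000;

    // Simplified median: picks the upper middle element (the real code uses
    // epee::misc_utils::median, which averages the two middle values for even sizes).
    uint64_t median_size(std::vector<uint64_t> sizes)
    {
      if (sizes.empty())
        return 0;
      const size_t mid = sizes.size() / 2;
      std::nth_element(sizes.begin(), sizes.begin() + mid, sizes.end());
      return sizes[mid];
    }

    // limit = max(median(last N block sizes), full_reward_zone) * 2
    uint64_t next_cumulative_size_limit(const std::vector<uint64_t>& last_block_sizes,
                                        uint8_t hard_fork_version)
    {
      const uint64_t full_reward_zone =
          hard_fork_version < 2 ? kFullRewardZoneV1 : kFullRewardZone;
      uint64_t median = median_size(last_block_sizes);
      if (median <= full_reward_zone)
        median = full_reward_zone;
      return median * 2;
    }
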
//------------------------------------------------------------------
bool Blockchain::add_new_block(const block& bl_, block_verification_context& bvc)
{
- LOG_PRINT_L3("Blockchain::" << __func__);
- //copy block here to let modify block.target
- block bl = bl_;
- crypto::hash id = get_block_hash(bl);
- CRITICAL_REGION_LOCAL(m_tx_pool);//to avoid deadlock lets lock tx_pool for whole add/reorganize process
- CRITICAL_REGION_LOCAL1(m_blockchain_lock);
- if(have_block(id))
- {
- LOG_PRINT_L3("block with id = " << id << " already exists");
- bvc.m_already_exists = true;
- return false;
- }
+ LOG_PRINT_L3("Blockchain::" << __func__);
+ //copy block here to let modify block.target
+ block bl = bl_;
+ crypto::hash id = get_block_hash(bl);
+ CRITICAL_REGION_LOCAL(m_tx_pool);//to avoid deadlock lets lock tx_pool for whole add/reorganize process
+ CRITICAL_REGION_LOCAL1(m_blockchain_lock);
+ if(have_block(id))
+ {
+ LOG_PRINT_L3("block with id = " << id << " already exists");
+ bvc.m_already_exists = true;
+ return false;
+ }
- //check that block refers to chain tail
- if(!(bl.prev_id == get_tail_id()))
- {
- //chain switching or wrong block
- bvc.m_added_to_main_chain = false;
- return handle_alternative_block(bl, id, bvc);
- //never relay alternative blocks
- }
+ //check that block refers to chain tail
+ if(!(bl.prev_id == get_tail_id()))
+ {
+ //chain switching or wrong block
+ bvc.m_added_to_main_chain = false;
+ return handle_alternative_block(bl, id, bvc);
+ //never relay alternative blocks
+ }
- return handle_block_to_main_chain(bl, id, bvc);
+ return handle_block_to_main_chain(bl, id, bvc);
}
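
add_new_block, re-indented above, is mostly a dispatcher, but the comment about deadlock is worth spelling out: the tx pool lock is always taken before the blockchain lock, and it is held for the whole add/reorganize path. Keeping that acquisition order fixed is what prevents two threads from deadlocking by grabbing the two locks in opposite orders. A minimal sketch of the same ordering with standard mutexes (the epee CRITICAL_REGION_LOCAL macros are scoped locks; the names below are placeholders):

    #include <mutex>

    std::recursive_mutex tx_pool_lock;     // stands in for the tx pool's lock
    std::recursive_mutex blockchain_lock;  // stands in for m_blockchain_lock

    // Always lock the pool first, then the chain, for the entire add/reorganize
    // path, mirroring the order in add_new_block above.
    void add_new_block_sketch()
    {
      std::lock_guard<std::recursive_mutex> pool_guard(tx_pool_lock);
      std::lock_guard<std::recursive_mutex> chain_guard(blockchain_lock);
      // ... have_block() check, then main-chain or alternative-chain handling ...
    }
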
//------------------------------------------------------------------
void Blockchain::check_against_checkpoints(const checkpoints& points, bool enforce)
{
- const auto& pts = points.get_points();
+ const auto& pts = points.get_points();
- for (const auto& pt : pts)
+ for (const auto& pt : pts)
+ {
+ // if the checkpoint is for a block we don't have yet, move on
+ if (pt.first >= m_db->height())
{
- // if the checkpoint is for a block we don't have yet, move on
- if (pt.first >= m_db->height())
- {
- continue;
- }
+ continue;
+ }
- if (!points.check_block(pt.first, m_db->get_block_hash_from_height(pt.first)))
- {
- // if asked to enforce checkpoints, roll back to a couple of blocks before the checkpoint
- if (enforce)
- {
- LOG_ERROR("Local blockchain failed to pass a checkpoint, rolling back!");
- std::list<block> empty;
- rollback_blockchain_switching(empty, pt.first - 2);
- }
- else
- {
- LOG_ERROR("WARNING: local blockchain failed to pass a MoneroPulse checkpoint, and you could be on a fork. You should either sync up from scratch, OR download a fresh blockchain bootstrap, OR enable checkpoint enforcing with the --enforce-dns-checkpointing command-line option");
- }
- }
+ if (!points.check_block(pt.first, m_db->get_block_hash_from_height(pt.first)))
+ {
+ // if asked to enforce checkpoints, roll back to a couple of blocks before the checkpoint
+ if (enforce)
+ {
+ LOG_ERROR("Local blockchain failed to pass a checkpoint, rolling back!");
+ std::list<block> empty;
+ rollback_blockchain_switching(empty, pt.first - 2);
+ }
+ else
+ {
+ LOG_ERROR("WARNING: local blockchain failed to pass a MoneroPulse checkpoint, and you could be on a fork. You should either sync up from scratch, OR download a fresh blockchain bootstrap, OR enable checkpoint enforcing with the --enforce-dns-checkpointing command-line option");
+ }
}
+ }
}
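
check_against_checkpoints, shown above, walks every checkpoint at a height the local chain has already reached, compares the stored block hash at that height against the checkpointed one, and on mismatch either rolls back to two blocks before the checkpoint (when enforcing) or only logs the MoneroPulse warning. A condensed sketch of that loop; the map type and the callbacks are assumptions standing in for the checkpoints class and the Blockchain methods:

    #include <cstdint>
    #include <functional>
    #include <map>
    #include <string>

    // height -> expected block hash, as loaded from the JSON file or DNS records.
    using checkpoint_map = std::map<uint64_t, std::string>;

    void check_against_checkpoints_sketch(
        const checkpoint_map& points,
        uint64_t chain_height,
        const std::function<std::string(uint64_t)>& hash_at_height,  // stand-in for get_block_hash_from_height
        const std::function<void(uint64_t)>& rollback_to,            // stand-in for rollback_blockchain_switching
        bool enforce)
    {
      for (const auto& pt : points)
      {
        if (pt.first >= chain_height)
          continue;                              // checkpoint is ahead of us, nothing to check yet
        if (hash_at_height(pt.first) == pt.second)
          continue;                              // checkpoint satisfied
        if (enforce)
          rollback_to(pt.first >= 2 ? pt.first - 2 : 0);  // drop back below the bad checkpoint
        // otherwise: advisory only, log the warning and keep scanning
      }
    }
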
//------------------------------------------------------------------
// returns false if any of the checkpoints loading returns false.
@@ -2730,119 +2728,119 @@ void Blockchain::check_against_checkpoints(const checkpoints& points, bool enfor
// with an existing checkpoint.
bool Blockchain::update_checkpoints(const std::string& file_path, bool check_dns)
{
- if (!cryptonote::load_checkpoints_from_json(m_checkpoints, file_path))
+ if (!cryptonote::load_checkpoints_from_json(m_checkpoints, file_path))
+ {
+ return false;
+ }
+
+ // if we're checking both dns and json, load checkpoints from dns.
+ // if we're not hard-enforcing dns checkpoints, handle accordingly
+ if (m_enforce_dns_checkpoints && check_dns)
+ {
+ if (!cryptonote::load_checkpoints_from_dns(m_checkpoints))
{
- return false;
+ return false;
}
-
- // if we're checking both dns and json, load checkpoints from dns.
- // if we're not hard-enforcing dns checkpoints, handle accordingly
- if (m_enforce_dns_checkpoints && check_dns)
+ }
+ else if (check_dns)
+ {
+ checkpoints dns_points;
+ cryptonote::load_checkpoints_from_dns(dns_points);
+ if (m_checkpoints.check_for_conflicts(dns_points))
{
- if (!cryptonote::load_checkpoints_from_dns(m_checkpoints))
- {
- return false;
- }
+ check_against_checkpoints(dns_points, false);
}
- else if (check_dns)
+ else
{
- checkpoints dns_points;
- cryptonote::load_checkpoints_from_dns(dns_points);
- if (m_checkpoints.check_for_conflicts(dns_points))
- {
- check_against_checkpoints(dns_points, false);
- }
- else
- {
- LOG_PRINT_L0("One or more checkpoints fetched from DNS conflicted with existing checkpoints!");
- }
+ LOG_PRINT_L0("One or more checkpoints fetched from DNS conflicted with existing checkpoints!");
}
+ }
- check_against_checkpoints(m_checkpoints, true);
+ check_against_checkpoints(m_checkpoints, true);
- return true;
+ return true;
}
//------------------------------------------------------------------
void Blockchain::set_enforce_dns_checkpoints(bool enforce_checkpoints)
{
- m_enforce_dns_checkpoints = enforce_checkpoints;
+ m_enforce_dns_checkpoints = enforce_checkpoints;
}
//------------------------------------------------------------------
void Blockchain::block_longhash_worker(const uint64_t height, const std::vector<block> &blocks, std::unordered_map<crypto::hash, crypto::hash> &map) const
{
- TIME_MEASURE_START(t);
- slow_hash_allocate_state();
+ TIME_MEASURE_START(t);
+ slow_hash_allocate_state();
- for (const auto & block : blocks)
- {
- crypto::hash id = get_block_hash(block);
- crypto::hash pow = get_block_longhash(block, height);
- map.emplace(id, pow);
- }
+ for (const auto & block : blocks)
+ {
+ crypto::hash id = get_block_hash(block);
+ crypto::hash pow = get_block_longhash(block, height);
+ map.emplace(id, pow);
+ }
- slow_hash_free_state();
- TIME_MEASURE_FINISH(t);
+ slow_hash_free_state();
+ TIME_MEASURE_FINISH(t);
}
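
block_longhash_worker is the per-thread body used by prepare_handle_incoming_blocks further down: each worker allocates the slow-hash scratchpad once, hashes only its own slice of blocks, and writes id-to-PoW pairs into a map that belongs to that thread alone, so the caller can merge the per-thread maps afterwards without locking. The same shape with generic stand-in hash functions (the crypto calls and the block type here are placeholders):

    #include <cstdint>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct fake_block { uint64_t height; std::string blob; };

    // Stand-ins for get_block_hash() / get_block_longhash(); any pure functions work.
    std::string block_id(const fake_block& b)  { return "id:" + b.blob; }
    std::string block_pow(const fake_block& b) { return "pow:" + std::to_string(b.height) + ":" + b.blob; }

    // One call per worker thread; `out` is private to that thread, so no locks.
    void longhash_worker(const std::vector<fake_block>& blocks,
                         std::unordered_map<std::string, std::string>& out)
    {
      // real code: slow_hash_allocate_state() here
      for (const auto& b : blocks)
        out.emplace(block_id(b), block_pow(b));
      // real code: slow_hash_free_state() here
    }
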
//------------------------------------------------------------------
bool Blockchain::cleanup_handle_incoming_blocks(bool force_sync)
{
- LOG_PRINT_YELLOW("Blockchain::" << __func__, LOG_LEVEL_3);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
- TIME_MEASURE_START(t1);
+ LOG_PRINT_YELLOW("Blockchain::" << __func__, LOG_LEVEL_3);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ TIME_MEASURE_START(t1);
- if (m_sync_counter > 0)
+ if (m_sync_counter > 0)
+ {
+ if (force_sync)
{
- if (force_sync)
- {
- if(m_db_sync_mode != db_nosync)
- store_blockchain();
- m_sync_counter = 0;
- }
- else if (m_sync_counter >= m_db_blocks_per_sync)
- {
- if(m_db_sync_mode == db_async)
- {
- m_sync_counter = 0;
- m_async_service.dispatch(boost::bind(&Blockchain::store_blockchain, this));
- }
- else if(m_db_sync_mode == db_sync)
- {
- store_blockchain();
- }
- else // db_nosync
- {
- // DO NOTHING, not required to call sync.
- }
- }
+ if(m_db_sync_mode != db_nosync)
+ store_blockchain();
+ m_sync_counter = 0;
}
+ else if (m_sync_counter >= m_db_blocks_per_sync)
+ {
+ if(m_db_sync_mode == db_async)
+ {
+ m_sync_counter = 0;
+ m_async_service.dispatch(boost::bind(&Blockchain::store_blockchain, this));
+ }
+ else if(m_db_sync_mode == db_sync)
+ {
+ store_blockchain();
+ }
+ else // db_nosync
+ {
+ // DO NOTHING, not required to call sync.
+ }
+ }
+ }
- TIME_MEASURE_FINISH(t1);
- m_blocks_longhash_table.clear();
- m_scan_table.clear();
- m_check_tx_inputs_table.clear();
- m_blocks_txs_check.clear();
- m_check_txin_table.clear();
+ TIME_MEASURE_FINISH(t1);
+ m_blocks_longhash_table.clear();
+ m_scan_table.clear();
+ m_check_tx_inputs_table.clear();
+ m_blocks_txs_check.clear();
+ m_check_txin_table.clear();
- return true;
+ return true;
}
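
The flush logic above is driven by the blockchain_db_sync_mode set through set_user_options (see the end of this file's diff): a forced cleanup stores immediately unless the mode is db_nosync, otherwise nothing is written until m_sync_counter reaches m_db_blocks_per_sync, at which point db_sync stores on the calling thread and db_async dispatches store_blockchain to the io_service. A condensed sketch of that decision, with the two store paths passed in as placeholder callbacks:

    #include <cstdint>
    #include <functional>

    enum blockchain_db_sync_mode { db_sync, db_async, db_nosync };

    void maybe_flush(uint64_t& sync_counter,          // m_sync_counter
                     uint64_t blocks_per_sync,        // m_db_blocks_per_sync
                     blockchain_db_sync_mode mode,
                     bool force_sync,
                     const std::function<void()>& store_now,    // store_blockchain()
                     const std::function<void()>& store_async)  // dispatch to the async io_service
    {
      if (sync_counter == 0)
        return;
      if (force_sync)
      {
        if (mode != db_nosync)
          store_now();
        sync_counter = 0;
      }
      else if (sync_counter >= blocks_per_sync)
      {
        if (mode == db_async)     { sync_counter = 0; store_async(); }
        else if (mode == db_sync) { store_now(); }
        // db_nosync: deliberately do nothing
      }
    }
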
//------------------------------------------------------------------
void Blockchain::output_scan_worker(const uint64_t amount, const std::vector<uint64_t> &offsets, std::vector<output_data_t> &outputs, std::unordered_map<crypto::hash, cryptonote::transaction> &txs) const
{
- try
- {
- m_db->get_output_key(amount, offsets, outputs);
- }
- catch (const std::exception& e)
- {
- LOG_PRINT_L1("EXCEPTION: " << e.what());
- }
- catch (...)
- {
+ try
+ {
+ m_db->get_output_key(amount, offsets, outputs);
+ }
+ catch (const std::exception& e)
+ {
+ LOG_PRINT_L1("EXCEPTION: " << e.what());
+ }
+ catch (...)
+ {
- }
+ }
}
//------------------------------------------------------------------
@@ -2854,137 +2852,137 @@ void Blockchain::output_scan_worker(const uint64_t amount, const std::vector<uin
// keys.
bool Blockchain::prepare_handle_incoming_blocks(const std::list<block_complete_entry> &blocks_entry)
{
- LOG_PRINT_YELLOW("Blockchain::" << __func__, LOG_LEVEL_3);
- TIME_MEASURE_START(prepare);
- CRITICAL_REGION_LOCAL(m_blockchain_lock);
+ LOG_PRINT_YELLOW("Blockchain::" << __func__, LOG_LEVEL_3);
+ TIME_MEASURE_START(prepare);
+ CRITICAL_REGION_LOCAL(m_blockchain_lock);
- if(blocks_entry.size() == 0)
- return false;
+ if(blocks_entry.size() == 0)
+ return false;
- if ((m_db->height() + blocks_entry.size()) < m_blocks_hash_check.size())
- return true;
+ if ((m_db->height() + blocks_entry.size()) < m_blocks_hash_check.size())
+ return true;
- bool blocks_exist = false;
- uint64_t threads = std::thread::hardware_concurrency();
+ bool blocks_exist = false;
+ uint64_t threads = std::thread::hardware_concurrency();
- if (blocks_entry.size() > 1 && threads > 1 && m_max_prepare_blocks_threads > 1)
- {
- // limit threads, default limit = 4
- if(threads > m_max_prepare_blocks_threads)
- threads = m_max_prepare_blocks_threads;
+ if (blocks_entry.size() > 1 && threads > 1 && m_max_prepare_blocks_threads > 1)
+ {
+ // limit threads, default limit = 4
+ if(threads > m_max_prepare_blocks_threads)
+ threads = m_max_prepare_blocks_threads;
- uint64_t height = m_db->height();
- std::vector<boost::thread *> thread_list;
- int batches = blocks_entry.size() / threads;
- int extra = blocks_entry.size() % threads;
- LOG_PRINT_L1("block_batches: " << batches);
- std::vector<std::unordered_map<crypto::hash, crypto::hash>> maps(threads);
- std::vector < std::vector < block >> blocks(threads);
- auto it = blocks_entry.begin();
+ uint64_t height = m_db->height();
+ std::vector<boost::thread *> thread_list;
+ int batches = blocks_entry.size() / threads;
+ int extra = blocks_entry.size() % threads;
+ LOG_PRINT_L1("block_batches: " << batches);
+ std::vector<std::unordered_map<crypto::hash, crypto::hash>> maps(threads);
+ std::vector < std::vector < block >> blocks(threads);
+ auto it = blocks_entry.begin();
+
+ for (uint64_t i = 0; i < threads; i++)
+ {
+ for (int j = 0; j < batches; j++)
+ {
+ block block;
- for (uint64_t i = 0; i < threads; i++)
+ if (!parse_and_validate_block_from_blob(it->block, block))
{
- for (int j = 0; j < batches; j++)
- {
- block block;
-
- if (!parse_and_validate_block_from_blob(it->block, block))
- {
- std::advance(it, 1);
- continue;
- }
-
- // check first block and skip all blocks if its not chained properly
- if (i == 0 && j == 0)
- {
- crypto::hash tophash = m_db->top_block_hash();
- if (block.prev_id != tophash)
- {
- LOG_PRINT_L1("Skipping prepare blocks. New blocks don't belong to chain.")
- return true;
- }
- }
- if (have_block(get_block_hash(block)))
- {
- blocks_exist = true;
- break;
- }
-
- blocks[i].push_back(block);
- std::advance(it, 1);
- }
+ std::advance(it, 1);
+ continue;
}
- for (int i = 0; i < extra && !blocks_exist; i++)
+        // check first block and skip all blocks if it's not chained properly
+ if (i == 0 && j == 0)
{
- block block;
-
- if (!parse_and_validate_block_from_blob(it->block, block))
- {
- std::advance(it, 1);
- continue;
- }
-
- if (have_block(get_block_hash(block)))
- {
- blocks_exist = true;
- break;
- }
-
- blocks[i].push_back(block);
- std::advance(it, 1);
+ crypto::hash tophash = m_db->top_block_hash();
+ if (block.prev_id != tophash)
+ {
+ LOG_PRINT_L1("Skipping prepare blocks. New blocks don't belong to chain.")
+ return true;
+ }
}
-
- if (!blocks_exist)
+ if (have_block(get_block_hash(block)))
{
- m_blocks_longhash_table.clear();
- for (uint64_t i = 0; i < threads; i++)
- {
- thread_list.push_back(new boost::thread(&Blockchain::block_longhash_worker, this, height + (i * batches), std::cref(blocks[i]), std::ref(maps[i])));
- }
+ blocks_exist = true;
+ break;
+ }
- for (size_t j = 0; j < thread_list.size(); j++)
- {
- thread_list[j]->join();
- delete thread_list[j];
- }
+ blocks[i].push_back(block);
+ std::advance(it, 1);
+ }
+ }
- thread_list.clear();
+ for (int i = 0; i < extra && !blocks_exist; i++)
+ {
+ block block;
- for (const auto & map : maps)
- {
- m_blocks_longhash_table.insert(map.begin(), map.end());
- }
- }
+ if (!parse_and_validate_block_from_blob(it->block, block))
+ {
+ std::advance(it, 1);
+ continue;
+ }
+
+ if (have_block(get_block_hash(block)))
+ {
+ blocks_exist = true;
+ break;
+ }
+
+ blocks[i].push_back(block);
+ std::advance(it, 1);
}
- if (blocks_exist)
+ if (!blocks_exist)
{
- LOG_PRINT_L0("Skipping prepare blocks. Blocks exist.")
- return true;
+ m_blocks_longhash_table.clear();
+ for (uint64_t i = 0; i < threads; i++)
+ {
+ thread_list.push_back(new boost::thread(&Blockchain::block_longhash_worker, this, height + (i * batches), std::cref(blocks[i]), std::ref(maps[i])));
+ }
+
+ for (size_t j = 0; j < thread_list.size(); j++)
+ {
+ thread_list[j]->join();
+ delete thread_list[j];
+ }
+
+ thread_list.clear();
+
+ for (const auto & map : maps)
+ {
+ m_blocks_longhash_table.insert(map.begin(), map.end());
+ }
}
+ }
- m_fake_scan_time = 0;
- m_fake_pow_calc_time = 0;
+ if (blocks_exist)
+ {
+ LOG_PRINT_L0("Skipping prepare blocks. Blocks exist.")
+ return true;
+ }
- m_scan_table.clear();
- m_check_tx_inputs_table.clear();
- m_check_txin_table.clear();
+ m_fake_scan_time = 0;
+ m_fake_pow_calc_time = 0;
- TIME_MEASURE_FINISH(prepare);
- m_fake_pow_calc_time = prepare / blocks_entry.size();
+ m_scan_table.clear();
+ m_check_tx_inputs_table.clear();
+ m_check_txin_table.clear();
- if (blocks_entry.size() > 1 && threads > 1 && m_show_time_stats)
- LOG_PRINT_L0("Prepare blocks took: " << prepare << " ms");
+ TIME_MEASURE_FINISH(prepare);
+ m_fake_pow_calc_time = prepare / blocks_entry.size();
- TIME_MEASURE_START(scantable);
+ if (blocks_entry.size() > 1 && threads > 1 && m_show_time_stats)
+ LOG_PRINT_L0("Prepare blocks took: " << prepare << " ms");
- // [input] stores all unique amounts found
- std::vector < uint64_t > amounts;
- // [input] stores all absolute_offsets for each amount
- std::map<uint64_t, std::vector<uint64_t>> offset_map;
- // [output] stores all output_data_t for each absolute_offset
- std::map<uint64_t, std::vector<output_data_t>> tx_map;
+ TIME_MEASURE_START(scantable);
+
+ // [input] stores all unique amounts found
+ std::vector < uint64_t > amounts;
+ // [input] stores all absolute_offsets for each amount
+ std::map<uint64_t, std::vector<uint64_t>> offset_map;
+ // [output] stores all output_data_t for each absolute_offset
+ std::map<uint64_t, std::vector<output_data_t>> tx_map;
#define SCAN_TABLE_QUIT(m) \
do { \
@@ -2993,191 +2991,191 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::list<block_complete_e
return false; \
} while(0); \
- // generate sorted tables for all amounts and absolute offsets
- for (const auto &entry : blocks_entry)
+ // generate sorted tables for all amounts and absolute offsets
+ for (const auto &entry : blocks_entry)
+ {
+ for (const auto &tx_blob : entry.txs)
{
- for (const auto &tx_blob : entry.txs)
- {
- crypto::hash tx_hash = null_hash;
- crypto::hash tx_prefix_hash = null_hash;
- transaction tx;
+ crypto::hash tx_hash = null_hash;
+ crypto::hash tx_prefix_hash = null_hash;
+ transaction tx;
- if (!parse_and_validate_tx_from_blob(tx_blob, tx, tx_hash, tx_prefix_hash))
- SCAN_TABLE_QUIT("Could not parse tx from incoming blocks.");
+ if (!parse_and_validate_tx_from_blob(tx_blob, tx, tx_hash, tx_prefix_hash))
+ SCAN_TABLE_QUIT("Could not parse tx from incoming blocks.");
- auto its = m_scan_table.find(tx_prefix_hash);
- if (its != m_scan_table.end())
- SCAN_TABLE_QUIT("Duplicate tx found from incoming blocks.");
+ auto its = m_scan_table.find(tx_prefix_hash);
+ if (its != m_scan_table.end())
+ SCAN_TABLE_QUIT("Duplicate tx found from incoming blocks.");
- m_scan_table.emplace(tx_prefix_hash, std::unordered_map<crypto::key_image, std::vector<output_data_t>>());
- its = m_scan_table.find(tx_prefix_hash);
- assert(its != m_scan_table.end());
+ m_scan_table.emplace(tx_prefix_hash, std::unordered_map<crypto::key_image, std::vector<output_data_t>>());
+ its = m_scan_table.find(tx_prefix_hash);
+ assert(its != m_scan_table.end());
- // get all amounts from tx.vin(s)
- for (const auto &txin : tx.vin)
- {
- const txin_to_key &in_to_key = boost::get < txin_to_key > (txin);
+ // get all amounts from tx.vin(s)
+ for (const auto &txin : tx.vin)
+ {
+ const txin_to_key &in_to_key = boost::get < txin_to_key > (txin);
- // check for duplicate
- auto it = its->second.find(in_to_key.k_image);
- if (it != its->second.end())
- SCAN_TABLE_QUIT("Duplicate key_image found from incoming blocks.");
+ // check for duplicate
+ auto it = its->second.find(in_to_key.k_image);
+ if (it != its->second.end())
+ SCAN_TABLE_QUIT("Duplicate key_image found from incoming blocks.");
- amounts.push_back(in_to_key.amount);
- }
+ amounts.push_back(in_to_key.amount);
+ }
- // sort and remove duplicate amounts from amounts list
- std::sort(amounts.begin(), amounts.end());
- auto last = std::unique(amounts.begin(), amounts.end());
- amounts.erase(last, amounts.end());
+ // sort and remove duplicate amounts from amounts list
+ std::sort(amounts.begin(), amounts.end());
+ auto last = std::unique(amounts.begin(), amounts.end());
+ amounts.erase(last, amounts.end());
- // add amount to the offset_map and tx_map
- for (const uint64_t &amount : amounts)
- {
- if (offset_map.find(amount) == offset_map.end())
- offset_map.emplace(amount, std::vector<uint64_t>());
+ // add amount to the offset_map and tx_map
+ for (const uint64_t &amount : amounts)
+ {
+ if (offset_map.find(amount) == offset_map.end())
+ offset_map.emplace(amount, std::vector<uint64_t>());
- if (tx_map.find(amount) == tx_map.end())
- tx_map.emplace(amount, std::vector<output_data_t>());
- }
+ if (tx_map.find(amount) == tx_map.end())
+ tx_map.emplace(amount, std::vector<output_data_t>());
+ }
- // add new absolute_offsets to offset_map
- for (const auto &txin : tx.vin)
- {
- const txin_to_key &in_to_key = boost::get < txin_to_key > (txin);
- // no need to check for duplicate here.
- auto absolute_offsets = relative_output_offsets_to_absolute(in_to_key.key_offsets);
- for (const auto & offset : absolute_offsets)
- offset_map[in_to_key.amount].push_back(offset);
+ // add new absolute_offsets to offset_map
+ for (const auto &txin : tx.vin)
+ {
+ const txin_to_key &in_to_key = boost::get < txin_to_key > (txin);
+ // no need to check for duplicate here.
+ auto absolute_offsets = relative_output_offsets_to_absolute(in_to_key.key_offsets);
+ for (const auto & offset : absolute_offsets)
+ offset_map[in_to_key.amount].push_back(offset);
- }
+ }
- // sort and remove duplicate absolute_offsets in offset_map
- for (auto &offsets : offset_map)
- {
- std::sort(offsets.second.begin(), offsets.second.end());
- auto last = std::unique(offsets.second.begin(), offsets.second.end());
- offsets.second.erase(last, offsets.second.end());
- }
- }
+ // sort and remove duplicate absolute_offsets in offset_map
+ for (auto &offsets : offset_map)
+ {
+ std::sort(offsets.second.begin(), offsets.second.end());
+ auto last = std::unique(offsets.second.begin(), offsets.second.end());
+ offsets.second.erase(last, offsets.second.end());
+ }
}
+ }
- // [output] stores all transactions for each tx_out_index::hash found
- std::vector<std::unordered_map<crypto::hash, cryptonote::transaction>> transactions(amounts.size());
+ // [output] stores all transactions for each tx_out_index::hash found
+ std::vector<std::unordered_map<crypto::hash, cryptonote::transaction>> transactions(amounts.size());
- threads = std::thread::hardware_concurrency();
- if (!m_db->can_thread_bulk_indices())
- threads = 1;
+ threads = std::thread::hardware_concurrency();
+ if (!m_db->can_thread_bulk_indices())
+ threads = 1;
- if (threads > 1)
- {
- boost::asio::io_service ioservice;
- boost::thread_group threadpool;
- std::unique_ptr < boost::asio::io_service::work > work(new boost::asio::io_service::work(ioservice));
-
- for (uint64_t i = 0; i < threads; i++)
- {
- threadpool.create_thread(boost::bind(&boost::asio::io_service::run, &ioservice));
- }
+ if (threads > 1)
+ {
+ boost::asio::io_service ioservice;
+ boost::thread_group threadpool;
+ std::unique_ptr < boost::asio::io_service::work > work(new boost::asio::io_service::work(ioservice));
- for (size_t i = 0; i < amounts.size(); i++)
- {
- uint64_t amount = amounts[i];
- ioservice.dispatch(boost::bind(&Blockchain::output_scan_worker, this, amount, std::cref(offset_map[amount]), std::ref(tx_map[amount]), std::ref(transactions[i])));
- }
+ for (uint64_t i = 0; i < threads; i++)
+ {
+ threadpool.create_thread(boost::bind(&boost::asio::io_service::run, &ioservice));
+ }
- work.reset();
- threadpool.join_all();
- ioservice.stop();
+ for (size_t i = 0; i < amounts.size(); i++)
+ {
+ uint64_t amount = amounts[i];
+ ioservice.dispatch(boost::bind(&Blockchain::output_scan_worker, this, amount, std::cref(offset_map[amount]), std::ref(tx_map[amount]), std::ref(transactions[i])));
}
- else
+
+ work.reset();
+ threadpool.join_all();
+ ioservice.stop();
+ }
+ else
+ {
+ for (size_t i = 0; i < amounts.size(); i++)
{
- for (size_t i = 0; i < amounts.size(); i++)
- {
- uint64_t amount = amounts[i];
- output_scan_worker(amount, offset_map[amount], tx_map[amount], transactions[i]);
- }
+ uint64_t amount = amounts[i];
+ output_scan_worker(amount, offset_map[amount], tx_map[amount], transactions[i]);
}
+ }
- int total_txs = 0;
+ int total_txs = 0;
- // now generate a table for each tx_prefix and k_image hashes
- for (const auto &entry : blocks_entry)
+ // now generate a table for each tx_prefix and k_image hashes
+ for (const auto &entry : blocks_entry)
+ {
+ for (const auto &tx_blob : entry.txs)
{
- for (const auto &tx_blob : entry.txs)
- {
- crypto::hash tx_hash = null_hash;
- crypto::hash tx_prefix_hash = null_hash;
- transaction tx;
+ crypto::hash tx_hash = null_hash;
+ crypto::hash tx_prefix_hash = null_hash;
+ transaction tx;
+
+ if (!parse_and_validate_tx_from_blob(tx_blob, tx, tx_hash, tx_prefix_hash))
+ SCAN_TABLE_QUIT("Could not parse tx from incoming blocks.");
- if (!parse_and_validate_tx_from_blob(tx_blob, tx, tx_hash, tx_prefix_hash))
- SCAN_TABLE_QUIT("Could not parse tx from incoming blocks.");
+ ++total_txs;
+ auto its = m_scan_table.find(tx_prefix_hash);
+ if (its == m_scan_table.end())
+ SCAN_TABLE_QUIT("Tx not found on scan table from incoming blocks.");
- ++total_txs;
- auto its = m_scan_table.find(tx_prefix_hash);
- if (its == m_scan_table.end())
- SCAN_TABLE_QUIT("Tx not found on scan table from incoming blocks.");
+ for (const auto &txin : tx.vin)
+ {
+ const txin_to_key &in_to_key = boost::get < txin_to_key > (txin);
+ auto needed_offsets = relative_output_offsets_to_absolute(in_to_key.key_offsets);
+
+ std::vector<output_data_t> outputs;
+ for (const uint64_t & offset_needed : needed_offsets)
+ {
+ size_t pos = 0;
+ bool found = false;
- for (const auto &txin : tx.vin)
+ for (const uint64_t &offset_found : offset_map[in_to_key.amount])
+ {
+ if (offset_needed == offset_found)
{
- const txin_to_key &in_to_key = boost::get < txin_to_key > (txin);
- auto needed_offsets = relative_output_offsets_to_absolute(in_to_key.key_offsets);
-
- std::vector<output_data_t> outputs;
- for (const uint64_t & offset_needed : needed_offsets)
- {
- size_t pos = 0;
- bool found = false;
-
- for (const uint64_t &offset_found : offset_map[in_to_key.amount])
- {
- if (offset_needed == offset_found)
- {
- found = true;
- break;
- }
-
- ++pos;
- }
-
- if (found && pos < tx_map[in_to_key.amount].size())
- outputs.push_back(tx_map[in_to_key.amount].at(pos));
- else
- break;
- }
-
- its->second.emplace(in_to_key.k_image, outputs);
+ found = true;
+ break;
}
+
+ ++pos;
+ }
+
+ if (found && pos < tx_map[in_to_key.amount].size())
+ outputs.push_back(tx_map[in_to_key.amount].at(pos));
+ else
+ break;
}
- }
- TIME_MEASURE_FINISH(scantable);
- if (total_txs > 0)
- {
- m_fake_scan_time = scantable / total_txs;
- if(m_show_time_stats)
- LOG_PRINT_L0("Prepare scantable took: " << scantable << " ms");
+ its->second.emplace(in_to_key.k_image, outputs);
+ }
}
+ }
- return true;
+ TIME_MEASURE_FINISH(scantable);
+ if (total_txs > 0)
+ {
+ m_fake_scan_time = scantable / total_txs;
+ if(m_show_time_stats)
+ LOG_PRINT_L0("Prepare scantable took: " << scantable << " ms");
+ }
+
+ return true;
}
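
The multithreaded branch of prepare_handle_incoming_blocks splits the incoming batch as evenly as it can: every thread gets batches = count / threads blocks, the count % threads leftovers are parsed in a second loop and appended one each to the first per-thread slices, and worker i is started at height + i * batches so block_longhash_worker sees roughly the right heights. A small sketch of just that partitioning arithmetic, folded into a single pass for brevity:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct slice { uint64_t start_height; size_t count; };

    // Same distribution as the code above: equal batches per thread, remainder
    // spread one block each over the first `extra` threads.
    std::vector<slice> partition_blocks(uint64_t height, size_t total, size_t threads)
    {
      const size_t batches = total / threads;
      const size_t extra   = total % threads;
      std::vector<slice> out;
      for (size_t i = 0; i < threads; ++i)
        out.push_back({height + i * batches, batches + (i < extra ? 1 : 0)});
      return out;
    }

    int main()
    {
      for (const auto& s : partition_blocks(1000000, 10, 4))
        std::printf("start=%llu count=%zu\n",
                    static_cast<unsigned long long>(s.start_height), s.count);
    }
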
void Blockchain::set_user_options(uint64_t maxthreads, uint64_t blocks_per_sync, blockchain_db_sync_mode sync_mode, bool fast_sync)
{
- m_db_sync_mode = sync_mode;
- m_fast_sync = fast_sync;
- m_db_blocks_per_sync = blocks_per_sync;
- m_max_prepare_blocks_threads = maxthreads;
+ m_db_sync_mode = sync_mode;
+ m_fast_sync = fast_sync;
+ m_db_blocks_per_sync = blocks_per_sync;
+ m_max_prepare_blocks_threads = maxthreads;
}
HardFork::State Blockchain::get_hard_fork_state() const
{
- return m_hardfork->get_state();
+ return m_hardfork->get_state();
}
bool Blockchain::get_hard_fork_voting_info(uint8_t version, uint32_t &window, uint32_t &votes, uint32_t &threshold, uint8_t &voting) const
{
- return m_hardfork->get_voting_info(version, window, votes, threshold, voting);
+ return m_hardfork->get_voting_info(version, window, votes, threshold, voting);
}
bool Blockchain::for_all_key_images(std::function<bool(const crypto::key_image&)> f) const
diff --git a/src/cryptonote_core/blockchain.h b/src/cryptonote_core/blockchain.h
index f0b03ab0a..e8f5a7e5b 100644
--- a/src/cryptonote_core/blockchain.h
+++ b/src/cryptonote_core/blockchain.h
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#pragma once
@@ -61,11 +61,11 @@ namespace cryptonote
enum blockchain_db_sync_mode
{
- db_sync,
- db_async,
- db_nosync
+ db_sync,
+ db_async,
+ db_nosync
};
-
+
/************************************************************************/
/* */
/************************************************************************/
@@ -153,10 +153,10 @@ namespace cryptonote
// user options, must be called before calling init()
void set_user_options(uint64_t block_threads, uint64_t blocks_per_sync,
- blockchain_db_sync_mode sync_mode, bool fast_sync);
+ blockchain_db_sync_mode sync_mode, bool fast_sync);
void set_show_time_stats(bool stats) { m_show_time_stats = stats; }
-
+
HardFork::State get_hard_fork_state() const;
uint8_t get_current_hard_fork_version() const { return m_hardfork->get_current_version(); }
uint8_t get_ideal_hard_fork_version() const { return m_hardfork->get_ideal_version(); }
@@ -174,12 +174,12 @@ namespace cryptonote
return *m_db;
}
- void output_scan_worker(const uint64_t amount,const std::vector<uint64_t> &offsets,
- std::vector<output_data_t> &outputs, std::unordered_map<crypto::hash,
- cryptonote::transaction> &txs) const;
+ void output_scan_worker(const uint64_t amount,const std::vector<uint64_t> &offsets,
+ std::vector<output_data_t> &outputs, std::unordered_map<crypto::hash,
+ cryptonote::transaction> &txs) const;
- void block_longhash_worker(const uint64_t height, const std::vector<block> &blocks,
- std::unordered_map<crypto::hash, crypto::hash> &map) const;
+ void block_longhash_worker(const uint64_t height, const std::vector<block> &blocks,
+ std::unordered_map<crypto::hash, crypto::hash> &map) const;
private:
typedef std::unordered_map<crypto::hash, size_t> blocks_by_id_index;
typedef std::unordered_map<crypto::hash, transaction_chain_entry> transactions_container;
@@ -208,22 +208,22 @@ namespace cryptonote
// SHA-3 hashes for each block and for fast pow checking
std::vector<crypto::hash> m_blocks_hash_check;
std::vector<crypto::hash> m_blocks_txs_check;
-
- blockchain_db_sync_mode m_db_sync_mode;
- bool m_fast_sync;
- bool m_show_time_stats;
- uint64_t m_db_blocks_per_sync;
- uint64_t m_max_prepare_blocks_threads;
+
+ blockchain_db_sync_mode m_db_sync_mode;
+ bool m_fast_sync;
+ bool m_show_time_stats;
+ uint64_t m_db_blocks_per_sync;
+ uint64_t m_max_prepare_blocks_threads;
uint64_t m_fake_pow_calc_time;
uint64_t m_fake_scan_time;
- uint64_t m_sync_counter;
- std::vector<uint64_t> m_timestamps;
- std::vector<difficulty_type> m_difficulties;
- uint64_t m_timestamps_and_difficulties_height;
+ uint64_t m_sync_counter;
+ std::vector<uint64_t> m_timestamps;
+ std::vector<difficulty_type> m_difficulties;
+ uint64_t m_timestamps_and_difficulties_height;
- boost::asio::io_service m_async_service;
- boost::thread_group m_async_pool;
- std::unique_ptr<boost::asio::io_service::work> m_async_work_idle;
+ boost::asio::io_service m_async_service;
+ boost::thread_group m_async_pool;
+ std::unique_ptr<boost::asio::io_service::work> m_async_work_idle;
// all alternative chains
blocks_ext_by_hash m_alternative_chains; // crypto::hash -> block_extended_info
@@ -273,6 +273,6 @@ namespace cryptonote
bool check_for_double_spend(const transaction& tx, key_images_container& keys_this_block) const;
void get_timestamp_and_difficulty(uint64_t &timestamp, difficulty_type &difficulty, const int offset) const;
void check_ring_signature(const crypto::hash &tx_prefix_hash, const crypto::key_image &key_image,
- const std::vector<crypto::public_key> &pubkeys, const std::vector<crypto::signature> &sig, uint64_t &result);
+ const std::vector<crypto::public_key> &pubkeys, const std::vector<crypto::signature> &sig, uint64_t &result);
};
} // namespace cryptonote
diff --git a/src/cryptonote_core/blockchain_storage.cpp b/src/cryptonote_core/blockchain_storage.cpp
index 72bd05f6a..ee8fac368 100644
--- a/src/cryptonote_core/blockchain_storage.cpp
+++ b/src/cryptonote_core/blockchain_storage.cpp
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#include <algorithm>
@@ -97,11 +97,11 @@ bool blockchain_storage::init(const std::string& config_folder, bool testnet)
{
// checkpoints
-
+
// mainchain
- for (size_t height=0; height < m_blocks.size(); ++height)
+ for (size_t height=0; height < m_blocks.size(); ++height)
{
- CHECK_AND_ASSERT_MES((!m_checkpoints.is_in_checkpoint_zone(height)) || m_checkpoints.check_block(height,get_block_hash(m_blocks[height].bl)),false,"checkpoint fail, blockchain.bin invalid");
+ CHECK_AND_ASSERT_MES((!m_checkpoints.is_in_checkpoint_zone(height)) || m_checkpoints.check_block(height,get_block_hash(m_blocks[height].bl)),false,"checkpoint fail, blockchain.bin invalid");
}
// check alt chains
@@ -110,7 +110,7 @@ bool blockchain_storage::init(const std::string& config_folder, bool testnet)
// see issue #118
BOOST_FOREACH(blocks_ext_by_hash::value_type& alt_block, m_alternative_chains)
{
- CHECK_AND_ASSERT_MES(m_checkpoints.is_alternative_block_allowed(m_blocks.size()-1,alt_block.second.height),false,"stored alternative block not allowed, blockchain.bin invalid");
+ CHECK_AND_ASSERT_MES(m_checkpoints.is_alternative_block_allowed(m_blocks.size()-1,alt_block.second.height),false,"stored alternative block not allowed, blockchain.bin invalid");
}
#endif
}
@@ -661,7 +661,7 @@ bool blockchain_storage::create_block_template(block& b, const account_public_ad
b.timestamp = time(NULL);
height = m_blocks.size();
diffic = get_difficulty_for_next_block();
- CHECK_AND_ASSERT_MES(diffic, false, "difficulty owverhead.");
+ CHECK_AND_ASSERT_MES(diffic, false, "difficulty overhead.");
median_size = m_current_block_cumul_sz_limit / 2;
already_generated_coins = m_blocks.back().already_generated_coins;
@@ -1072,13 +1072,13 @@ bool blockchain_storage::get_random_outs_for_amounts(const COMMAND_RPC_GET_RANDO
size_t try_count = 0;
for(uint64_t j = 0; j != req.outs_count && try_count < up_index_limit;)
{
- // triangular distribution over [a,b) with a=0, mode c=b=up_index_limit
+ // triangular distribution over [a,b) with a=0, mode c=b=up_index_limit
uint64_t r = crypto::rand<uint64_t>() % ((uint64_t)1 << 53);
- double frac = std::sqrt((double)r / ((uint64_t)1 << 53));
- size_t i = (size_t)(frac*up_index_limit);
- // just in case rounding up to 1 occurs after sqrt
- if (i == up_index_limit)
- --i;
+ double frac = std::sqrt((double)r / ((uint64_t)1 << 53));
+ size_t i = (size_t)(frac*up_index_limit);
+ // just in case rounding up to 1 occurs after sqrt
+ if (i == up_index_limit)
+ --i;
if(used.count(i))
continue;
bool added = add_out_to_get_random_outs(amount_outs, result_outs, amount, i);
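
The sampling step re-indented above is inverse-transform sampling of a triangular density on [0, up_index_limit) whose mode sits at the upper end: with u uniform in [0, 1), the density f(x) = 2x / b^2 has CDF F(x) = (x / b)^2, so x = b * sqrt(u), which biases selection toward newer (higher-index) outputs. A standalone sketch of that pick; the RNG is a stand-in for crypto::rand<uint64_t>():

    #include <cmath>
    #include <cstdint>
    #include <random>

    // Triangular pick over [0, upper): density f(x) = 2x/upper^2, CDF (x/upper)^2,
    // inverse CDF x = upper * sqrt(u) for uniform u in [0, 1).
    size_t pick_triangular(uint64_t upper, std::mt19937_64& rng)
    {
      // Same construction as above: a 53-bit uniform integer mapped into [0, 1).
      const uint64_t r = rng() % (uint64_t(1) << 53);
      const double u = static_cast<double>(r) / static_cast<double>(uint64_t(1) << 53);
      size_t i = static_cast<size_t>(std::sqrt(u) * static_cast<double>(upper));
      if (upper > 0 && i >= upper)  // guard against sqrt rounding up to the bound
        --i;
      return i;
    }
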
@@ -1155,27 +1155,27 @@ uint64_t blockchain_storage::block_difficulty(size_t i) const
//------------------------------------------------------------------
double blockchain_storage::get_avg_block_size( size_t count) const
{
- if (count > get_current_blockchain_height()) return 500;
+ if (count > get_current_blockchain_height()) return 500;
+
+ double average = 0;
+ _dbg1_c("net/blksize", "HEIGHT: " << get_current_blockchain_height());
+ _dbg1_c("net/blksize", "BLOCK ID BY HEIGHT: " << get_block_id_by_height(get_current_blockchain_height()) );
+ _dbg1_c("net/blksize", "BLOCK TAIL ID: " << get_tail_id() );
+ std::vector<size_t> size_vector;
- double average = 0;
- _dbg1_c("net/blksize", "HEIGHT: " << get_current_blockchain_height());
- _dbg1_c("net/blksize", "BLOCK ID BY HEIGHT: " << get_block_id_by_height(get_current_blockchain_height()) );
- _dbg1_c("net/blksize", "BLOCK TAIL ID: " << get_tail_id() );
- std::vector<size_t> size_vector;
+ get_backward_blocks_sizes(get_current_blockchain_height() - count, size_vector, count);
- get_backward_blocks_sizes(get_current_blockchain_height() - count, size_vector, count);
+ std::vector<size_t>::iterator it;
+ it = size_vector.begin();
+ while (it != size_vector.end()) {
+ average += *it;
+ _dbg2_c("net/blksize", "VECTOR ELEMENT: " << (*it) );
+ it++;
+ }
+ average = average / count;
+ _dbg1_c("net/blksize", "VECTOR SIZE: " << size_vector.size() << " average=" << average);
- std::vector<size_t>::iterator it;
- it = size_vector.begin();
- while (it != size_vector.end()) {
- average += *it;
- _dbg2_c("net/blksize", "VECTOR ELEMENT: " << (*it) );
- it++;
- }
- average = average / count;
- _dbg1_c("net/blksize", "VECTOR SIZE: " << size_vector.size() << " average=" << average);
-
- return average;
+ return average;
}
//------------------------------------------------------------------
void blockchain_storage::print_blockchain(uint64_t start_index, uint64_t end_index) const
@@ -1248,7 +1248,7 @@ bool blockchain_storage::find_blockchain_supplement(const uint64_t req_start_blo
{
CRITICAL_REGION_LOCAL(m_blockchain_lock);
if(req_start_block > 0) {
- start_height = req_start_block;
+ start_height = req_start_block;
} else {
if(!find_blockchain_supplement(qblock_ids, start_height))
return false;
@@ -1739,9 +1739,9 @@ bool blockchain_storage::handle_block_to_main_chain(const block& bl, const crypt
bei.block_cumulative_size = cumulative_block_size;
bei.cumulative_difficulty = current_diffic;
- // In the "tail" state when the minimum subsidy (implemented in get_block_reward) is in effect, the number of
- // coins will eventually exceed MONEY_SUPPLY and overflow a uint64. To prevent overflow, cap already_generated_coins
- // at MONEY_SUPPLY. already_generated_coins is only used to compute the block subsidy and MONEY_SUPPLY yields a
+ // In the "tail" state when the minimum subsidy (implemented in get_block_reward) is in effect, the number of
+ // coins will eventually exceed MONEY_SUPPLY and overflow a uint64. To prevent overflow, cap already_generated_coins
+ // at MONEY_SUPPLY. already_generated_coins is only used to compute the block subsidy and MONEY_SUPPLY yields a
// subsidy of 0 under the base formula and therefore the minimum subsidy >0 in the tail state.
bei.already_generated_coins = base_reward < (MONEY_SUPPLY-already_generated_coins) ? already_generated_coins + base_reward : MONEY_SUPPLY;
@@ -1770,7 +1770,7 @@ bool blockchain_storage::handle_block_to_main_chain(const block& bl, const crypt
<< "), coinbase_blob_size: " << coinbase_blob_size << ", cumulative size: " << cumulative_block_size
<< ", " << block_processing_time << "("<< target_calculating_time << "/" << longhash_calculating_time << ")ms");
- epee::net_utils::data_logger::get_instance().add_data("blockchain_processing_time", block_processing_time);
+ epee::net_utils::data_logger::get_instance().add_data("blockchain_processing_time", block_processing_time);
bvc.m_added_to_main_chain = true;
/*if(!m_orphanes_reorganize_in_work)
@@ -1837,13 +1837,13 @@ void blockchain_storage::check_against_checkpoints(const checkpoints& points, bo
// if asked to enforce checkpoints, roll back to a couple of blocks before the checkpoint
if (enforce)
{
- LOG_ERROR("Local blockchain failed to pass a checkpoint, rolling back!");
- std::list<block> empty;
- rollback_blockchain_switching(empty, pt.first - 2);
+ LOG_ERROR("Local blockchain failed to pass a checkpoint, rolling back!");
+ std::list<block> empty;
+ rollback_blockchain_switching(empty, pt.first - 2);
}
else
{
- LOG_ERROR("WARNING: local blockchain failed to pass a MoneroPulse checkpoint, and you could be on a fork. You should either sync up from scratch, OR download a fresh blockchain bootstrap, OR enable checkpoint enforcing with the --enforce-dns-checkpointing command-line option");
+ LOG_ERROR("WARNING: local blockchain failed to pass a MoneroPulse checkpoint, and you could be on a fork. You should either sync up from scratch, OR download a fresh blockchain bootstrap, OR enable checkpoint enforcing with the --enforce-dns-checkpointing command-line option");
}
}
}
diff --git a/src/cryptonote_core/checkpoints_create.cpp b/src/cryptonote_core/checkpoints_create.cpp
index de7d65009..2360c56de 100644
--- a/src/cryptonote_core/checkpoints_create.cpp
+++ b/src/cryptonote_core/checkpoints_create.cpp
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#include "checkpoints_create.h"
@@ -46,11 +46,11 @@ namespace
bool ok = false;
for (const auto& record_in_b : b)
{
- if (record_in_a == record_in_b)
- {
- ok = true;
- break;
- }
+ if (record_in_a == record_in_b)
+ {
+ ok = true;
+ break;
+ }
}
if (!ok) return false;
}
@@ -62,25 +62,25 @@ namespace
namespace cryptonote
{
-struct t_hashline
+struct t_hashline
{
- uint64_t height;
- std::string hash;
- BEGIN_KV_SERIALIZE_MAP()
- KV_SERIALIZE(height)
- KV_SERIALIZE(hash)
- END_KV_SERIALIZE_MAP()
+ uint64_t height;
+ std::string hash;
+ BEGIN_KV_SERIALIZE_MAP()
+ KV_SERIALIZE(height)
+ KV_SERIALIZE(hash)
+ END_KV_SERIALIZE_MAP()
};
struct t_hash_json {
- std::vector<t_hashline> hashlines;
- BEGIN_KV_SERIALIZE_MAP()
- KV_SERIALIZE(hashlines)
- END_KV_SERIALIZE_MAP()
+ std::vector<t_hashline> hashlines;
+ BEGIN_KV_SERIALIZE_MAP()
+ KV_SERIALIZE(hashlines)
+ END_KV_SERIALIZE_MAP()
};
bool create_checkpoints(cryptonote::checkpoints& checkpoints)
-{
+{
ADD_CHECKPOINT(1, "771fbcd656ec1464d3a02ead5e18644030007a0fc664c0a964d30922821a8148");
ADD_CHECKPOINT(10, "c0e3b387e47042f72d8ccdca88071ff96bff1ac7cde09ae113dbb7ad3fe92381");
ADD_CHECKPOINT(100, "ac3e11ca545e57c49fca2b4e8c48c03c23be047c43e471e1394528b1f9f80b2d");
@@ -126,11 +126,11 @@ bool load_checkpoints_from_json(cryptonote::checkpoints& checkpoints, std::strin
uint64_t height;
height = it->height;
if (height <= prev_max_height) {
- LOG_PRINT_L1("ignoring checkpoint height " << height);
+ LOG_PRINT_L1("ignoring checkpoint height " << height);
} else {
- std::string blockhash = it->hash;
- LOG_PRINT_L1("Adding checkpoint height " << height << ", hash=" << blockhash);
- ADD_CHECKPOINT(height, blockhash);
+ std::string blockhash = it->hash;
+ LOG_PRINT_L1("Adding checkpoint height " << height << ", hash=" << blockhash);
+ ADD_CHECKPOINT(height, blockhash);
}
++it;
}
@@ -142,15 +142,15 @@ bool load_checkpoints_from_dns(cryptonote::checkpoints& checkpoints, bool testne
{
// All four MoneroPulse domains have DNSSEC on and valid
static const std::vector<std::string> dns_urls = { "checkpoints.moneropulse.se"
- , "checkpoints.moneropulse.org"
- , "checkpoints.moneropulse.net"
- , "checkpoints.moneropulse.co"
+ , "checkpoints.moneropulse.org"
+ , "checkpoints.moneropulse.net"
+ , "checkpoints.moneropulse.co"
};
static const std::vector<std::string> testnet_dns_urls = { "testpoints.moneropulse.se"
- , "testpoints.moneropulse.org"
- , "testpoints.moneropulse.net"
- , "testpoints.moneropulse.co"
+ , "testpoints.moneropulse.org"
+ , "testpoints.moneropulse.net"
+ , "testpoints.moneropulse.co"
};
std::vector<std::vector<std::string> > records;
@@ -220,8 +220,8 @@ bool load_checkpoints_from_dns(cryptonote::checkpoints& checkpoints, bool testne
{
if (dns_records_match(records[i], records[j]))
{
- good_records_index = i;
- break;
+ good_records_index = i;
+ break;
}
}
if (good_records_index >= 0) break;
@@ -246,7 +246,7 @@ bool load_checkpoints_from_dns(cryptonote::checkpoints& checkpoints, bool testne
std::stringstream ss(record.substr(0, pos));
if (!(ss >> height))
{
- continue;
+ continue;
}
// parse the second part as crypto::hash,
@@ -254,7 +254,7 @@ bool load_checkpoints_from_dns(cryptonote::checkpoints& checkpoints, bool testne
std::string hashStr = record.substr(pos + 1);
if (!epee::string_tools::parse_tpod_from_hex_string(hashStr, hash))
{
- continue;
+ continue;
}
ADD_CHECKPOINT(height, hashStr);
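
Each MoneroPulse TXT record handled above is a height/hash pair split at a separator (the split position pos is computed just before this hunk; treating it as a colon is an assumption here): the first part is read as a decimal height through a stringstream, the remainder is parsed as a hex block hash, and only then is ADD_CHECKPOINT called, with malformed records simply skipped. A self-contained sketch of that parse, with the epee hex parser replaced by a plain length-and-digit check:

    #include <cctype>
    #include <cstdint>
    #include <optional>
    #include <sstream>
    #include <string>

    struct checkpoint_record { uint64_t height; std::string hash_hex; };

    // Parse "<height>:<hash>"; return nothing on any failure so the caller can
    // skip the record, mirroring the `continue` statements in the loop above.
    std::optional<checkpoint_record> parse_checkpoint_record(const std::string& record)
    {
      const auto pos = record.find(':');
      if (pos == std::string::npos)
        return std::nullopt;

      uint64_t height = 0;
      std::stringstream ss(record.substr(0, pos));
      if (!(ss >> height))
        return std::nullopt;

      std::string hash = record.substr(pos + 1);
      if (hash.size() != 64)                     // 32-byte hash rendered as hex
        return std::nullopt;
      for (char c : hash)
        if (!std::isxdigit(static_cast<unsigned char>(c)))
          return std::nullopt;

      return checkpoint_record{height, hash};
    }
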
diff --git a/src/cryptonote_core/cryptonote_basic_impl.cpp b/src/cryptonote_core/cryptonote_basic_impl.cpp
index 73e8d8fb9..9ced19890 100644
--- a/src/cryptonote_core/cryptonote_basic_impl.cpp
+++ b/src/cryptonote_core/cryptonote_basic_impl.cpp
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#include "include_base_utils.h"
@@ -106,8 +106,8 @@ namespace cryptonote {
assert(current_block_size < std::numeric_limits<uint32_t>::max());
uint64_t product_hi;
- // BUGFIX: 32-bit saturation bug (e.g. ARM7), the result was being
- // treated as 32-bit by default.
+ // BUGFIX: 32-bit saturation bug (e.g. ARM7), the result was being
+ // treated as 32-bit by default.
uint64_t multiplicand = 2 * median_size - current_block_size;
multiplicand *= current_block_size;
uint64_t product_lo = mul128(base_reward, multiplicand, &product_hi);
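
The BUGFIX comment above is about integer widening: 2 * median_size - current_block_size has to be computed and multiplied as a 64-bit value, otherwise on 32-bit targets (the ARM7 case mentioned) the intermediate product wraps modulo 2^32 before it ever reaches mul128. A tiny self-contained demonstration of the difference, using made-up sizes rather than anything from this code:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      const uint32_t median_size        = 300000;  // illustrative values only
      const uint32_t current_block_size = 250000;

      // All-32-bit arithmetic: the product wraps modulo 2^32.
      const uint32_t narrow = (2u * median_size - current_block_size) * current_block_size;

      // Widening first (what the fixed code does): force the multiplicand to
      // 64 bits before multiplying, so the full product survives.
      uint64_t multiplicand = 2ull * median_size - current_block_size;
      multiplicand *= current_block_size;

      std::printf("32-bit product: %u\n", narrow);
      std::printf("64-bit product: %llu\n", static_cast<unsigned long long>(multiplicand));
    }
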
diff --git a/src/cryptonote_core/cryptonote_core.cpp b/src/cryptonote_core/cryptonote_core.cpp
index 960c8eff8..8a3b81205 100644
--- a/src/cryptonote_core/cryptonote_core.cpp
+++ b/src/cryptonote_core/cryptonote_core.cpp
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#include "include_base_utils.h"
@@ -63,7 +63,7 @@ namespace cryptonote
m_blockchain_storage(&m_mempool),
#endif
m_miner(this),
- m_miner_address(boost::value_initialized<account_public_address>()),
+ m_miner_address(boost::value_initialized<account_public_address>()),
m_starter_message_showed(false),
m_target_blockchain_height(0),
m_checkpoints_path(""),
@@ -176,10 +176,10 @@ namespace cryptonote
set_enforce_dns_checkpoints(command_line::get_arg(vm, command_line::arg_dns_checkpoints));
test_drop_download_height(command_line::get_arg(vm, command_line::arg_test_drop_download_height));
-
+
if (command_line::get_arg(vm, command_line::arg_test_drop_download) == true)
- test_drop_download();
-
+ test_drop_download();
+
return true;
}
//-----------------------------------------------------------------------------------------------
@@ -331,7 +331,7 @@ namespace cryptonote
db->set_auto_remove_logs(auto_remove_logs);
db->open(filename, db_flags);
if(!db->m_open)
- return false;
+ return false;
}
catch (const DB_ERROR& e)
{
@@ -374,12 +374,12 @@ namespace cryptonote
//-----------------------------------------------------------------------------------------------
bool core::deinit()
{
- m_miner.stop();
- m_mempool.deinit();
- if (!m_fast_exit)
- {
- m_blockchain_storage.deinit();
- }
+ m_miner.stop();
+ m_mempool.deinit();
+ if (!m_fast_exit)
+ {
+ m_blockchain_storage.deinit();
+ }
return true;
}
//-----------------------------------------------------------------------------------------------
@@ -395,28 +395,28 @@ namespace cryptonote
//-----------------------------------------------------------------------------------------------
void core::test_drop_download()
{
- m_test_drop_download = false;
+ m_test_drop_download = false;
}
//-----------------------------------------------------------------------------------------------
void core::test_drop_download_height(uint64_t height)
{
- m_test_drop_download_height = height;
+ m_test_drop_download_height = height;
}
//-----------------------------------------------------------------------------------------------
bool core::get_test_drop_download() const
{
- return m_test_drop_download;
+ return m_test_drop_download;
}
//-----------------------------------------------------------------------------------------------
bool core::get_test_drop_download_height() const
{
- if (m_test_drop_download_height == 0)
- return true;
-
- if (get_blockchain_storage().get_current_blockchain_height() <= m_test_drop_download_height)
- return true;
+ if (m_test_drop_download_height == 0)
+ return true;
+
+ if (get_blockchain_storage().get_current_blockchain_height() <= m_test_drop_download_height)
+ return true;
- return false;
+ return false;
}
//-----------------------------------------------------------------------------------------------
bool core::handle_incoming_tx(const blobdata& tx_blob, tx_verification_context& tvc, bool keeped_by_block, bool relayed)
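The reindented hooks above form a small testing gate: m_test_drop_download defaults to true and is cleared for good by test_drop_download() (wired to command_line::arg_test_drop_download earlier in init), test_drop_download_height() records a cutoff height, and get_test_drop_download_height() stays true while no cutoff is set or the chain is still at or below it. The sync code later in this patch processes downloaded blocks only while both getters return true. A minimal stand-alone sketch of the same gate, with hypothetical names in place of the core class:

    #include <cstdint>
    #include <cstdio>

    // Sketch of the drop-download gate: blocks are only processed while both
    // checks pass, so clearing `enabled` discards everything and setting a
    // cutoff discards blocks above it.
    struct drop_download_gate {
      bool     enabled = true;   // mirrors m_test_drop_download (defaults to true)
      uint64_t cutoff  = 0;      // mirrors m_test_drop_download_height (0 = no cutoff)

      void drop_all()             { enabled = false; }  // like core::test_drop_download()
      void drop_above(uint64_t h) { cutoff = h; }       // like core::test_drop_download_height()

      bool should_process(uint64_t current_height) const {
        if (!enabled)
          return false;                    // discard everything
        if (cutoff == 0)
          return true;                     // no height cutoff configured
        return current_height <= cutoff;   // process only up to the cutoff
      }
    };

    int main() {
      drop_download_gate gate;
      gate.drop_above(500000);             // hypothetical cutoff for illustration
      printf("height 499999 -> %d\n", gate.should_process(499999));  // 1: processed
      printf("height 500001 -> %d\n", gate.should_process(500001));  // 0: discarded
    }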
@@ -852,14 +852,14 @@ namespace cryptonote
{
if(!m_starter_message_showed)
{
- LOG_PRINT_L0(ENDL << "**********************************************************************" << ENDL
- << "The daemon will start synchronizing with the network. It may take up to several hours." << ENDL
+ LOG_PRINT_L0(ENDL << "**********************************************************************" << ENDL
+ << "The daemon will start synchronizing with the network. It may take up to several hours." << ENDL
<< ENDL
<< "You can set the level of process detailization* through \"set_log <level>\" command*, where <level> is between 0 (no details) and 4 (very verbose)." << ENDL
<< ENDL
<< "Use \"help\" command to see the list of available commands." << ENDL
<< ENDL
- << "Note: in case you need to interrupt the process, use \"exit\" command. Otherwise, the current progress won't be saved." << ENDL
+ << "Note: in case you need to interrupt the process, use \"exit\" command. Otherwise, the current progress won't be saved." << ENDL
<< "**********************************************************************");
m_starter_message_showed = true;
}
@@ -915,6 +915,6 @@ namespace cryptonote
{
raise(SIGTERM);
}
-
+
std::atomic<bool> core::m_fast_exit(false);
}
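The deinit() hunk above gates shutdown on the static std::atomic<bool> m_fast_exit initialized to false at the bottom of the file: the miner and mempool always stop, but the blockchain-storage teardown is skipped when fast exit was requested, and another hunk shows a shutdown path that simply raises SIGTERM. A minimal sketch of that gate:

    #include <atomic>
    #include <cstdio>

    // Sketch of the fast-exit gate seen in core::deinit(): a static atomic flag,
    // defaulting to false, lets an emergency shutdown skip the expensive
    // blockchain-storage teardown while the miner and mempool still stop cleanly.
    static std::atomic<bool> g_fast_exit(false);   // stands in for core::m_fast_exit

    static void deinit_sketch() {
      puts("miner stopped");
      puts("mempool deinitialized");
      if (!g_fast_exit.load())
        puts("blockchain storage deinitialized");  // skipped on fast exit
    }

    int main() {
      deinit_sketch();               // normal path: all three steps run
      g_fast_exit.store(true);
      deinit_sketch();               // fast exit: storage teardown is skipped
    }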
diff --git a/src/cryptonote_core/cryptonote_core.h b/src/cryptonote_core/cryptonote_core.h
index 5d665cb98..e91d28694 100644
--- a/src/cryptonote_core/cryptonote_core.h
+++ b/src/cryptonote_core/cryptonote_core.h
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#pragma once
@@ -68,7 +68,7 @@ namespace cryptonote
bool handle_incoming_block(const blobdata& block_blob, block_verification_context& bvc, bool update_miner_blocktemplate = true);
bool prepare_handle_incoming_blocks(const std::list<block_complete_entry> &blocks);
bool cleanup_handle_incoming_blocks(bool force_sync = false);
-
+
bool check_incoming_block_size(const blobdata& block_blob) const;
i_cryptonote_protocol* get_protocol(){return m_pprotocol;}
@@ -175,7 +175,7 @@ namespace cryptonote
static std::atomic<bool> m_fast_exit;
bool m_test_drop_download = true;
- uint64_t m_test_drop_download_height = 0;
+ uint64_t m_test_drop_download_height = 0;
tx_memory_pool m_mempool;
#if BLOCKCHAIN_DB == DB_LMDB
diff --git a/src/cryptonote_core/difficulty.cpp b/src/cryptonote_core/difficulty.cpp
index 3c3f1dec1..9a4f5d080 100644
--- a/src/cryptonote_core/difficulty.cpp
+++ b/src/cryptonote_core/difficulty.cpp
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#include <algorithm>
@@ -53,40 +53,40 @@ namespace cryptonote {
#else
static inline void mul(uint64_t a, uint64_t b, uint64_t &low, uint64_t &high) {
- // __int128 isn't part of the standard, so the previous function wasn't portable. mul128() in Windows is fine,
- // but this portable function should be used elsewhere. Credit for this function goes to latexi95.
-
- uint64_t aLow = a & 0xFFFFFFFF;
- uint64_t aHigh = a >> 32;
- uint64_t bLow = b & 0xFFFFFFFF;
- uint64_t bHigh = b >> 32;
-
- uint64_t res = aLow * bLow;
- uint64_t lowRes1 = res & 0xFFFFFFFF;
- uint64_t carry = res >> 32;
-
- res = aHigh * bLow + carry;
- uint64_t highResHigh1 = res >> 32;
- uint64_t highResLow1 = res & 0xFFFFFFFF;
-
- res = aLow * bHigh;
- uint64_t lowRes2 = res & 0xFFFFFFFF;
- carry = res >> 32;
-
- res = aHigh * bHigh + carry;
- uint64_t highResHigh2 = res >> 32;
- uint64_t highResLow2 = res & 0xFFFFFFFF;
-
- //Addition
-
- uint64_t r = highResLow1 + lowRes2;
- carry = r >> 32;
- low = (r << 32) | lowRes1;
- r = highResHigh1 + highResLow2 + carry;
- uint64_t d3 = r & 0xFFFFFFFF;
- carry = r >> 32;
- r = highResHigh2 + carry;
- high = d3 | (r << 32);
+ // __int128 isn't part of the standard, so the previous function wasn't portable. mul128() in Windows is fine,
+ // but this portable function should be used elsewhere. Credit for this function goes to latexi95.
+
+ uint64_t aLow = a & 0xFFFFFFFF;
+ uint64_t aHigh = a >> 32;
+ uint64_t bLow = b & 0xFFFFFFFF;
+ uint64_t bHigh = b >> 32;
+
+ uint64_t res = aLow * bLow;
+ uint64_t lowRes1 = res & 0xFFFFFFFF;
+ uint64_t carry = res >> 32;
+
+ res = aHigh * bLow + carry;
+ uint64_t highResHigh1 = res >> 32;
+ uint64_t highResLow1 = res & 0xFFFFFFFF;
+
+ res = aLow * bHigh;
+ uint64_t lowRes2 = res & 0xFFFFFFFF;
+ carry = res >> 32;
+
+ res = aHigh * bHigh + carry;
+ uint64_t highResHigh2 = res >> 32;
+ uint64_t highResLow2 = res & 0xFFFFFFFF;
+
+ //Addition
+
+ uint64_t r = highResLow1 + lowRes2;
+ carry = r >> 32;
+ low = (r << 32) | lowRes1;
+ r = highResHigh1 + highResLow2 + carry;
+ uint64_t d3 = r & 0xFFFFFFFF;
+ carry = r >> 32;
+ r = highResHigh2 + carry;
+ high = d3 | (r << 32);
}
#endif
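The portable mul() above computes the full 128-bit product of two 64-bit operands using 32-bit limbs, as a fallback where __int128 or a native mul128() is unavailable. A stand-alone copy of the same schoolbook scheme with a quick self-check (the comparison against unsigned __int128 only compiles where the compiler offers that extension):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Schoolbook 64x64 -> 128 multiplication with 32-bit limbs, mirroring the
    // portable mul() in difficulty.cpp.
    static void mul64x64(uint64_t a, uint64_t b, uint64_t &low, uint64_t &high) {
      const uint64_t aLow = a & 0xFFFFFFFF, aHigh = a >> 32;
      const uint64_t bLow = b & 0xFFFFFFFF, bHigh = b >> 32;

      uint64_t res = aLow * bLow;
      const uint64_t lowRes1 = res & 0xFFFFFFFF;
      uint64_t carry = res >> 32;

      res = aHigh * bLow + carry;
      const uint64_t highResHigh1 = res >> 32, highResLow1 = res & 0xFFFFFFFF;

      res = aLow * bHigh;
      const uint64_t lowRes2 = res & 0xFFFFFFFF;
      carry = res >> 32;

      res = aHigh * bHigh + carry;
      const uint64_t highResHigh2 = res >> 32, highResLow2 = res & 0xFFFFFFFF;

      // Recombine the partial products, propagating carries into the high word.
      uint64_t r = highResLow1 + lowRes2;
      carry = r >> 32;
      low = (r << 32) | lowRes1;
      r = highResHigh1 + highResLow2 + carry;
      const uint64_t d3 = r & 0xFFFFFFFF;
      carry = r >> 32;
      r = highResHigh2 + carry;
      high = d3 | (r << 32);
    }

    int main() {
      uint64_t lo, hi;
      mul64x64(0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, lo, hi);
      // (2^64 - 1)^2 = 2^128 - 2^65 + 1  ->  high = 2^64 - 2, low = 1
      assert(hi == 0xFFFFFFFFFFFFFFFEull && lo == 1);
    #ifdef __SIZEOF_INT128__
      unsigned __int128 p = (unsigned __int128)0x123456789ABCDEFull * 0xFEDCBA987654321ull;
      mul64x64(0x123456789ABCDEFull, 0xFEDCBA987654321ull, lo, hi);
      assert(lo == (uint64_t)p && hi == (uint64_t)(p >> 64));
    #endif
      puts("mul64x64 ok");
    }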
diff --git a/src/cryptonote_core/miner.cpp b/src/cryptonote_core/miner.cpp
index b99113d9e..abb74b740 100644
--- a/src/cryptonote_core/miner.cpp
+++ b/src/cryptonote_core/miner.cpp
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#include <sstream>
@@ -68,9 +68,9 @@ namespace cryptonote
m_thread_index(0),
m_phandler(phandler),
m_height(0),
- m_pausers_count(0),
+ m_pausers_count(0),
m_threads_total(0),
- m_starter_nonce(0),
+ m_starter_nonce(0),
m_last_hr_merge_time(0),
m_hashes(0),
m_do_print_hashrate(false),
@@ -109,7 +109,7 @@ namespace cryptonote
block bl = AUTO_VAL_INIT(bl);
difficulty_type di = AUTO_VAL_INIT(di);
uint64_t height = AUTO_VAL_INIT(height);
- cryptonote::blobdata extra_nonce;
+ cryptonote::blobdata extra_nonce;
if(m_extra_messages.size() && m_config.current_extra_message_index < m_extra_messages.size())
{
extra_nonce = m_extra_messages[m_config.current_extra_message_index];
@@ -135,7 +135,7 @@ namespace cryptonote
merge_hr();
return true;
});
-
+
return true;
}
//-----------------------------------------------------------------------------------------------------
@@ -227,7 +227,7 @@ namespace cryptonote
uint32_t miner::get_threads_count() const {
return m_threads_total;
}
- //-----------------------------------------------------------------------------------------------------
+ //-----------------------------------------------------------------------------------------------------
bool miner::start(const account_public_address& adr, size_t threads_count, const boost::thread::attributes& attrs)
{
m_mine_address = adr;
@@ -346,7 +346,7 @@ namespace cryptonote
difficulty_type local_diff = 0;
uint32_t local_template_ver = 0;
block b;
- slow_hash_allocate_state();
+ slow_hash_allocate_state();
while(!m_stop)
{
if(m_pausers_count)//anti split workaround
@@ -357,7 +357,6 @@ namespace cryptonote
if(local_template_ver != m_template_no)
{
-
CRITICAL_REGION_BEGIN(m_template_lock);
b = m_template;
local_diff = m_diffic;
@@ -395,7 +394,7 @@ namespace cryptonote
nonce+=m_threads_total;
++m_hashes;
}
- slow_hash_free_state();
+ slow_hash_free_state();
LOG_PRINT_L0("Miner thread stopped ["<< th_local_index << "]");
return true;
}
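The worker-thread hunks above allocate per-thread slow-hash state and advance the nonce by m_threads_total on every iteration, so each worker scans its own arithmetic progression of nonces. The per-thread starting offset is set outside these hunks, so the offset used below is an assumption for illustration only:

    #include <cstdint>
    #include <cstdio>

    // Sketch of the nonce partitioning implied by `nonce += m_threads_total`:
    // if thread i starts at starter_nonce + i, the threads' nonce sequences
    // never overlap.
    int main() {
      const uint32_t threads_total = 4;      // stands in for m_threads_total
      const uint32_t starter_nonce = 1000;   // stands in for m_starter_nonce

      for (uint32_t th = 0; th < threads_total; ++th) {
        printf("thread %u:", th);
        for (uint32_t nonce = starter_nonce + th, n = 0; n < 4; nonce += threads_total, ++n)
          printf(" %u", nonce);              // e.g. 1000 1004 1008 1012 for thread 0
        printf("\n");
      }
    }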
diff --git a/src/cryptonote_core/tx_pool.cpp b/src/cryptonote_core/tx_pool.cpp
index e2dcd35f5..5ee6c38cc 100644
--- a/src/cryptonote_core/tx_pool.cpp
+++ b/src/cryptonote_core/tx_pool.cpp
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#include <algorithm>
@@ -228,8 +228,8 @@ namespace cryptonote
bool tx_memory_pool::remove_transaction_keyimages(const transaction& tx)
{
CRITICAL_REGION_LOCAL(m_transactions_lock);
- // ND: Speedup
- // 1. Move transaction hash calcuation outside of loop. ._.
+ // ND: Speedup
+ // 1. Move transaction hash calcuation outside of loop. ._.
crypto::hash actual_hash = get_transaction_hash(tx);
BOOST_FOREACH(const txin_v& vi, tx.vin)
{
@@ -299,7 +299,7 @@ namespace cryptonote
{
uint64_t tx_age = time(nullptr) - it->second.receive_time;
- if((tx_age > CRYPTONOTE_MEMPOOL_TX_LIVETIME && !it->second.kept_by_block) ||
+ if((tx_age > CRYPTONOTE_MEMPOOL_TX_LIVETIME && !it->second.kept_by_block) ||
(tx_age > CRYPTONOTE_MEMPOOL_TX_FROM_ALT_BLOCK_LIVETIME && it->second.kept_by_block) )
{
LOG_PRINT_L1("Tx " << it->first << " removed from tx pool due to outdated, age: " << tx_age );
@@ -574,7 +574,7 @@ namespace cryptonote
// If adding this tx will make the block size
// greater than CRYPTONOTE_GETBLOCKTEMPLATE_MAX
- // _BLOCK_SIZE bytes, reject the tx; this will
+ // _BLOCK_SIZE bytes, reject the tx; this will
// keep block sizes from becoming too unwieldly
// to propagate at 60s block times.
if ( (total_size + tx_it->second.blob_size) > CRYPTONOTE_GETBLOCKTEMPLATE_MAX_BLOCK_SIZE )
@@ -586,7 +586,7 @@ namespace cryptonote
// If we've exceeded the penalty free size,
// stop including more tx
if (total_size > median_size)
- break;
+ break;
// Skip transactions that are not ready to be
// included into the blockchain or that are
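The block-template hunks above enforce two size rules while greedily adding pool transactions: a transaction is skipped if it would push the block past CRYPTONOTE_GETBLOCKTEMPLATE_MAX_BLOCK_SIZE, and the fill loop stops entirely once the running total exceeds the penalty-free median size. A compact sketch of that pair of checks, with illustrative sizes in place of the real constants:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Sketch of the two size limits applied while filling a block template:
    // a hard per-block cap and a soft stop at the penalty-free median.
    // Both numbers below are illustrative, not the real consensus values.
    int main() {
      const uint64_t max_template_size = 130 * 1024;   // hard cap (hypothetical)
      const uint64_t median_size       = 60 * 1024;    // penalty-free size (hypothetical)
      const std::vector<uint64_t> tx_blob_sizes = {20000, 30000, 15000, 40000, 5000};

      uint64_t total_size = 0;
      for (uint64_t blob_size : tx_blob_sizes) {
        if (total_size + blob_size > max_template_size)
          continue;                  // reject: would exceed the hard cap
        if (total_size > median_size)
          break;                     // stop: already past the penalty-free size
        total_size += blob_size;
        printf("added tx of %llu bytes, total %llu\n",
               (unsigned long long)blob_size, (unsigned long long)total_size);
      }
    }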
diff --git a/src/cryptonote_core/tx_pool.h b/src/cryptonote_core/tx_pool.h
index a72c331a7..4dcca579c 100644
--- a/src/cryptonote_core/tx_pool.h
+++ b/src/cryptonote_core/tx_pool.h
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#pragma once
@@ -139,7 +139,7 @@ namespace cryptonote
};
private:
- bool remove_stuck_transactions();
+ bool remove_stuck_transactions();
bool have_tx_keyimg_as_spent(const crypto::key_image& key_im) const;
bool have_tx_keyimges_as_spent(const transaction& tx) const;
bool remove_transaction_keyimages(const transaction& tx);
diff --git a/src/cryptonote_protocol/cryptonote_protocol_handler.inl b/src/cryptonote_protocol/cryptonote_protocol_handler.inl
index 9099cd414..38b1e7321 100644
--- a/src/cryptonote_protocol/cryptonote_protocol_handler.inl
+++ b/src/cryptonote_protocol/cryptonote_protocol_handler.inl
@@ -3,23 +3,23 @@
/// @brief This is the orginal cryptonote protocol network-events handler, modified by us
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -29,7 +29,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
// (may contain code and/or modifications by other developers)
@@ -49,14 +49,14 @@ namespace cryptonote
{
-// static
-// template<class t_core> std::ofstream t_cryptonote_protocol_handler<t_core>::m_logreq("logreq.txt"); // static
+// static
+// template<class t_core> std::ofstream t_cryptonote_protocol_handler<t_core>::m_logreq("logreq.txt"); // static
- //-----------------------------------------------------------------------------------------------------------------------
+ //-----------------------------------------------------------------------------------------------------------------------
template<class t_core>
- t_cryptonote_protocol_handler<t_core>::t_cryptonote_protocol_handler(t_core& rcore, nodetool::i_p2p_endpoint<connection_context>* p_net_layout):m_core(rcore),
+ t_cryptonote_protocol_handler<t_core>::t_cryptonote_protocol_handler(t_core& rcore, nodetool::i_p2p_endpoint<connection_context>* p_net_layout):m_core(rcore),
m_p2p(p_net_layout),
m_syncronized_connections_count(0),
m_synchronized(false)
@@ -66,21 +66,21 @@ namespace cryptonote
m_p2p = &m_p2p_stub;
}
//-----------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ template<class t_core>
bool t_cryptonote_protocol_handler<t_core>::init(const boost::program_options::variables_map& vm)
{
return true;
}
- //------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ //------------------------------------------------------------------------------------------------------------------------
+ template<class t_core>
bool t_cryptonote_protocol_handler<t_core>::deinit()
{
-
+
return true;
}
- //------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ //------------------------------------------------------------------------------------------------------------------------
+ template<class t_core>
void t_cryptonote_protocol_handler<t_core>::set_p2p_endpoint(nodetool::i_p2p_endpoint<connection_context>* p2p)
{
if(p2p)
@@ -88,8 +88,8 @@ namespace cryptonote
else
m_p2p = &m_p2p_stub;
}
- //------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ //------------------------------------------------------------------------------------------------------------------------
+ template<class t_core>
bool t_cryptonote_protocol_handler<t_core>::on_callback(cryptonote_connection_context& context)
{
LOG_PRINT_CCONTEXT_L2("callback fired");
@@ -107,22 +107,22 @@ namespace cryptonote
return true;
}
//------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ template<class t_core>
bool t_cryptonote_protocol_handler<t_core>::get_stat_info(core_stat_info& stat_inf)
{
return m_core.get_stat_info(stat_inf);
}
- //------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ //------------------------------------------------------------------------------------------------------------------------
+ template<class t_core>
void t_cryptonote_protocol_handler<t_core>::log_connections()
{
std::stringstream ss;
ss.precision(1);
-
- double down_sum = 0.0;
- double down_curr_sum = 0.0;
- double up_sum = 0.0;
- double up_curr_sum = 0.0;
+
+ double down_sum = 0.0;
+ double down_curr_sum = 0.0;
+ double up_sum = 0.0;
+ double up_curr_sum = 0.0;
ss << std::setw(30) << std::left << "Remote Host"
<< std::setw(20) << "Peer id"
@@ -131,19 +131,19 @@ namespace cryptonote
<< std::setw(20) << "Livetime(sec)"
<< std::setw(12) << "Down (kB/s)"
<< std::setw(14) << "Down(now)"
- << std::setw(10) << "Up (kB/s)"
+ << std::setw(10) << "Up (kB/s)"
<< std::setw(13) << "Up(now)"
<< ENDL;
- uint32_t ip;
+ uint32_t ip;
m_p2p->for_each_connection([&](const connection_context& cntxt, nodetool::peerid_type peer_id)
{
- bool local_ip = false;
- ip = ntohl(cntxt.m_remote_ip);
- // TODO: local ip in calss A, B
- if (ip > 3232235520 && ip < 3232301055) // 192.168.x.x
- local_ip = true;
- auto connection_time = time(NULL) - cntxt.m_started;
+ bool local_ip = false;
+ ip = ntohl(cntxt.m_remote_ip);
+ // TODO: local ip in calss A, B
+ if (ip > 3232235520 && ip < 3232301055) // 192.168.x.x
+ local_ip = true;
+ auto connection_time = time(NULL) - cntxt.m_started;
ss << std::setw(30) << std::left << std::string(cntxt.m_is_income ? " [INC]":"[OUT]") +
epee::string_tools::get_ip_string_from_int32(cntxt.m_remote_ip) + ":" + std::to_string(cntxt.m_remote_port)
<< std::setw(20) << std::hex << peer_id
@@ -157,30 +157,30 @@ namespace cryptonote
<< (local_ip ? "[LAN]" : "")
<< std::left << (ip == LOCALHOST_INT ? "[LOCALHOST]" : "") // 127.0.0.1
<< ENDL;
-
- if (connection_time > 1)
- {
- down_sum += (cntxt.m_recv_cnt / connection_time / 1024);
- up_sum += (cntxt.m_send_cnt / connection_time / 1024);
- }
-
- down_curr_sum += (cntxt.m_current_speed_down / 1024);
- up_curr_sum += (cntxt.m_current_speed_up / 1024);
-
+
+ if (connection_time > 1)
+ {
+ down_sum += (cntxt.m_recv_cnt / connection_time / 1024);
+ up_sum += (cntxt.m_send_cnt / connection_time / 1024);
+ }
+
+ down_curr_sum += (cntxt.m_current_speed_down / 1024);
+ up_curr_sum += (cntxt.m_current_speed_up / 1024);
+
return true;
});
- ss << ENDL
- << std::setw(125) << " "
- << std::setw(12) << down_sum
- << std::setw(14) << down_curr_sum
- << std::setw(10) << up_sum
- << std::setw(13) << up_curr_sum
- << ENDL;
- LOG_PRINT_L0("Connections: " << ENDL << ss.str());
+ ss << ENDL
+ << std::setw(125) << " "
+ << std::setw(12) << down_sum
+ << std::setw(14) << down_curr_sum
+ << std::setw(10) << up_sum
+ << std::setw(13) << up_curr_sum
+ << ENDL;
+ LOG_PRINT_L0("Connections: " << ENDL << ss.str());
}
- //------------------------------------------------------------------------------------------------------------------------
+ //------------------------------------------------------------------------------------------------------------------------
// Returns a list of connection_info objects describing each open p2p connection
- //------------------------------------------------------------------------------------------------------------------------
+ //------------------------------------------------------------------------------------------------------------------------
template<class t_core>
std::list<connection_info> t_cryptonote_protocol_handler<t_core>::get_connections()
{
@@ -209,43 +209,43 @@ namespace cryptonote
cnx.state = get_protocol_state_string(cntxt.m_state);
cnx.live_time = timestamp - cntxt.m_started;
-
+
uint32_t ip;
- ip = ntohl(cntxt.m_remote_ip);
- if (ip == LOCALHOST_INT)
- {
- cnx.localhost = true;
- }
- else
- {
- cnx.localhost = false;
- }
-
+ ip = ntohl(cntxt.m_remote_ip);
+ if (ip == LOCALHOST_INT)
+ {
+ cnx.localhost = true;
+ }
+ else
+ {
+ cnx.localhost = false;
+ }
+
if (ip > 3232235520 && ip < 3232301055) // 192.168.x.x
{
- cnx.local_ip = true;
- }
- else
- {
- cnx.local_ip = false;
- }
-
- auto connection_time = time(NULL) - cntxt.m_started;
- if (connection_time == 0)
- {
- cnx.avg_download = 0;
- cnx.avg_upload = 0;
- }
-
- else
- {
- cnx.avg_download = cntxt.m_recv_cnt / connection_time / 1024;
- cnx.avg_upload = cntxt.m_send_cnt / connection_time / 1024;
- }
-
- cnx.current_download = cntxt.m_current_speed_down / 1024;
- cnx.current_upload = cntxt.m_current_speed_up / 1024;
-
+ cnx.local_ip = true;
+ }
+ else
+ {
+ cnx.local_ip = false;
+ }
+
+ auto connection_time = time(NULL) - cntxt.m_started;
+ if (connection_time == 0)
+ {
+ cnx.avg_download = 0;
+ cnx.avg_upload = 0;
+ }
+
+ else
+ {
+ cnx.avg_download = cntxt.m_recv_cnt / connection_time / 1024;
+ cnx.avg_upload = cntxt.m_send_cnt / connection_time / 1024;
+ }
+
+ cnx.current_download = cntxt.m_current_speed_down / 1024;
+ cnx.current_upload = cntxt.m_current_speed_up / 1024;
+
connections.push_back(cnx);
return true;
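Both connection-reporting hunks above classify a peer as LAN by comparing its host-order IPv4 address against the numeric range 3232235520 to 3232301055, i.e. 192.168.0.0 through 192.168.255.255 (the upstream TODO notes that 10.0.0.0/8 and 172.16.0.0/12 are not yet covered), and they derive average rates as bytes transferred divided by connection lifetime in seconds, divided by 1024. A small sketch of those two calculations:

    #include <cstdint>
    #include <cstdio>

    // Mirrors the strict comparison used above to flag "[LAN]" peers; note the
    // two boundary addresses 192.168.0.0 and 192.168.255.255 themselves fall
    // outside the strict inequalities as written upstream.
    static bool is_192_168(uint32_t host_order_ip) {
      return host_order_ip > 3232235520u && host_order_ip < 3232301055u;
    }

    int main() {
      const uint32_t sample = (192u << 24) | (168u << 16) | (1u << 8) | 42u;  // 192.168.1.42
      printf("192.168.1.42 is LAN: %d\n", is_192_168(sample));               // 1

      // Average download rate as computed above: bytes received divided by the
      // connection lifetime in seconds, divided by 1024 to get kB/s.
      const uint64_t recv_cnt = 5 * 1024 * 1024;   // 5 MiB received so far
      const uint64_t connection_time = 40;         // seconds since m_started
      const uint64_t avg_kbs = connection_time ? recv_cnt / connection_time / 1024 : 0;
      printf("average download: %llu kB/s\n", (unsigned long long)avg_kbs);  // 128
    }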
@@ -254,7 +254,7 @@ namespace cryptonote
return connections;
}
//------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ template<class t_core>
bool t_cryptonote_protocol_handler<t_core>::process_payload_sync_data(const CORE_SYNC_DATA& hshd, cryptonote_connection_context& context, bool is_inital)
{
if(context.m_state == cryptonote_connection_context::state_befor_handshake && !is_inital)
@@ -263,7 +263,7 @@ namespace cryptonote
if(context.m_state == cryptonote_connection_context::state_synchronizing)
return true;
- if(m_core.have_block(hshd.top_id))
+ if(m_core.have_block(hshd.top_id))
{
context.m_state = cryptonote_connection_context::state_normal;
if(is_inital)
@@ -271,11 +271,11 @@ namespace cryptonote
return true;
}
- /* As I don't know if accessing hshd from core could be a good practice,
+ /* As I don't know if accessing hshd from core could be a good practice,
I prefer pushing target height to the core at the same time it is pushed to the user.
Nz. */
m_core.set_target_blockchain_height(static_cast<int64_t>(hshd.current_height));
-
+
int64_t diff = static_cast<int64_t>(hshd.current_height) - static_cast<int64_t>(m_core.get_current_blockchain_height());
LOG_PRINT_CCONTEXT_YELLOW("Sync data returned unknown top block: " << m_core.get_current_blockchain_height() << " -> " << hshd.current_height
<< " [" << std::abs(diff) << " blocks (" << diff / (24 * 60 * 60 / DIFFICULTY_TARGET) << " days) "
@@ -290,16 +290,16 @@ namespace cryptonote
m_p2p->request_callback(context);
return true;
}
- //------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ //------------------------------------------------------------------------------------------------------------------------
+ template<class t_core>
bool t_cryptonote_protocol_handler<t_core>::get_payload_sync_data(CORE_SYNC_DATA& hshd)
{
m_core.get_blockchain_top(hshd.current_height, hshd.top_id);
hshd.current_height +=1;
return true;
}
- //------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ //------------------------------------------------------------------------------------------------------------------------
+ template<class t_core>
bool t_cryptonote_protocol_handler<t_core>::get_payload_sync_data(blobdata& data)
{
CORE_SYNC_DATA hsd = boost::value_initialized<CORE_SYNC_DATA>();
@@ -307,8 +307,8 @@ namespace cryptonote
epee::serialization::store_t_to_binary(hsd, data);
return true;
}
- //------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ //------------------------------------------------------------------------------------------------------------------------
+ template<class t_core>
int t_cryptonote_protocol_handler<t_core>::handle_notify_new_block(int command, NOTIFY_NEW_BLOCK::request& arg, cryptonote_connection_context& context)
{
LOG_PRINT_CCONTEXT_L2("NOTIFY_NEW_BLOCK (hop " << arg.hop << ")");
@@ -333,7 +333,7 @@ namespace cryptonote
}
block_verification_context bvc = boost::value_initialized<block_verification_context>();
- m_core.handle_incoming_block(arg.b.block, bvc); // got block from handle_notify_new_block
+ m_core.handle_incoming_block(arg.b.block, bvc); // got block from handle_notify_new_block
m_core.cleanup_handle_incoming_blocks(true);
m_core.resume_mine();
if(bvc.m_verifivation_failed)
@@ -355,11 +355,11 @@ namespace cryptonote
LOG_PRINT_CCONTEXT_L2("-->>NOTIFY_REQUEST_CHAIN: m_block_ids.size()=" << r.block_ids.size() );
post_notify<NOTIFY_REQUEST_CHAIN>(r, context);
}
-
+
return 1;
}
//------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ template<class t_core>
int t_cryptonote_protocol_handler<t_core>::handle_notify_new_transactions(int command, NOTIFY_NEW_TRANSACTIONS::request& arg, cryptonote_connection_context& context)
{
LOG_PRINT_CCONTEXT_L2("NOTIFY_NEW_TRANSACTIONS");
@@ -391,7 +391,7 @@ namespace cryptonote
return true;
}
//------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ template<class t_core>
int t_cryptonote_protocol_handler<t_core>::handle_request_get_objects(int command, NOTIFY_REQUEST_GET_OBJECTS::request& arg, cryptonote_connection_context& context)
{
LOG_PRINT_CCONTEXT_L2("NOTIFY_REQUEST_GET_OBJECTS");
@@ -401,7 +401,7 @@ namespace cryptonote
LOG_ERROR_CCONTEXT("failed to handle request NOTIFY_REQUEST_GET_OBJECTS, dropping connection");
m_p2p->drop_connection(context);
}
- LOG_PRINT_CCONTEXT_L2("-->>NOTIFY_RESPONSE_GET_OBJECTS: blocks.size()=" << rsp.blocks.size() << ", txs.size()=" << rsp.txs.size()
+ LOG_PRINT_CCONTEXT_L2("-->>NOTIFY_RESPONSE_GET_OBJECTS: blocks.size()=" << rsp.blocks.size() << ", txs.size()=" << rsp.txs.size()
<< ", rsp.m_current_blockchain_height=" << rsp.current_blockchain_height << ", missed_ids.size()=" << rsp.missed_ids.size());
post_notify<NOTIFY_RESPONSE_GET_OBJECTS>(rsp, context);
//handler_response_blocks_now(sizeof(rsp)); // XXX
@@ -410,67 +410,67 @@ namespace cryptonote
}
//------------------------------------------------------------------------------------------------------------------------
-
+
template<class t_core>
double t_cryptonote_protocol_handler<t_core>::get_avg_block_size() {
- // return m_core.get_blockchain_storage().get_avg_block_size(count); // this does not count too well the actuall network-size of data we need to download
-
- CRITICAL_REGION_LOCAL(m_buffer_mutex);
- double avg = 0;
- if (m_avg_buffer.size() == 0) {
- _warn("m_avg_buffer.size() == 0");
- return 500;
- }
-
- const bool dbg_poke_lock = 0; // debug: try to trigger an error by poking around with locks. TODO: configure option
- long int dbg_repeat=0;
- do {
- for (auto element : m_avg_buffer) avg += element;
- } while(dbg_poke_lock && (dbg_repeat++)<100000); // in debug/poke mode, repeat this calculation to trigger hidden locking error if there is one
- return avg / m_avg_buffer.size();
+ // return m_core.get_blockchain_storage().get_avg_block_size(count); // this does not count too well the actuall network-size of data we need to download
+
+ CRITICAL_REGION_LOCAL(m_buffer_mutex);
+ double avg = 0;
+ if (m_avg_buffer.size() == 0) {
+ _warn("m_avg_buffer.size() == 0");
+ return 500;
+ }
+
+ const bool dbg_poke_lock = 0; // debug: try to trigger an error by poking around with locks. TODO: configure option
+ long int dbg_repeat=0;
+ do {
+ for (auto element : m_avg_buffer) avg += element;
+ } while(dbg_poke_lock && (dbg_repeat++)<100000); // in debug/poke mode, repeat this calculation to trigger hidden locking error if there is one
+ return avg / m_avg_buffer.size();
}
-
+
template<class t_core>
int t_cryptonote_protocol_handler<t_core>::handle_response_get_objects(int command, NOTIFY_RESPONSE_GET_OBJECTS::request& arg, cryptonote_connection_context& context)
{
LOG_PRINT_CCONTEXT_L2("NOTIFY_RESPONSE_GET_OBJECTS");
-
+
// calculate size of request - mainly for logging/debug
size_t size = 0;
for (auto element : arg.txs) size += element.size();
-
- for (auto element : arg.blocks) {
- size += element.block.size();
- for (auto tx : element.txs)
- size += tx.size();
- }
-
- for (auto element : arg.missed_ids)
- size += sizeof(element.data);
-
- size += sizeof(arg.current_blockchain_height);
- {
- CRITICAL_REGION_LOCAL(m_buffer_mutex);
- m_avg_buffer.push_back(size);
-
- const bool dbg_poke_lock = 0; // debug: try to trigger an error by poking around with locks. TODO: configure option
- long int dbg_repeat=0;
- do {
- m_avg_buffer.push_back(666); // a test value
- m_avg_buffer.erase_end(1);
- } while(dbg_poke_lock && (dbg_repeat++)<100000); // in debug/poke mode, repeat this calculation to trigger hidden locking error if there is one
- }
- /*using namespace boost::chrono;
- auto point = steady_clock::now();
- auto time_from_epoh = point.time_since_epoch();
- auto sec = duration_cast< seconds >( time_from_epoh ).count();*/
-
- //epee::net_utils::network_throttle_manager::get_global_throttle_inreq().logger_handle_net("log/dr-monero/net/req-all.data", sec, get_avg_block_size());
-
+
+ for (auto element : arg.blocks) {
+ size += element.block.size();
+ for (auto tx : element.txs)
+ size += tx.size();
+ }
+
+ for (auto element : arg.missed_ids)
+ size += sizeof(element.data);
+
+ size += sizeof(arg.current_blockchain_height);
+ {
+ CRITICAL_REGION_LOCAL(m_buffer_mutex);
+ m_avg_buffer.push_back(size);
+
+ const bool dbg_poke_lock = 0; // debug: try to trigger an error by poking around with locks. TODO: configure option
+ long int dbg_repeat=0;
+ do {
+ m_avg_buffer.push_back(666); // a test value
+ m_avg_buffer.erase_end(1);
+ } while(dbg_poke_lock && (dbg_repeat++)<100000); // in debug/poke mode, repeat this calculation to trigger hidden locking error if there is one
+ }
+ /*using namespace boost::chrono;
+ auto point = steady_clock::now();
+ auto time_from_epoh = point.time_since_epoch();
+ auto sec = duration_cast< seconds >( time_from_epoh ).count();*/
+
+ //epee::net_utils::network_throttle_manager::get_global_throttle_inreq().logger_handle_net("log/dr-monero/net/req-all.data", sec, get_avg_block_size());
+
if(context.m_last_response_height > arg.current_blockchain_height)
{
- LOG_ERROR_CCONTEXT("sent wrong NOTIFY_HAVE_OBJECTS: arg.m_current_blockchain_height=" << arg.current_blockchain_height
+ LOG_ERROR_CCONTEXT("sent wrong NOTIFY_HAVE_OBJECTS: arg.m_current_blockchain_height=" << arg.current_blockchain_height
<< " < m_last_response_height=" << context.m_last_response_height << ", dropping connection");
m_p2p->drop_connection(context);
return 1;
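get_avg_block_size() above averages the entries of m_avg_buffer under m_buffer_mutex and falls back to 500 when the buffer is empty, while handle_response_get_objects() pushes the measured size of each response into that buffer (the dbg_poke_lock loops are disabled debug probes). A minimal sketch of that rolling-average bookkeeping, with a plain fixed-capacity deque standing in for the epee buffer type and an illustrative capacity:

    #include <cstddef>
    #include <cstdio>
    #include <deque>
    #include <mutex>

    // Sketch of the rolling average kept by the protocol handler: each received
    // response size is recorded, and the average (with a 500 fallback for an
    // empty buffer) feeds the handler's size/bandwidth heuristics.
    class response_size_tracker {
    public:
      explicit response_size_tracker(std::size_t capacity) : m_capacity(capacity) {}

      void record(double size_bytes) {
        std::lock_guard<std::mutex> lock(m_mutex);
        m_buffer.push_back(size_bytes);
        if (m_buffer.size() > m_capacity)
          m_buffer.pop_front();              // drop the oldest sample
      }

      double average() const {
        std::lock_guard<std::mutex> lock(m_mutex);
        if (m_buffer.empty())
          return 500;                        // same fallback as get_avg_block_size()
        double sum = 0;
        for (double v : m_buffer) sum += v;
        return sum / m_buffer.size();
      }

    private:
      std::size_t m_capacity;
      mutable std::mutex m_mutex;
      std::deque<double> m_buffer;
    };

    int main() {
      response_size_tracker tracker(10);
      printf("empty -> %.0f\n", tracker.average());   // 500
      tracker.record(200000);
      tracker.record(400000);
      printf("avg   -> %.0f\n", tracker.average());   // 300000
    }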
@@ -485,14 +485,14 @@ namespace cryptonote
block b;
if(!parse_and_validate_block_from_blob(block_entry.block, b))
{
- LOG_ERROR_CCONTEXT("sent wrong block: failed to parse and validate block: \r\n"
+ LOG_ERROR_CCONTEXT("sent wrong block: failed to parse and validate block: \r\n"
<< epee::string_tools::buff_to_hex_nodelimer(block_entry.block) << "\r\n dropping connection");
m_p2p->drop_connection(context);
return 1;
- }
+ }
//to avoid concurrency in core between connections, suspend connections which delivered block later then first one
if(count == 2)
- {
+ {
if(m_core.have_block(get_block_hash(b)))
{
context.m_state = cryptonote_connection_context::state_idle;
@@ -502,18 +502,18 @@ namespace cryptonote
return 1;
}
}
-
+
auto req_it = context.m_requested_objects.find(get_block_hash(b));
if(req_it == context.m_requested_objects.end())
{
- LOG_ERROR_CCONTEXT("sent wrong NOTIFY_RESPONSE_GET_OBJECTS: block with id=" << epee::string_tools::pod_to_hex(get_blob_hash(block_entry.block))
+ LOG_ERROR_CCONTEXT("sent wrong NOTIFY_RESPONSE_GET_OBJECTS: block with id=" << epee::string_tools::pod_to_hex(get_blob_hash(block_entry.block))
<< " wasn't requested, dropping connection");
m_p2p->drop_connection(context);
return 1;
}
- if(b.tx_hashes.size() != block_entry.txs.size())
+ if(b.tx_hashes.size() != block_entry.txs.size())
{
- LOG_ERROR_CCONTEXT("sent wrong NOTIFY_RESPONSE_GET_OBJECTS: block with id=" << epee::string_tools::pod_to_hex(get_blob_hash(block_entry.block))
+ LOG_ERROR_CCONTEXT("sent wrong NOTIFY_RESPONSE_GET_OBJECTS: block with id=" << epee::string_tools::pod_to_hex(get_blob_hash(block_entry.block))
<< ", tx_hashes.size()=" << b.tx_hashes.size() << " mismatch with block_complete_entry.m_txs.size()=" << block_entry.txs.size() << ", dropping connection");
m_p2p->drop_connection(context);
return 1;
@@ -524,7 +524,7 @@ namespace cryptonote
if(context.m_requested_objects.size())
{
- LOG_PRINT_CCONTEXT_RED("returned not all requested objects (context.m_requested_objects.size()="
+ LOG_PRINT_CCONTEXT_RED("returned not all requested objects (context.m_requested_objects.size()="
<< context.m_requested_objects.size() << "), dropping connection", LOG_LEVEL_0);
m_p2p->drop_connection(context);
return 1;
@@ -537,78 +537,77 @@ namespace cryptonote
boost::bind(&t_core::resume_mine, &m_core));
LOG_PRINT_CCONTEXT_YELLOW( "Got NEW BLOCKS inside of " << __FUNCTION__ << ": size: " << arg.blocks.size() , LOG_LEVEL_1);
-
+
if (m_core.get_test_drop_download() && m_core.get_test_drop_download_height()) { // DISCARD BLOCKS for testing
-
- uint64_t previous_height = m_core.get_current_blockchain_height();
-
- m_core.prepare_handle_incoming_blocks(arg.blocks);
- BOOST_FOREACH(const block_complete_entry& block_entry, arg.blocks)
- {
- // process transactions
- TIME_MEASURE_START(transactions_process_time);
- BOOST_FOREACH(auto& tx_blob, block_entry.txs)
- {
- tx_verification_context tvc = AUTO_VAL_INIT(tvc);
- m_core.handle_incoming_tx(tx_blob, tvc, true, true);
- if(tvc.m_verifivation_failed)
- {
- LOG_ERROR_CCONTEXT("transaction verification failed on NOTIFY_RESPONSE_GET_OBJECTS, \r\ntx_id = "
- << epee::string_tools::pod_to_hex(get_blob_hash(tx_blob)) << ", dropping connection");
- m_p2p->drop_connection(context);
- m_core.cleanup_handle_incoming_blocks();
- return 1;
- }
- }
- TIME_MEASURE_FINISH(transactions_process_time);
-
- // process block
-
- TIME_MEASURE_START(block_process_time);
- block_verification_context bvc = boost::value_initialized<block_verification_context>();
-
- m_core.handle_incoming_block(block_entry.block, bvc, false); // <--- process block
-
- if(bvc.m_verifivation_failed)
- {
- LOG_PRINT_CCONTEXT_L1("Block verification failed, dropping connection");
- m_p2p->drop_connection(context);
- m_p2p->add_ip_fail(context.m_remote_ip);
- m_core.cleanup_handle_incoming_blocks();
- return 1;
- }
- if(bvc.m_marked_as_orphaned)
- {
- LOG_PRINT_CCONTEXT_L1("Block received at sync phase was marked as orphaned, dropping connection");
- m_p2p->drop_connection(context);
- m_p2p->add_ip_fail(context.m_remote_ip);
- m_core.cleanup_handle_incoming_blocks();
- return 1;
- }
-
- TIME_MEASURE_FINISH(block_process_time);
- LOG_PRINT_CCONTEXT_L2("Block process time: " << block_process_time + transactions_process_time << "(" << transactions_process_time << "/" << block_process_time << ")ms");
-
- epee::net_utils::data_logger::get_instance().add_data("calc_time", block_process_time + transactions_process_time);
- epee::net_utils::data_logger::get_instance().add_data("block_processing", 1);
-
-
- } // each download block
- m_core.cleanup_handle_incoming_blocks();
-
- if (m_core.get_current_blockchain_height() > previous_height)
- {
- LOG_PRINT_CCONTEXT_YELLOW( "Synced " << m_core.get_current_blockchain_height() << "/" << m_core.get_target_blockchain_height() , LOG_LEVEL_0);
- }
- } // if not DISCARD BLOCK
-
-
+
+ uint64_t previous_height = m_core.get_current_blockchain_height();
+
+ m_core.prepare_handle_incoming_blocks(arg.blocks);
+ BOOST_FOREACH(const block_complete_entry& block_entry, arg.blocks)
+ {
+ // process transactions
+ TIME_MEASURE_START(transactions_process_time);
+ BOOST_FOREACH(auto& tx_blob, block_entry.txs)
+ {
+ tx_verification_context tvc = AUTO_VAL_INIT(tvc);
+ m_core.handle_incoming_tx(tx_blob, tvc, true, true);
+ if(tvc.m_verifivation_failed)
+ {
+ LOG_ERROR_CCONTEXT("transaction verification failed on NOTIFY_RESPONSE_GET_OBJECTS, \r\ntx_id = "
+ << epee::string_tools::pod_to_hex(get_blob_hash(tx_blob)) << ", dropping connection");
+ m_p2p->drop_connection(context);
+ m_core.cleanup_handle_incoming_blocks();
+ return 1;
+ }
+ }
+ TIME_MEASURE_FINISH(transactions_process_time);
+
+ // process block
+
+ TIME_MEASURE_START(block_process_time);
+ block_verification_context bvc = boost::value_initialized<block_verification_context>();
+
+ m_core.handle_incoming_block(block_entry.block, bvc, false); // <--- process block
+
+ if(bvc.m_verifivation_failed)
+ {
+ LOG_PRINT_CCONTEXT_L1("Block verification failed, dropping connection");
+ m_p2p->drop_connection(context);
+ m_p2p->add_ip_fail(context.m_remote_ip);
+ m_core.cleanup_handle_incoming_blocks();
+ return 1;
+ }
+ if(bvc.m_marked_as_orphaned)
+ {
+ LOG_PRINT_CCONTEXT_L1("Block received at sync phase was marked as orphaned, dropping connection");
+ m_p2p->drop_connection(context);
+ m_p2p->add_ip_fail(context.m_remote_ip);
+ m_core.cleanup_handle_incoming_blocks();
+ return 1;
+ }
+
+ TIME_MEASURE_FINISH(block_process_time);
+ LOG_PRINT_CCONTEXT_L2("Block process time: " << block_process_time + transactions_process_time << "(" << transactions_process_time << "/" << block_process_time << ")ms");
+
+ epee::net_utils::data_logger::get_instance().add_data("calc_time", block_process_time + transactions_process_time);
+ epee::net_utils::data_logger::get_instance().add_data("block_processing", 1);
+
+ } // each download block
+ m_core.cleanup_handle_incoming_blocks();
+
+ if (m_core.get_current_blockchain_height() > previous_height)
+ {
+ LOG_PRINT_CCONTEXT_YELLOW( "Synced " << m_core.get_current_blockchain_height() << "/" << m_core.get_target_blockchain_height() , LOG_LEVEL_0);
+ }
+ } // if not DISCARD BLOCK
+
+
}
request_missing_objects(context, true);
return 1;
}
//------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ template<class t_core>
bool t_cryptonote_protocol_handler<t_core>::on_idle()
{
return m_core.on_idle();
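The reindented sync loop above wraps each batch of downloaded blocks in prepare_handle_incoming_blocks()/cleanup_handle_incoming_blocks(), verifies every transaction before its block, and drops (and fail-marks) the peer on any verification failure or orphaned block, always cleaning up before returning. A schematic outline of that control flow, with hypothetical core/peer stand-ins rather than the real m_core and m_p2p interfaces:

    #include <cstdio>
    #include <vector>

    // Schematic of the batch-processing control flow in handle_response_get_objects;
    // the types below are illustrative stand-ins, not the real interfaces.
    struct tx_blob     { bool valid; };
    struct block_entry { std::vector<tx_blob> txs; bool block_valid; };

    struct fake_core {
      void prepare(const std::vector<block_entry>&) { puts("prepare batch"); }
      bool handle_tx(const tx_blob& t)              { return t.valid; }
      bool handle_block(const block_entry& b)       { return b.block_valid; }
      void cleanup()                                { puts("cleanup batch"); }
    };
    struct fake_peer { void drop() { puts("drop connection"); } };

    static int process_batch(fake_core& core, fake_peer& peer,
                             const std::vector<block_entry>& blocks) {
      core.prepare(blocks);
      for (const block_entry& b : blocks) {
        for (const tx_blob& t : b.txs)
          if (!core.handle_tx(t)) {      // transaction verification failed
            peer.drop();
            core.cleanup();              // cleanup runs on every exit path
            return 1;
          }
        if (!core.handle_block(b)) {     // block verification failed / orphaned
          peer.drop();
          core.cleanup();
          return 1;
        }
      }
      core.cleanup();
      return 1;                          // the real handler also returns 1
    }

    int main() {
      fake_core core; fake_peer peer;
      std::vector<block_entry> batch = { { { {true}, {true} }, true },
                                         { { {true} }, false } };   // second block fails
      return process_batch(core, peer, batch) == 1 ? 0 : 1;
    }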
@@ -629,18 +628,18 @@ namespace cryptonote
return 1;
}
//------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ template<class t_core>
bool t_cryptonote_protocol_handler<t_core>::request_missing_objects(cryptonote_connection_context& context, bool check_having_blocks)
{
- //if (!m_one_request == false)
- //return true;
- m_one_request = false;
- // save request size to log (dr monero)
- /*using namespace boost::chrono;
- auto point = steady_clock::now();
- auto time_from_epoh = point.time_since_epoch();
- auto sec = duration_cast< seconds >( time_from_epoh ).count();*/
-
+ //if (!m_one_request == false)
+ //return true;
+ m_one_request = false;
+ // save request size to log (dr monero)
+ /*using namespace boost::chrono;
+ auto point = steady_clock::now();
+ auto time_from_epoh = point.time_since_epoch();
+ auto sec = duration_cast< seconds >( time_from_epoh ).count();*/
+
if(context.m_needed_objects.size())
{
//we know objects that we need, request this objects
@@ -648,8 +647,8 @@ namespace cryptonote
size_t count = 0;
auto it = context.m_needed_objects.begin();
- size_t count_limit = BLOCKS_SYNCHRONIZING_DEFAULT_COUNT;
- _note_c("net/req-calc" , "Setting count_limit: " << count_limit);
+ size_t count_limit = BLOCKS_SYNCHRONIZING_DEFAULT_COUNT;
+ _note_c("net/req-calc" , "Setting count_limit: " << count_limit);
while(it != context.m_needed_objects.end() && count < BLOCKS_SYNCHRONIZING_DEFAULT_COUNT)
{
if( !(check_having_blocks && m_core.have_block(*it)))
@@ -661,35 +660,35 @@ namespace cryptonote
context.m_needed_objects.erase(it++);
}
LOG_PRINT_CCONTEXT_L1("-->>NOTIFY_REQUEST_GET_OBJECTS: blocks.size()=" << req.blocks.size() << ", txs.size()=" << req.txs.size()
- << "requested blocks count=" << count << " / " << count_limit);
- //epee::net_utils::network_throttle_manager::get_global_throttle_inreq().logger_handle_net("log/dr-monero/net/req-all.data", sec, get_avg_block_size());
-
- post_notify<NOTIFY_REQUEST_GET_OBJECTS>(req, context);
+ << "requested blocks count=" << count << " / " << count_limit);
+ //epee::net_utils::network_throttle_manager::get_global_throttle_inreq().logger_handle_net("log/dr-monero/net/req-all.data", sec, get_avg_block_size());
+
+ post_notify<NOTIFY_REQUEST_GET_OBJECTS>(req, context);
}else if(context.m_last_response_height < context.m_remote_blockchain_height-1)
{//we have to fetch more objects ids, request blockchain entry
-
+
NOTIFY_REQUEST_CHAIN::request r = boost::value_initialized<NOTIFY_REQUEST_CHAIN::request>();
m_core.get_short_chain_history(r.block_ids);
- handler_request_blocks_history( r.block_ids ); // change the limit(?), sleep(?)
-
- //std::string blob; // for calculate size of request
- //epee::serialization::store_t_to_binary(r, blob);
- //epee::net_utils::network_throttle_manager::get_global_throttle_inreq().logger_handle_net("log/dr-monero/net/req-all.data", sec, get_avg_block_size());
- LOG_PRINT_CCONTEXT_L1("r = " << 200);
-
+ handler_request_blocks_history( r.block_ids ); // change the limit(?), sleep(?)
+
+ //std::string blob; // for calculate size of request
+ //epee::serialization::store_t_to_binary(r, blob);
+ //epee::net_utils::network_throttle_manager::get_global_throttle_inreq().logger_handle_net("log/dr-monero/net/req-all.data", sec, get_avg_block_size());
+ LOG_PRINT_CCONTEXT_L1("r = " << 200);
+
LOG_PRINT_CCONTEXT_L1("-->>NOTIFY_REQUEST_CHAIN: m_block_ids.size()=" << r.block_ids.size() );
post_notify<NOTIFY_REQUEST_CHAIN>(r, context);
}else
- {
- CHECK_AND_ASSERT_MES(context.m_last_response_height == context.m_remote_blockchain_height-1
- && !context.m_needed_objects.size()
- && !context.m_requested_objects.size(), false, "request_missing_blocks final condition failed!"
+ {
+ CHECK_AND_ASSERT_MES(context.m_last_response_height == context.m_remote_blockchain_height-1
+ && !context.m_needed_objects.size()
+ && !context.m_requested_objects.size(), false, "request_missing_blocks final condition failed!"
<< "\r\nm_last_response_height=" << context.m_last_response_height
<< "\r\nm_remote_blockchain_height=" << context.m_remote_blockchain_height
<< "\r\nm_needed_objects.size()=" << context.m_needed_objects.size()
<< "\r\nm_requested_objects.size()=" << context.m_requested_objects.size()
<< "\r\non connection [" << epee::net_utils::print_connection_context_short(context)<< "]");
-
+
context.m_state = cryptonote_connection_context::state_normal;
LOG_PRINT_CCONTEXT_GREEN(" SYNCHRONIZED OK", LOG_LEVEL_0);
on_connection_synchronized();
@@ -697,16 +696,16 @@ namespace cryptonote
return true;
}
//------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ template<class t_core>
bool t_cryptonote_protocol_handler<t_core>::on_connection_synchronized()
{
bool val_expected = false;
if(m_synchronized.compare_exchange_strong(val_expected, true))
{
- LOG_PRINT_L0(ENDL << "**********************************************************************" << ENDL
- << "You are now synchronized with the network. You may now start simplewallet." << ENDL
+ LOG_PRINT_L0(ENDL << "**********************************************************************" << ENDL
+ << "You are now synchronized with the network. You may now start simplewallet." << ENDL
<< ENDL
- << "Please note, that the blockchain will be saved only after you quit the daemon with \"exit\" command or if you use \"save\" command." << ENDL
+ << "Please note, that the blockchain will be saved only after you quit the daemon with \"exit\" command or if you use \"save\" command." << ENDL
<< "Otherwise, you will possibly need to synchronize the blockchain again." << ENDL
<< ENDL
<< "Use \"help\" command to see the list of available commands." << ENDL
@@ -716,7 +715,7 @@ namespace cryptonote
return true;
}
//------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ template<class t_core>
size_t t_cryptonote_protocol_handler<t_core>::get_synchronizing_connections_count()
{
size_t count = 0;
@@ -728,12 +727,12 @@ namespace cryptonote
return count;
}
//------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ template<class t_core>
int t_cryptonote_protocol_handler<t_core>::handle_response_chain_entry(int command, NOTIFY_RESPONSE_CHAIN_ENTRY::request& arg, cryptonote_connection_context& context)
{
- LOG_PRINT_CCONTEXT_L2("NOTIFY_RESPONSE_CHAIN_ENTRY: m_block_ids.size()=" << arg.m_block_ids.size()
+ LOG_PRINT_CCONTEXT_L2("NOTIFY_RESPONSE_CHAIN_ENTRY: m_block_ids.size()=" << arg.m_block_ids.size()
<< ", m_start_height=" << arg.start_height << ", m_total_height=" << arg.total_height);
-
+
if(!arg.m_block_ids.size())
{
LOG_ERROR_CCONTEXT("sent empty m_block_ids, dropping connection");
@@ -750,7 +749,7 @@ namespace cryptonote
m_p2p->add_ip_fail(context.m_remote_ip);
return 1;
}
-
+
context.m_remote_blockchain_height = arg.total_height;
context.m_last_response_height = arg.start_height + arg.m_block_ids.size()-1;
if(context.m_last_response_height > context.m_remote_blockchain_height)
@@ -771,29 +770,29 @@ namespace cryptonote
return 1;
}
//------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ template<class t_core>
bool t_cryptonote_protocol_handler<t_core>::relay_block(NOTIFY_NEW_BLOCK::request& arg, cryptonote_connection_context& exclude_context)
{
return relay_post_notify<NOTIFY_NEW_BLOCK>(arg, exclude_context);
}
//------------------------------------------------------------------------------------------------------------------------
- template<class t_core>
+ template<class t_core>
bool t_cryptonote_protocol_handler<t_core>::relay_transactions(NOTIFY_NEW_TRANSACTIONS::request& arg, cryptonote_connection_context& exclude_context)
{
return relay_post_notify<NOTIFY_NEW_TRANSACTIONS>(arg, exclude_context);
}
- /// @deprecated
- template<class t_core> std::ofstream& t_cryptonote_protocol_handler<t_core>::get_logreq() const {
- static std::ofstream * logreq=NULL;
- if (!logreq) {
- LOG_PRINT_RED("LOG OPENED",LOG_LEVEL_0);
- logreq = new std::ofstream("logreq.txt"); // leak mem (singleton)
- *logreq << "Opened log" << std::endl;
- }
- LOG_PRINT_YELLOW("LOG USED",LOG_LEVEL_0);
- (*logreq) << "log used" << std::endl;
- return *logreq;
- }
+ /// @deprecated
+ template<class t_core> std::ofstream& t_cryptonote_protocol_handler<t_core>::get_logreq() const {
+ static std::ofstream * logreq=NULL;
+ if (!logreq) {
+ LOG_PRINT_RED("LOG OPENED",LOG_LEVEL_0);
+ logreq = new std::ofstream("logreq.txt"); // leak mem (singleton)
+ *logreq << "Opened log" << std::endl;
+ }
+ LOG_PRINT_YELLOW("LOG USED",LOG_LEVEL_0);
+ (*logreq) << "log used" << std::endl;
+ return *logreq;
+ }
} // namespace
diff --git a/src/daemon/CMakeLists.txt b/src/daemon/CMakeLists.txt
index 90befa8d1..6358bb3ff 100644
--- a/src/daemon/CMakeLists.txt
+++ b/src/daemon/CMakeLists.txt
@@ -28,12 +28,12 @@
set(blocksdat "")
if(PER_BLOCK_CHECKPOINT)
- if(APPLE)
- add_custom_command(OUTPUT blocksdat.o COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && touch stub.c && ${CMAKE_C_COMPILER} -o stub.o -c stub.c COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ld -r -sectcreate __DATA __blocks_dat ../blocks/checkpoints.dat -o ${CMAKE_CURRENT_BINARY_DIR}/blocksdat.o stub.o && rm -f stub.*)
- else()
- add_custom_command(OUTPUT blocksdat.o COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && cp ../blocks/checkpoints.dat blocks.dat && ld -r -b binary -o ${CMAKE_CURRENT_BINARY_DIR}/blocksdat.o blocks.dat && rm -f blocks.dat)
- endif()
- set(blocksdat "blocksdat.o")
+ if(APPLE)
+ add_custom_command(OUTPUT blocksdat.o COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && touch stub.c && ${CMAKE_C_COMPILER} -o stub.o -c stub.c COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ld -r -sectcreate __DATA __blocks_dat ../blocks/checkpoints.dat -o ${CMAKE_CURRENT_BINARY_DIR}/blocksdat.o stub.o && rm -f stub.*)
+ else()
+ add_custom_command(OUTPUT blocksdat.o COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && cp ../blocks/checkpoints.dat blocks.dat && ld -r -b binary -o ${CMAKE_CURRENT_BINARY_DIR}/blocksdat.o blocks.dat && rm -f blocks.dat)
+ endif()
+ set(blocksdat "blocksdat.o")
endif()
set(daemon_sources
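The CMake hunk above links checkpoints.dat into the daemon as a raw object: on Apple via `ld -r -sectcreate __DATA __blocks_dat`, elsewhere via `ld -r -b binary` after copying the file to blocks.dat. With GNU ld's `-b binary`, the resulting object conventionally exposes `_binary_blocks_dat_start` and `_binary_blocks_dat_end` symbols; the sketch below shows how such a blob is typically read from C++, assuming blocksdat.o is on the link line (symbol names follow that convention and are not taken from this diff):

    #include <cstdio>

    // Conventional symbols emitted by `ld -r -b binary -o blocksdat.o blocks.dat`.
    // Their exact names depend on the input file name and toolchain; this is an
    // illustrative sketch, not the accessor actually used by the daemon.
    extern "C" const char _binary_blocks_dat_start[];
    extern "C" const char _binary_blocks_dat_end[];

    int main() {
      const char*  data = _binary_blocks_dat_start;
      const size_t size = static_cast<size_t>(_binary_blocks_dat_end
                                              - _binary_blocks_dat_start);
      printf("embedded checkpoint data: %zu bytes at %p\n", size, (const void*)data);
    }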
diff --git a/src/daemon/main.cpp b/src/daemon/main.cpp
index 68e375269..63d25ef1c 100644
--- a/src/daemon/main.cpp
+++ b/src/daemon/main.cpp
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -51,8 +51,8 @@ int main(int argc, char const * argv[])
{
try {
- _note_c("dbg/main", "Begin of main()");
- // TODO parse the debug options like set log level right here at start
+ _note_c("dbg/main", "Begin of main()");
+ // TODO parse the debug options like set log level right here at start
tools::sanitize_locale();
@@ -77,7 +77,7 @@ int main(int argc, char const * argv[])
command_line::add_arg(visible_options, daemon_args::arg_config_file, default_conf.string());
command_line::add_arg(visible_options, command_line::arg_test_dbg_lock_sleep);
cryptonote::core::init_options(core_settings);
-
+
// Settings
bf::path default_log = default_data_dir / std::string(CRYPTONOTE_NAME ".log");
command_line::add_arg(core_settings, daemon_args::arg_log_file, default_log.string());
@@ -132,7 +132,7 @@ int main(int argc, char const * argv[])
std::cout << "OS: " << tools::get_os_version_string() << ENDL;
return 0;
}
-
+
epee::g_test_dbg_lock_sleep = command_line::get_arg(vm, command_line::arg_test_dbg_lock_sleep);
std::string db_type = command_line::get_arg(vm, command_line::arg_db_type);
@@ -261,7 +261,7 @@ int main(int argc, char const * argv[])
);
}
- _note_c("dbg/main", "Moving from main() into the daemonize now.");
+ _note_c("dbg/main", "Moving from main() into the daemonize now.");
return daemonizer::daemonize(argc, argv, daemonize::t_executor{}, vm);
}
diff --git a/src/p2p/net_node.h b/src/p2p/net_node.h
index 99f12beed..772973d6b 100644
--- a/src/p2p/net_node.h
+++ b/src/p2p/net_node.h
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#pragma once
@@ -83,15 +83,15 @@ namespace nodetool
typedef t_payload_net_handler payload_net_handler;
node_server(t_payload_net_handler& payload_handler)
- :m_payload_handler(payload_handler),
+ :m_payload_handler(payload_handler),
m_current_number_of_out_peers(0),
- m_allow_local_ip(false),
- m_hide_my_port(false),
+ m_allow_local_ip(false),
+ m_hide_my_port(false),
m_no_igd(false),
m_offline(false),
m_save_graph(false),
is_closing(false),
- m_net_server( epee::net_utils::e_connection_type_P2P ) // this is a P2P connection of the main p2p node server, because this is class node_server<>
+ m_net_server( epee::net_utils::e_connection_type_P2P ) // this is a P2P connection of the main p2p node server, because this is class node_server<>
{}
virtual ~node_server()
{}
@@ -128,7 +128,7 @@ namespace nodetool
, "seeds.moneroseeds.ch"
, "seeds.moneroseeds.li"
};
-
+
bool islimitup=false;
bool islimitdown=false;
@@ -201,7 +201,7 @@ namespace nodetool
bool try_to_connect_and_handshake_with_new_peer(const net_address& na, bool just_take_peerlist = false, uint64_t last_seen_stamp = 0, bool white = true);
size_t get_random_index_with_fixed_probability(size_t max_index);
bool is_peer_used(const peerlist_entry& peer);
- bool is_addr_connected(const net_address& peer);
+ bool is_addr_connected(const net_address& peer);
template<class t_callback>
bool try_ping(basic_node_data& node_data, p2p_connection_context& context, t_callback cb);
bool make_expected_connections_count(bool white_list, size_t expected_connections);
@@ -215,20 +215,20 @@ namespace nodetool
template <class Container>
bool parse_peers_and_add_to_container(const boost::program_options::variables_map& vm, const command_line::arg_descriptor<std::vector<std::string> > & arg, Container& container);
- bool set_max_out_peers(const boost::program_options::variables_map& vm, int64_t max);
- bool set_tos_flag(const boost::program_options::variables_map& vm, int limit);
-
- bool set_rate_up_limit(const boost::program_options::variables_map& vm, int64_t limit);
- bool set_rate_down_limit(const boost::program_options::variables_map& vm, int64_t limit);
- bool set_rate_limit(const boost::program_options::variables_map& vm, int64_t limit);
+ bool set_max_out_peers(const boost::program_options::variables_map& vm, int64_t max);
+ bool set_tos_flag(const boost::program_options::variables_map& vm, int limit);
+
+ bool set_rate_up_limit(const boost::program_options::variables_map& vm, int64_t limit);
+ bool set_rate_down_limit(const boost::program_options::variables_map& vm, int64_t limit);
+ bool set_rate_limit(const boost::program_options::variables_map& vm, int64_t limit);
void kill() { ///< will be called e.g. from deinit()
- _info("Killing the net_node");
- is_closing = true;
- if(mPeersLoggerThread != nullptr)
- mPeersLoggerThread->join(); // make sure the thread finishes
- _info("Joined extra background net_node threads");
- }
+ _info("Killing the net_node");
+ is_closing = true;
+ if(mPeersLoggerThread != nullptr)
+ mPeersLoggerThread->join(); // make sure the thread finishes
+ _info("Joined extra background net_node threads");
+ }
//debug functions
std::string print_connections_container();
@@ -247,16 +247,16 @@ namespace nodetool
END_KV_SERIALIZE_MAP()
};
- public:
+ public:
config m_config; // TODO was private, add getters?
std::atomic<unsigned int> m_current_number_of_out_peers;
- void set_save_graph(bool save_graph)
- {
- m_save_graph = save_graph;
- epee::net_utils::connection_basic::set_save_graph(save_graph);
- }
- private:
+ void set_save_graph(bool save_graph)
+ {
+ m_save_graph = save_graph;
+ epee::net_utils::connection_basic::set_save_graph(save_graph);
+ }
+ private:
std::string m_config_folder;
bool m_have_address;
@@ -270,7 +270,7 @@ namespace nodetool
bool m_offline;
std::atomic<bool> m_save_graph;
std::atomic<bool> is_closing;
- std::unique_ptr<std::thread> mPeersLoggerThread;
+ std::unique_ptr<std::thread> mPeersLoggerThread;
//critical_section m_connections_lock;
//connections_indexed_container m_connections;
diff --git a/src/p2p/net_node.inl b/src/p2p/net_node.inl
index aa22d5b23..83bb80ac0 100644
--- a/src/p2p/net_node.inl
+++ b/src/p2p/net_node.inl
@@ -1,21 +1,21 @@
// Copyright (c) 2014-2015, The Monero Project
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
-//
+//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -25,7 +25,7 @@
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
// IP blocking adapted from Boolberry
@@ -34,7 +34,7 @@
#include <algorithm>
#include <boost/date_time/posix_time/posix_time.hpp>
-#include <boost/thread/thread.hpp>
+#include <boost/thread/thread.hpp>
#include <atomic>
#include "version.h"
@@ -89,17 +89,17 @@ namespace nodetool
" If this option is given the options add-priority-node and seed-node are ignored"};
const command_line::arg_descriptor<std::vector<std::string> > arg_p2p_seed_node = {"seed-node", "Connect to a node to retrieve peer addresses, and disconnect"};
const command_line::arg_descriptor<bool> arg_p2p_hide_my_port = {"hide-my-port", "Do not announce yourself as peerlist candidate", false, true};
-
- const command_line::arg_descriptor<bool> arg_no_igd = {"no-igd", "Disable UPnP port mapping"};
- const command_line::arg_descriptor<bool> arg_offline = {"offline", "Do not listen for peers, nor connect to any"};
- const command_line::arg_descriptor<int64_t> arg_out_peers = {"out-peers", "set max limit of out peers", -1};
- const command_line::arg_descriptor<int> arg_tos_flag = {"tos-flag", "set TOS flag", -1};
-
- const command_line::arg_descriptor<int64_t> arg_limit_rate_up = {"limit-rate-up", "set limit-rate-up [kB/s]", -1};
- const command_line::arg_descriptor<int64_t> arg_limit_rate_down = {"limit-rate-down", "set limit-rate-down [kB/s]", -1};
- const command_line::arg_descriptor<int64_t> arg_limit_rate = {"limit-rate", "set limit-rate [kB/s]", -1};
-
- const command_line::arg_descriptor<bool> arg_save_graph = {"save-graph", "Save data for dr monero", false};
+
+ const command_line::arg_descriptor<bool> arg_no_igd = {"no-igd", "Disable UPnP port mapping"};
+ const command_line::arg_descriptor<bool> arg_offline = {"offline", "Do not listen for peers, nor connect to any"};
+ const command_line::arg_descriptor<int64_t> arg_out_peers = {"out-peers", "set max limit of out peers", -1};
+ const command_line::arg_descriptor<int> arg_tos_flag = {"tos-flag", "set TOS flag", -1};
+
+ const command_line::arg_descriptor<int64_t> arg_limit_rate_up = {"limit-rate-up", "set limit-rate-up [kB/s]", -1};
+ const command_line::arg_descriptor<int64_t> arg_limit_rate_down = {"limit-rate-down", "set limit-rate-down [kB/s]", -1};
+ const command_line::arg_descriptor<int64_t> arg_limit_rate = {"limit-rate", "set limit-rate [kB/s]", -1};
+
+ const command_line::arg_descriptor<bool> arg_save_graph = {"save-graph", "Save data for dr monero", false};
}
//-----------------------------------------------------------------------------------
template<class t_payload_net_handler>
@@ -113,7 +113,7 @@ namespace nodetool
command_line::add_arg(desc, arg_p2p_add_peer);
command_line::add_arg(desc, arg_p2p_add_priority_node);
command_line::add_arg(desc, arg_p2p_add_exclusive_node);
- command_line::add_arg(desc, arg_p2p_seed_node);
+ command_line::add_arg(desc, arg_p2p_seed_node);
command_line::add_arg(desc, arg_p2p_hide_my_port);
command_line::add_arg(desc, arg_no_igd);
command_line::add_arg(desc, arg_offline);
@@ -254,7 +254,7 @@ namespace nodetool
m_offline = command_line::get_arg(vm, arg_offline);
if (command_line::has_arg(vm, arg_p2p_add_peer))
- {
+ {
std::vector<std::string> perrs = command_line::get_arg(vm, arg_p2p_add_peer);
for(const std::string& pr_str: perrs)
{
@@ -265,24 +265,24 @@ namespace nodetool
m_command_line_peers.push_back(pe);
}
}
-
+
if(command_line::has_arg(vm, arg_save_graph))
{
- set_save_graph(true);
- }
+ set_save_graph(true);
+ }
if (command_line::has_arg(vm,arg_p2p_add_exclusive_node))
{
if (!parse_peers_and_add_to_container(vm, arg_p2p_add_exclusive_node, m_exclusive_peers))
return false;
}
-
+
if (command_line::has_arg(vm, arg_p2p_add_priority_node))
{
if (!parse_peers_and_add_to_container(vm, arg_p2p_add_priority_node, m_priority_peers))
return false;
}
-
+
if (command_line::has_arg(vm, arg_p2p_seed_node))
{
if (!parse_peers_and_add_to_container(vm, arg_p2p_seed_node, m_seed_nodes))
@@ -291,21 +291,21 @@ namespace nodetool
if(command_line::has_arg(vm, arg_p2p_hide_my_port))
m_hide_my_port = true;
-
+
if ( !set_max_out_peers(vm, command_line::get_arg(vm, arg_out_peers) ) )
- return false;
-
- if ( !set_tos_flag(vm, command_line::get_arg(vm, arg_tos_flag) ) )
- return false;
+ return false;
- if ( !set_rate_up_limit(vm, command_line::get_arg(vm, arg_limit_rate_up) ) )
- return false;
+ if ( !set_tos_flag(vm, command_line::get_arg(vm, arg_tos_flag) ) )
+ return false;
- if ( !set_rate_down_limit(vm, command_line::get_arg(vm, arg_limit_rate_down) ) )
- return false;
+ if ( !set_rate_up_limit(vm, command_line::get_arg(vm, arg_limit_rate_up) ) )
+ return false;
- if ( !set_rate_limit(vm, command_line::get_arg(vm, arg_limit_rate) ) )
- return false;
+ if ( !set_rate_down_limit(vm, command_line::get_arg(vm, arg_limit_rate_down) ) )
+ return false;
+
+ if ( !set_rate_limit(vm, command_line::get_arg(vm, arg_limit_rate) ) )
+ return false;
return true;
}
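The options handled in this hunk are the daemon's P2P tuning flags; each numeric flag defaults to -1, which means "leave the compiled-in default in place" (see the arg_descriptor initializers above), and the rate values are interpreted in kB/s. A typical invocation combining them, assuming the daemon binary of this vintage is named bitmonerod, might look like:

    bitmonerod --out-peers 12 --limit-rate-up 128 --limit-rate-down 512 --hide-my-port --no-igd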
@@ -370,7 +370,7 @@ namespace nodetool
// add the result addresses as seed nodes
// TODO: at some point add IPv6 support, but that won't be relevant
// for some time yet.
-
+
std::vector<std::vector<std::string>> dns_results;
dns_results.resize(m_seed_nodes_list.size());
@@ -484,7 +484,7 @@ namespace nodetool
for(auto& p: m_command_line_peers)
m_peerlist.append_with_peer_white(p);
-
+
     //only if we are really sure that we have an externally visible ip
m_have_address = true;
m_ip_address = 0;
@@ -512,48 +512,48 @@ namespace nodetool
// Add UPnP port mapping
if(m_no_igd == false) {
- LOG_PRINT_L0("Attempting to add IGD port mapping.");
- int result;
+ LOG_PRINT_L0("Attempting to add IGD port mapping.");
+ int result;
#if MINIUPNPC_API_VERSION > 13
- // default according to miniupnpc.h
- unsigned char ttl = 2;
- UPNPDev* deviceList = upnpDiscover(1000, NULL, NULL, 0, 0, ttl, &result);
+ // default according to miniupnpc.h
+ unsigned char ttl = 2;
+ UPNPDev* deviceList = upnpDiscover(1000, NULL, NULL, 0, 0, ttl, &result);
#else
- UPNPDev* deviceList = upnpDiscover(1000, NULL, NULL, 0, 0, &result);
+ UPNPDev* deviceList = upnpDiscover(1000, NULL, NULL, 0, 0, &result);
#endif
- UPNPUrls urls;
- IGDdatas igdData;
- char lanAddress[64];
- result = UPNP_GetValidIGD(deviceList, &urls, &igdData, lanAddress, sizeof lanAddress);
- freeUPNPDevlist(deviceList);
- if (result != 0) {
- if (result == 1) {
- std::ostringstream portString;
- portString << m_listenning_port;
-
- // Delete the port mapping before we create it, just in case we have dangling port mapping from the daemon not being shut down correctly
- UPNP_DeletePortMapping(urls.controlURL, igdData.first.servicetype, portString.str().c_str(), "TCP", 0);
-
- int portMappingResult;
- portMappingResult = UPNP_AddPortMapping(urls.controlURL, igdData.first.servicetype, portString.str().c_str(), portString.str().c_str(), lanAddress, CRYPTONOTE_NAME, "TCP", 0, "0");
- if (portMappingResult != 0) {
- LOG_ERROR("UPNP_AddPortMapping failed, error: " << strupnperror(portMappingResult));
- } else {
- LOG_PRINT_GREEN("Added IGD port mapping.", LOG_LEVEL_0);
- }
- } else if (result == 2) {
- LOG_PRINT_L0("IGD was found but reported as not connected.");
- } else if (result == 3) {
-        LOG_PRINT_L0("UPnP device was found but not recognized as IGD.");
- } else {
- LOG_ERROR("UPNP_GetValidIGD returned an unknown result code.");
- }
-
- FreeUPNPUrls(&urls);
- } else {
- LOG_PRINT_L0("No IGD was found.");
- }
- }
+ UPNPUrls urls;
+ IGDdatas igdData;
+ char lanAddress[64];
+ result = UPNP_GetValidIGD(deviceList, &urls, &igdData, lanAddress, sizeof lanAddress);
+ freeUPNPDevlist(deviceList);
+ if (result != 0) {
+ if (result == 1) {
+ std::ostringstream portString;
+ portString << m_listenning_port;
+
+ // Delete the port mapping before we create it, just in case we have dangling port mapping from the daemon not being shut down correctly
+ UPNP_DeletePortMapping(urls.controlURL, igdData.first.servicetype, portString.str().c_str(), "TCP", 0);
+
+ int portMappingResult;
+ portMappingResult = UPNP_AddPortMapping(urls.controlURL, igdData.first.servicetype, portString.str().c_str(), portString.str().c_str(), lanAddress, CRYPTONOTE_NAME, "TCP", 0, "0");
+ if (portMappingResult != 0) {
+ LOG_ERROR("UPNP_AddPortMapping failed, error: " << strupnperror(portMappingResult));
+ } else {
+ LOG_PRINT_GREEN("Added IGD port mapping.", LOG_LEVEL_0);
+ }
+ } else if (result == 2) {
+ LOG_PRINT_L0("IGD was found but reported as not connected.");
+ } else if (result == 3) {
+        LOG_PRINT_L0("UPnP device was found but not recognized as IGD.");
+ } else {
+ LOG_ERROR("UPNP_GetValidIGD returned an unknown result code.");
+ }
+
+ FreeUPNPUrls(&urls);
+ } else {
+ LOG_PRINT_L0("No IGD was found.");
+ }
+ }
return res;
}
//-----------------------------------------------------------------------------------
@@ -566,30 +566,30 @@ namespace nodetool
template<class t_payload_net_handler>
bool node_server<t_payload_net_handler>::run()
{
- // creating thread to log number of connections
- mPeersLoggerThread.reset(new std::thread([&]()
- {
- _note("Thread monitor number of peers - start");
- while (!is_closing)
- { // main loop of thread
- //number_of_peers = m_net_server.get_config_object().get_connections_count();
- unsigned int number_of_peers = 0;
- m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
- {
- if (!cntxt.m_is_income) ++number_of_peers;
- return true;
- }); // lambda
-
- m_current_number_of_out_peers = number_of_peers;
- if (epee::net_utils::data_logger::is_dying())
- break;
- epee::net_utils::data_logger::get_instance().add_data("peers", number_of_peers);
-
- std::this_thread::sleep_for(std::chrono::seconds(1));
- } // main loop of thread
- _note("Thread monitor number of peers - done");
- })); // lambda
-
+ // creating thread to log number of connections
+ mPeersLoggerThread.reset(new std::thread([&]()
+ {
+ _note("Thread monitor number of peers - start");
+ while (!is_closing)
+ { // main loop of thread
+ //number_of_peers = m_net_server.get_config_object().get_connections_count();
+ unsigned int number_of_peers = 0;
+ m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
+ {
+ if (!cntxt.m_is_income) ++number_of_peers;
+ return true;
+ }); // lambda
+
+ m_current_number_of_out_peers = number_of_peers;
+ if (epee::net_utils::data_logger::is_dying())
+ break;
+ epee::net_utils::data_logger::get_instance().add_data("peers", number_of_peers);
+
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+ } // main loop of thread
+ _note("Thread monitor number of peers - done");
+ })); // lambda
+
//here you can set worker threads count
int thrds_count = 10;
@@ -620,7 +620,7 @@ namespace nodetool
template<class t_payload_net_handler>
bool node_server<t_payload_net_handler>::deinit()
{
- kill();
+ kill();
m_peerlist.deinit();
m_net_server.deinit_server();
return store_config();
@@ -662,7 +662,7 @@ namespace nodetool
return true;
}
//-----------------------------------------------------------------------------------
-
+
template<class t_payload_net_handler>
bool node_server<t_payload_net_handler>::do_handshake_with_peer(peerid_type& pi, p2p_connection_context& context_, bool just_take_peerlist)
@@ -671,11 +671,11 @@ namespace nodetool
typename COMMAND_HANDSHAKE::response rsp;
get_local_node_data(arg.node_data);
m_payload_handler.get_payload_sync_data(arg.payload_data);
-
+
epee::simple_event ev;
std::atomic<bool> hsh_result(false);
-
- bool r = epee::net_utils::async_invoke_remote_command2<typename COMMAND_HANDSHAKE::response>(context_.m_connection_id, COMMAND_HANDSHAKE::ID, arg, m_net_server.get_config_object(),
+
+ bool r = epee::net_utils::async_invoke_remote_command2<typename COMMAND_HANDSHAKE::response>(context_.m_connection_id, COMMAND_HANDSHAKE::ID, arg, m_net_server.get_config_object(),
[this, &pi, &ev, &hsh_result, &just_take_peerlist](int code, const typename COMMAND_HANDSHAKE::response& rsp, p2p_connection_context& context)
{
epee::misc_utils::auto_scope_leave_caller scope_exit_handler = epee::misc_utils::create_scope_leave_handler([&](){ev.raise();});
@@ -744,7 +744,7 @@ namespace nodetool
typename COMMAND_TIMED_SYNC::request arg = AUTO_VAL_INIT(arg);
m_payload_handler.get_payload_sync_data(arg.payload_data);
- bool r = epee::net_utils::async_invoke_remote_command2<typename COMMAND_TIMED_SYNC::response>(context_.m_connection_id, COMMAND_TIMED_SYNC::ID, arg, m_net_server.get_config_object(),
+ bool r = epee::net_utils::async_invoke_remote_command2<typename COMMAND_TIMED_SYNC::response>(context_.m_connection_id, COMMAND_TIMED_SYNC::ID, arg, m_net_server.get_config_object(),
[this](int code, const typename COMMAND_TIMED_SYNC::response& rsp, p2p_connection_context& context)
{
if(code < 0)
@@ -835,16 +835,16 @@ namespace nodetool
template<class t_payload_net_handler>
bool node_server<t_payload_net_handler>::try_to_connect_and_handshake_with_new_peer(const net_address& na, bool just_take_peerlist, uint64_t last_seen_stamp, bool white)
{
- if (m_current_number_of_out_peers == m_config.m_net_config.connections_count) // out peers limit
- {
- return false;
- }
- else if (m_current_number_of_out_peers > m_config.m_net_config.connections_count)
- {
- m_net_server.get_config_object().del_out_connections(1);
- m_current_number_of_out_peers --; // atomic variable, update time = 1s
- return false;
- }
+ if (m_current_number_of_out_peers == m_config.m_net_config.connections_count) // out peers limit
+ {
+ return false;
+ }
+ else if (m_current_number_of_out_peers > m_config.m_net_config.connections_count)
+ {
+ m_net_server.get_config_object().del_out_connections(1);
+ m_current_number_of_out_peers --; // atomic variable, update time = 1s
+ return false;
+ }
LOG_PRINT_L1("Connecting to " << epee::string_tools::get_ip_string_from_int32(na.ip) << ":"
<< epee::string_tools::num_to_string_fast(na.port) << "(white=" << white << ", last_seen: "
<< (last_seen_stamp ? epee::misc_utils::get_time_interval_string(time(NULL) - last_seen_stamp):"never")
@@ -946,12 +946,12 @@ namespace nodetool
++try_count;
- _note("Considering connecting (out) to peer: " << pe.id << " " << epee::string_tools::get_ip_string_from_int32(pe.adr.ip) << ":" << boost::lexical_cast<std::string>(pe.adr.port));
+ _note("Considering connecting (out) to peer: " << pe.id << " " << epee::string_tools::get_ip_string_from_int32(pe.adr.ip) << ":" << boost::lexical_cast<std::string>(pe.adr.port));
if(is_peer_used(pe)) {
- _note("Peer is used");
+ _note("Peer is used");
continue;
- }
+ }
if(!is_remote_ip_allowed(pe.adr.ip))
continue;
@@ -963,11 +963,11 @@ namespace nodetool
<< ":" << boost::lexical_cast<std::string>(pe.adr.port)
<< "[white=" << use_white_list
<< "] last_seen: " << (pe.last_seen ? epee::misc_utils::get_time_interval_string(time(NULL) - pe.last_seen) : "never"));
-
+
if(!try_to_connect_and_handshake_with_new_peer(pe.adr, false, pe.last_seen, use_white_list)) {
- _note("Handshake failed");
+ _note("Handshake failed");
continue;
- }
+ }
return true;
}
@@ -986,7 +986,7 @@ namespace nodetool
size_t try_count = 0;
size_t current_index = crypto::rand<size_t>()%m_seed_nodes.size();
while(true)
- {
+ {
if(m_net_server.is_stop_signal_sent())
return false;
@@ -1135,7 +1135,7 @@ namespace nodetool
node_data.peer_id = m_config.m_peer_id;
if(!m_hide_my_port)
node_data.my_port = m_external_port ? m_external_port : m_listenning_port;
- else
+ else
node_data.my_port = 0;
node_data.network_id = m_network_id;
return true;
@@ -1515,92 +1515,92 @@ namespace nodetool
return true;
}
-
+
template<class t_payload_net_handler>
bool node_server<t_payload_net_handler>::set_max_out_peers(const boost::program_options::variables_map& vm, int64_t max)
- {
- if(max == -1) {
- m_config.m_net_config.connections_count = P2P_DEFAULT_CONNECTIONS_COUNT;
- epee::net_utils::data_logger::get_instance().add_data("peers_limit", m_config.m_net_config.connections_count);
- return true;
- }
- epee::net_utils::data_logger::get_instance().add_data("peers_limit", max);
- m_config.m_net_config.connections_count = max;
- return true;
- }
-
+ {
+ if(max == -1) {
+ m_config.m_net_config.connections_count = P2P_DEFAULT_CONNECTIONS_COUNT;
+ epee::net_utils::data_logger::get_instance().add_data("peers_limit", m_config.m_net_config.connections_count);
+ return true;
+ }
+ epee::net_utils::data_logger::get_instance().add_data("peers_limit", max);
+ m_config.m_net_config.connections_count = max;
+ return true;
+ }
+
template<class t_payload_net_handler>
void node_server<t_payload_net_handler>::delete_connections(size_t count)
{
- m_net_server.get_config_object().del_out_connections(count);
+ m_net_server.get_config_object().del_out_connections(count);
}
-
- template<class t_payload_net_handler>
+
+ template<class t_payload_net_handler>
bool node_server<t_payload_net_handler>::set_tos_flag(const boost::program_options::variables_map& vm, int flag)
- {
- if(flag==-1){
- return true;
- }
- epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_tos_flag(flag);
- _dbg1("Set ToS flag " << flag);
- return true;
- }
-
- template<class t_payload_net_handler>
- bool node_server<t_payload_net_handler>::set_rate_up_limit(const boost::program_options::variables_map& vm, int64_t limit)
- {
- this->islimitup=true;
-
- if (limit==-1) {
- limit=default_limit_up;
- this->islimitup=false;
- }
-
- limit *= 1024;
- epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_up_limit( limit );
- LOG_PRINT_L0("Set limit-up to " << limit/1024 << " kB/s");
- return true;
- }
-
+ {
+ if(flag==-1){
+ return true;
+ }
+ epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_tos_flag(flag);
+ _dbg1("Set ToS flag " << flag);
+ return true;
+ }
+
template<class t_payload_net_handler>
- bool node_server<t_payload_net_handler>::set_rate_down_limit(const boost::program_options::variables_map& vm, int64_t limit)
- {
- this->islimitdown=true;
- if(limit==-1) {
- limit=default_limit_down;
- this->islimitdown=false;
- }
- limit *= 1024;
- epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_down_limit( limit );
- LOG_PRINT_L0("Set limit-down to " << limit/1024 << " kB/s");
- return true;
- }
+ bool node_server<t_payload_net_handler>::set_rate_up_limit(const boost::program_options::variables_map& vm, int64_t limit)
+ {
+ this->islimitup=true;
+
+ if (limit==-1) {
+ limit=default_limit_up;
+ this->islimitup=false;
+ }
+
+ limit *= 1024;
+ epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_up_limit( limit );
+ LOG_PRINT_L0("Set limit-up to " << limit/1024 << " kB/s");
+ return true;
+ }
template<class t_payload_net_handler>
- bool node_server<t_payload_net_handler>::set_rate_limit(const boost::program_options::variables_map& vm, int64_t limit)
- {
- int64_t limit_up = 0;
- int64_t limit_down = 0;
-
- if(limit == -1)
- {
- limit_up = default_limit_up * 1024;
- limit_down = default_limit_down * 1024;
- }
- else
- {
- limit_up = limit * 1024;
- limit_down = limit * 1024;
- }
- if(!this->islimitup) {
- epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_up_limit(limit_up);
- LOG_PRINT_L0("Set limit-up to " << limit_up/1024 << " kB/s");
- }
- if(!this->islimitdown) {
- epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_down_limit(limit_down);
- LOG_PRINT_L0("Set limit-down to " << limit_down/1024 << " kB/s");
- }
+ bool node_server<t_payload_net_handler>::set_rate_down_limit(const boost::program_options::variables_map& vm, int64_t limit)
+ {
+ this->islimitdown=true;
+ if(limit==-1) {
+ limit=default_limit_down;
+ this->islimitdown=false;
+ }
+ limit *= 1024;
+ epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_down_limit( limit );
+ LOG_PRINT_L0("Set limit-down to " << limit/1024 << " kB/s");
+ return true;
+ }
+
+ template<class t_payload_net_handler>
+ bool node_server<t_payload_net_handler>::set_rate_limit(const boost::program_options::variables_map& vm, int64_t limit)
+ {
+ int64_t limit_up = 0;
+ int64_t limit_down = 0;
- return true;
- }
+ if(limit == -1)
+ {
+ limit_up = default_limit_up * 1024;
+ limit_down = default_limit_down * 1024;
+ }
+ else
+ {
+ limit_up = limit * 1024;
+ limit_down = limit * 1024;
+ }
+ if(!this->islimitup) {
+ epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_up_limit(limit_up);
+ LOG_PRINT_L0("Set limit-up to " << limit_up/1024 << " kB/s");
+ }
+ if(!this->islimitdown) {
+ epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_down_limit(limit_down);
+ LOG_PRINT_L0("Set limit-down to " << limit_down/1024 << " kB/s");
+ }
+
+ return true;
+ }
}
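For readability of the reindented block above: the three rate options interact so that --limit-rate only fills in a direction that was not set explicitly, and every value is given in kB/s and multiplied by 1024 before being handed to the epee connection layer. A distilled restatement of that resolution, for illustration only (this helper does not exist in the codebase):

    #include <stdint.h>

    // How the -1 sentinels resolve for one direction, values in kB/s.
    static int64_t effective_limit_kBps(int64_t per_direction,    // --limit-rate-up or --limit-rate-down
                                        int64_t combined,         // --limit-rate
                                        int64_t compiled_default) // default_limit_up / default_limit_down
    {
      if (per_direction != -1) return per_direction; // explicit per-direction flag wins
      if (combined != -1)      return combined;      // otherwise --limit-rate covers this direction too
      return compiled_default;                       // otherwise fall back to the compiled-in default
    }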