-rw-r--r--  .github/workflows/build.yml | 53
-rw-r--r--  CMakeLists.txt | 12
-rw-r--r--  Dockerfile | 2
-rw-r--r--  contrib/depends/packages/boost.mk | 2
-rw-r--r--  contrib/epee/src/CMakeLists.txt | 7
-rw-r--r--  contrib/epee/src/byte_slice.cpp | 4
-rw-r--r--  docs/COMPILING_DEBUGGING_TESTING.md | 86
-rw-r--r--  src/cryptonote_protocol/cryptonote_protocol_handler.inl | 2
-rw-r--r--  src/p2p/net_node.inl | 3
-rw-r--r--  src/serialization/json_object.h | 2
-rw-r--r--  src/simplewallet/simplewallet.cpp | 2
-rw-r--r--  src/wallet/api/wallet.cpp | 62
-rw-r--r--  src/wallet/api/wallet.h | 2
-rw-r--r--  src/wallet/api/wallet2_api.h | 13
-rw-r--r--  src/wallet/wallet2.cpp | 49
-rw-r--r--  src/wallet/wallet2.h | 2
-rwxr-xr-x  tests/functional_tests/functional_tests_rpc.py | 9
-rwxr-xr-x  tests/functional_tests/mining.py | 94
-rwxr-xr-x  tests/functional_tests/util_resources.py | 4
-rw-r--r--  tests/unit_tests/epee_boosted_tcp_server.cpp | 3
-rw-r--r--  tests/unit_tests/json_serialization.cpp | 2
-rw-r--r--  utils/build_scripts/android32.Dockerfile | 2
-rw-r--r--  utils/build_scripts/android64.Dockerfile | 2
-rw-r--r--  utils/gpg_keys/mj-xmr.asc | 52
24 files changed, 373 insertions, 98 deletions
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index aa6afbfbc..36eab5027 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -2,6 +2,15 @@ name: ci/gh-actions/cli
on: [push, pull_request]
+# The variables below reduce repetition across similar targets
+env:
+ REMOVE_BUNDLED_BOOST : rm -rf /usr/local/share/boost
+ APT_INSTALL_LINUX: 'sudo apt -y install build-essential cmake libboost-all-dev miniupnpc libunbound-dev graphviz doxygen libunwind8-dev pkg-config libssl-dev libzmq3-dev libsodium-dev libhidapi-dev libnorm-dev libusb-1.0-0-dev libpgm-dev libprotobuf-dev protobuf-compiler ccache'
+ APT_SET_CONF: |
+ echo "Acquire::Retries \"3\";" | sudo tee -a /etc/apt/apt.conf.d/80-custom
+ echo "Acquire::http::Timeout \"120\";" | sudo tee -a /etc/apt/apt.conf.d/80-custom
+ echo "Acquire::ftp::Timeout \"120\";" | sudo tee -a /etc/apt/apt.conf.d/80-custom
+
jobs:
build-macos:
runs-on: macOS-latest
@@ -66,16 +75,13 @@ jobs:
key: ccache-ubuntu-build-${{ github.sha }}
restore-keys: ccache-ubuntu-build-
- name: remove bundled boost
- run: sudo rm -rf /usr/local/share/boost
+ run: ${{env.REMOVE_BUNDLED_BOOST}}
- name: set apt conf
- run: |
- echo "Acquire::Retries \"3\";" | sudo tee -a /etc/apt/apt.conf.d/80-custom
- echo "Acquire::http::Timeout \"120\";" | sudo tee -a /etc/apt/apt.conf.d/80-custom
- echo "Acquire::ftp::Timeout \"120\";" | sudo tee -a /etc/apt/apt.conf.d/80-custom
+ run: ${{env.APT_SET_CONF}}
- name: update apt
run: sudo apt update
- name: install monero dependencies
- run: sudo apt -y install build-essential cmake libboost-all-dev miniupnpc libunbound-dev graphviz doxygen libunwind8-dev pkg-config libssl-dev libzmq3-dev libsodium-dev libhidapi-dev libnorm-dev libusb-1.0-0-dev libpgm-dev libprotobuf-dev protobuf-compiler ccache
+ run: ${{env.APT_INSTALL_LINUX}}
- name: build
run: |
ccache --max-size=150M
@@ -96,16 +102,13 @@ jobs:
key: ccache-ubuntu-libwallet-${{ github.sha }}
restore-keys: ccache-ubuntu-libwallet-
- name: remove bundled boost
- run: sudo rm -rf /usr/local/share/boost
+ run: ${{env.REMOVE_BUNDLED_BOOST}}
- name: set apt conf
- run: |
- echo "Acquire::Retries \"3\";" | sudo tee -a /etc/apt/apt.conf.d/80-custom
- echo "Acquire::http::Timeout \"120\";" | sudo tee -a /etc/apt/apt.conf.d/80-custom
- echo "Acquire::ftp::Timeout \"120\";" | sudo tee -a /etc/apt/apt.conf.d/80-custom
+ run: ${{env.APT_SET_CONF}}
- name: update apt
run: sudo apt update
- name: install monero dependencies
- run: sudo apt -y install build-essential cmake libboost-all-dev miniupnpc libunbound-dev graphviz doxygen libunwind8-dev pkg-config libssl-dev libzmq3-dev libsodium-dev libhidapi-dev libnorm-dev libusb-1.0-0-dev libpgm-dev libprotobuf-dev protobuf-compiler ccache
+ run: ${{env.APT_INSTALL_LINUX}}
- name: build
run: |
ccache --max-size=150M
@@ -129,16 +132,13 @@ jobs:
key: test-ubuntu-ccache-${{ github.sha }}
restore-keys: test-ubuntu-ccache-
- name: remove bundled boost
- run: sudo rm -rf /usr/local/share/boost
+ run: ${{env.REMOVE_BUNDLED_BOOST}}
- name: set apt conf
- run: |
- echo "Acquire::Retries \"3\";" | sudo tee -a /etc/apt/apt.conf.d/80-custom
- echo "Acquire::http::Timeout \"120\";" | sudo tee -a /etc/apt/apt.conf.d/80-custom
- echo "Acquire::ftp::Timeout \"120\";" | sudo tee -a /etc/apt/apt.conf.d/80-custom
+ run: ${{env.APT_SET_CONF}}
- name: update apt
run: sudo apt update
- name: install monero dependencies
- run: sudo apt -y install build-essential cmake libboost-all-dev miniupnpc libunbound-dev graphviz doxygen libunwind8-dev pkg-config libssl-dev libzmq3-dev libsodium-dev libhidapi-dev libnorm-dev libusb-1.0-0-dev libpgm-dev libprotobuf-dev protobuf-compiler ccache
+ run: ${{env.APT_INSTALL_LINUX}}
- name: install Python dependencies
run: pip install requests psutil monotonic
- name: tests
@@ -154,3 +154,20 @@ jobs:
# ARCH="default" (not "native") ensures, that a different execution host can execute binaries compiled elsewhere.
# BUILD_SHARED_LIBS=ON speeds up the linkage part a bit, reduces size, and is the only place where the dynamic linkage is tested.
+ source-archive:
+ runs-on: ubuntu-20.04
+ steps:
+ - uses: actions/checkout@v1
+ with:
+ submodules: recursive
+ - name: archive
+ run: |
+ pip install git-archive-all
+ export VERSION="monero-$(git describe)"
+ export OUTPUT="$VERSION.tar"
+ echo "OUTPUT=$OUTPUT" >> $GITHUB_ENV
+ /home/runner/.local/bin/git-archive-all --prefix "$VERSION/" --force-submodules "$OUTPUT"
+ - uses: actions/upload-artifact@v2
+ with:
+ name: ${{ env.OUTPUT }}
+ path: /home/runner/work/monero/monero/${{ env.OUTPUT }}
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3ca1c16fb..fecea318b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -164,6 +164,18 @@ function (monero_add_minimal_executable name)
monero_set_target_no_relink( ${name} )
endfunction()
+# Finds all headers in a directory and its subdirs, to be able to search for them and autosave in IDEs.
+#
+# Parameters:
+# - headers_found: Output variable, which will hold the found headers
+# - module_root_dir: The search path for the headers. Typically it will be the module's root dir.
+macro (monero_find_all_headers headers_found module_root_dir)
+ file(GLOB ${headers_found}
+ "${module_root_dir}/*.h*" # h* will include hpps as well.
+ "${module_root_dir}/**/*.h*" # Any number of subdirs will be included.
+)
+endmacro()
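+# Example use (the module name below is hypothetical, for illustration only; see contrib/epee/src/CMakeLists.txt in this diff for a real call):
+#   monero_find_all_headers(MY_MODULE_HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/include")
+#   add_library(my_module STATIC my_module.cpp ${MY_MODULE_HEADERS})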
+
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
message(STATUS "Setting default build type: ${CMAKE_BUILD_TYPE}")
diff --git a/Dockerfile b/Dockerfile
index 51fd51e1c..f21f74ac3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -46,7 +46,7 @@ ARG BOOST_VERSION=1_70_0
ARG BOOST_VERSION_DOT=1.70.0
ARG BOOST_HASH=430ae8354789de4fd19ee52f3b1f739e1fba576f0aded0897c3c2bc00fb38778
RUN set -ex \
- && curl -s -L -o boost_${BOOST_VERSION}.tar.bz2 https://dl.bintray.com/boostorg/release/${BOOST_VERSION_DOT}/source/boost_${BOOST_VERSION}.tar.bz2 \
+ && curl -s -L -o boost_${BOOST_VERSION}.tar.bz2 https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION_DOT}/boost_${BOOST_VERSION}.tar.bz2 \
&& echo "${BOOST_HASH} boost_${BOOST_VERSION}.tar.bz2" | sha256sum -c \
&& tar -xvf boost_${BOOST_VERSION}.tar.bz2 \
&& cd boost_${BOOST_VERSION} \
diff --git a/contrib/depends/packages/boost.mk b/contrib/depends/packages/boost.mk
index 0d241928e..4571d4232 100644
--- a/contrib/depends/packages/boost.mk
+++ b/contrib/depends/packages/boost.mk
@@ -1,6 +1,6 @@
package=boost
$(package)_version=1_64_0
-$(package)_download_path=https://dl.bintray.com/boostorg/release/1.64.0/source/
+$(package)_download_path=https://downloads.sourceforge.net/project/boost/boost/1.64.0/
$(package)_file_name=$(package)_$($(package)_version).tar.bz2
$(package)_sha256_hash=7bcc5caace97baa948931d712ea5f37038dbb1c5d89b43ad4def4ed7cb683332
$(package)_dependencies=libiconv
diff --git a/contrib/epee/src/CMakeLists.txt b/contrib/epee/src/CMakeLists.txt
index 368f49c95..0f0a6ecad 100644
--- a/contrib/epee/src/CMakeLists.txt
+++ b/contrib/epee/src/CMakeLists.txt
@@ -28,11 +28,8 @@
set(EPEE_INCLUDE_DIR_BASE "${CMAKE_CURRENT_SOURCE_DIR}/../include")
-# Adding headers to the file list, to be able to search for them in IDEs.
-file(GLOB EPEE_HEADERS_PUBLIC
- "${EPEE_INCLUDE_DIR_BASE}/*.h*" # h* will include hpps as well.
- "${EPEE_INCLUDE_DIR_BASE}/**/*.h*" # Any number of subdirs will be included.
-)
+# Add headers to the file list, to be able to search for them and autosave in IDEs.
+monero_find_all_headers(EPEE_HEADERS_PUBLIC "${EPEE_INCLUDE_DIR_BASE}")
add_library(epee STATIC byte_slice.cpp byte_stream.cpp hex.cpp abstract_http_client.cpp http_auth.cpp mlog.cpp net_helper.cpp net_utils_base.cpp string_tools.cpp
wipeable_string.cpp levin_base.cpp memwipe.c connection_basic.cpp network_throttle.cpp network_throttle-detail.cpp mlocker.cpp buffer.cpp net_ssl.cpp
diff --git a/contrib/epee/src/byte_slice.cpp b/contrib/epee/src/byte_slice.cpp
index 453b63a4c..430853c64 100644
--- a/contrib/epee/src/byte_slice.cpp
+++ b/contrib/epee/src/byte_slice.cpp
@@ -151,7 +151,7 @@ namespace epee
: byte_slice()
{
std::size_t space_needed = 0;
- for (const auto source : sources)
+ for (const auto& source : sources)
space_needed += source.size();
if (space_needed)
@@ -160,7 +160,7 @@ namespace epee
span<std::uint8_t> out{reinterpret_cast<std::uint8_t*>(storage.get() + 1), space_needed};
portion_ = {out.data(), out.size()};
- for (const auto source : sources)
+ for (const auto& source : sources)
{
std::memcpy(out.data(), source.data(), source.size());
if (out.remove_prefix(source.size()) < source.size())
diff --git a/docs/COMPILING_DEBUGGING_TESTING.md b/docs/COMPILING_DEBUGGING_TESTING.md
new file mode 100644
index 000000000..f5c202303
--- /dev/null
+++ b/docs/COMPILING_DEBUGGING_TESTING.md
@@ -0,0 +1,86 @@
+# Compiling, debugging and testing efficiently
+
+This document describes ways of compiling, debugging and testing efficiently for various use cases.
+The intended audience is developers who want to leverage the newly added `CMake` tricks in Monero. The document lowers the entry barrier for these developers.
+Before reading this document, please consult the "Build instructions" section in the main README.md.
+Some information from README.md will be repeated here, but the aim is to go beyond it.
+
+## Basic compilation
+
+Monero can be compiled via the main `Makefile`, using one of several targets listed there.
+The targets are essentially presets for `CMake` calls with various options, followed by `make` commands for building or, in some cases, `make test` for testing.
+It is possible to extract these `CMake` calls and modify them for your specific needs. For example, a minimal external `cmake` command to compile Monero, executed from within a newly created build directory, could look like:
+
+`cmake -S "$DIR_SRC" -DCMAKE_BUILD_TYPE=Release && make`
+
+where the variable `DIR_SRC` is expected to store the path to the Monero source code.
+
+## Use cases
+
+### Test Driven Development (TDD) - shared libraries for release builds
+
+Building shared libraries saves a lot of disk space and linking time. By default, only debug builds produce shared libraries. If you'd like to produce shared libraries for a release build as well, for the same reasons as for the debug version, add the `BUILD_SHARED_LIBS=ON` flag to the `CMake` call, like the following:
+
+`cmake -S "$DIR_SRC" -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=ON && make`
+
+A perfect use case for the above call is following the Test Driven Development (TDD) principles. In a nutshell, you first write a couple of tests that describe the (new) requirements of the class or method you're about to write or modify. The tests typically take quite a long time to compile, so ideally you write them once. Once the tests are in place, the only thing left to do is to keep modifying the implementation for as long as the tests are failing. If the implementation is properly contained within a .cpp file, the only time cost to pay is compiling that single source file and regenerating the implementation's shared library. The test itself doesn't have to be touched and will pick up the new version of the implementation (via the shared library) upon its next execution.
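+
+A minimal sketch of this cycle follows (the test, the `my_feature.h` header and the `sum()` function are hypothetical, used only to illustrate the idea; they are not part of the Monero tree):
+
+```cpp
+// tests/unit_tests/my_feature.cpp -- written once, before the implementation exists
+#include <gtest/gtest.h>
+#include "my_feature.h" // hypothetical header declaring my_feature::sum()
+
+TEST(my_feature, adds_two_numbers)
+{
+  // The requirement: sum() returns the arithmetic sum of its arguments.
+  EXPECT_EQ(5, my_feature::sum(2, 3));
+}
+```
+
+With `BUILD_SHARED_LIBS=ON`, each subsequent edit of the implementation's .cpp file only recompiles that translation unit and relinks its shared library; re-running `tests/unit_tests/unit_tests --gtest_filter="my_feature.*"` then exercises the new code without relinking the test binary itself.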
+
+### Project generation for IDEs
+
+CMake can generate project files for many IDEs. The list of supported generators can be obtained by typing in the console:
+
+`cmake -G`
+
+For instance, in order to generate Makefiles and project files for the Code::Blocks IDE, this part of the call would look like the following:
+
+`cmake -G "CodeBlocks - Unix Makefiles" (...)`
+
+The additional artifact of the above call is the `monero.cbp` Code::Blocks project file in the build directory.
+
+### Debugging in Code::Blocks (CB)
+
+First prepare the build directory for debugging using the following example command, assuming that the path to the source dir is held in the `DIR_SRC` variable, and using 2 cores:
+
+`cmake -S "$DIR_SRC" -G "CodeBlocks - Unix Makefiles" -DCMAKE_BUILD_TYPE=Debug -DBUILD_TESTS=ON && make -j 2`
+
+After a successful build, open `monero.cbp` with CB. From CB's menu bar select the target that you want to debug. Assuming it is the unit tests:
+
+`Build -> Select target -> Select target -> unit_tests`
+
+In order to lower turnaround times, we will run only the specific portion of code we're interested in, without having to go through all the time-costly initialization and execution of unrelated parts. For this we'll use GTest's test-filtering capabilities. From the build directory, run the following command to list all the registered tests:
+
+`tests/unit_tests/unit_tests --gtest_list_tests`
+
+For example, if you're only interested in logging, you'd find in the list the label `logging.` and its subtests. To execute all the logging tests, you'd write in the console:
+
+`tests/unit_tests/unit_tests --gtest_filter="logging.*"`
+
+This parameter is what we need to pass to CB in order to reproduce the same behaviour in CB's debugger. From the main menu select:
+
+`Project -> Set program's arguments...`
+
+Then in the `Program's arguments` textbox you'd write in this case:
+
+`--gtest_filter="logging.*"`
+
+Verify that the expected unit tests are properly executed by pressing `F9`, or select:
+
+`Build -> Build and run`
+
+If everything looks fine, then after setting some breakpoints of your choice, the target is ready for debugging in CB via:
+
+`Debug -> Start/Continue`
+
+## To be done (and merged):
+### Multihost parallel compilation
+https://github.com/monero-project/monero/pull/7160
+
+### Faster core_tests with caching
+https://github.com/monero-project/monero/pull/5821
+
+### Precompiled headers
+https://github.com/monero-project/monero/pull/7216
+
+### Unity builds
+https://github.com/monero-project/monero/pull/7217
+
diff --git a/src/cryptonote_protocol/cryptonote_protocol_handler.inl b/src/cryptonote_protocol/cryptonote_protocol_handler.inl
index afc81f552..685968c08 100644
--- a/src/cryptonote_protocol/cryptonote_protocol_handler.inl
+++ b/src/cryptonote_protocol/cryptonote_protocol_handler.inl
@@ -2310,7 +2310,7 @@ skip:
const uint32_t peer_stripe = tools::get_pruning_stripe(context.m_pruning_seed);
const uint32_t first_stripe = tools::get_pruning_stripe(span.first, context.m_remote_blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES);
const uint32_t last_stripe = tools::get_pruning_stripe(span.first + span.second - 1, context.m_remote_blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES);
- if ((((first_stripe && peer_stripe != first_stripe) || (last_stripe && peer_stripe != last_stripe)) && !m_sync_pruned_blocks) || (m_sync_pruned_blocks && req.prune))
+ if (((first_stripe && peer_stripe != first_stripe) || (last_stripe && peer_stripe != last_stripe)) && !m_sync_pruned_blocks)
{
MDEBUG(context << "We need full data, but the peer does not have it, dropping peer");
return false;
diff --git a/src/p2p/net_node.inl b/src/p2p/net_node.inl
index e1d6d1e10..a0b8438b2 100644
--- a/src/p2p/net_node.inl
+++ b/src/p2p/net_node.inl
@@ -682,11 +682,13 @@ namespace nodetool
full_addrs.insert("212.83.175.67:28080");
full_addrs.insert("212.83.172.165:28080");
full_addrs.insert("192.110.160.146:28080");
+ full_addrs.insert("88.99.173.38:28080");
}
else if (m_nettype == cryptonote::STAGENET)
{
full_addrs.insert("162.210.173.150:38080");
full_addrs.insert("192.110.160.146:38080");
+ full_addrs.insert("88.99.173.38:38080");
}
else if (m_nettype == cryptonote::FAKECHAIN)
{
@@ -701,6 +703,7 @@ namespace nodetool
full_addrs.insert("209.250.243.248:18080");
full_addrs.insert("104.238.221.81:18080");
full_addrs.insert("66.85.74.134:18080");
+ full_addrs.insert("88.99.173.38:18080");
}
return full_addrs;
}
diff --git a/src/serialization/json_object.h b/src/serialization/json_object.h
index de14c8911..35ea990b3 100644
--- a/src/serialization/json_object.h
+++ b/src/serialization/json_object.h
@@ -365,7 +365,7 @@ inline typename std::enable_if<sfinae::is_vector_like<Vec>::value, void>::type t
static_assert(!std::is_same<value_type, unsigned char>::value, "encoding an array of unsigned char is faster as hex");
dest.StartArray();
- for (const auto& t : vec)
+ for (auto t : vec)
toJsonValue(dest, t);
dest.EndArray();
}
diff --git a/src/simplewallet/simplewallet.cpp b/src/simplewallet/simplewallet.cpp
index a7856d60f..e859a4693 100644
--- a/src/simplewallet/simplewallet.cpp
+++ b/src/simplewallet/simplewallet.cpp
@@ -5895,7 +5895,7 @@ bool simple_wallet::refresh_main(uint64_t start_height, enum ResetType reset, bo
if (reset != ResetNone)
{
if (reset == ResetSoftKeepKI)
- height_pre = m_wallet->hash_m_transfers(-1, transfer_hash_pre);
+ height_pre = m_wallet->hash_m_transfers(boost::none, transfer_hash_pre);
m_wallet->rescan_blockchain(reset == ResetHard, false, reset == ResetSoftKeepKI);
}
diff --git a/src/wallet/api/wallet.cpp b/src/wallet/api/wallet.cpp
index adff042ad..db3049f9e 100644
--- a/src/wallet/api/wallet.cpp
+++ b/src/wallet/api/wallet.cpp
@@ -1214,6 +1214,68 @@ bool WalletImpl::importKeyImages(const string &filename)
return true;
}
+bool WalletImpl::exportOutputs(const string &filename, bool all)
+{
+ if (m_wallet->key_on_device())
+ {
+ setStatusError(string(tr("Not supported on HW wallets.")) + filename);
+ return false;
+ }
+
+ try
+ {
+ std::string data = m_wallet->export_outputs_to_str(all);
+ bool r = m_wallet->save_to_file(filename, data);
+ if (!r)
+ {
+ LOG_ERROR("Failed to save file " << filename);
+ setStatusError(string(tr("Failed to save file: ")) + filename);
+ return false;
+ }
+ }
+ catch (const std::exception &e)
+ {
+ LOG_ERROR("Error exporting outputs: " << e.what());
+ setStatusError(string(tr("Error exporting outputs: ")) + e.what());
+ return false;
+ }
+
+ LOG_PRINT_L2("Outputs exported to " << filename);
+ return true;
+}
+
+bool WalletImpl::importOutputs(const string &filename)
+{
+ if (m_wallet->key_on_device())
+ {
+ setStatusError(string(tr("Not supported on HW wallets.")) + filename);
+ return false;
+ }
+
+ std::string data;
+ bool r = m_wallet->load_from_file(filename, data);
+ if (!r)
+ {
+ LOG_ERROR("Failed to read file: " << filename);
+ setStatusError(string(tr("Failed to read file: ")) + filename);
+ return false;
+ }
+
+ try
+ {
+ size_t n_outputs = m_wallet->import_outputs_from_str(data);
+ LOG_PRINT_L2(std::to_string(n_outputs) << " outputs imported");
+ }
+ catch (const std::exception &e)
+ {
+ LOG_ERROR("Failed to import outputs: " << e.what());
+ setStatusError(string(tr("Failed to import outputs: ")) + e.what());
+ return false;
+ }
+
+ return true;
+}
+
void WalletImpl::addSubaddressAccount(const std::string& label)
{
m_wallet->add_subaddress_account(label);
diff --git a/src/wallet/api/wallet.h b/src/wallet/api/wallet.h
index 13b33d1cd..ce2d7d7e4 100644
--- a/src/wallet/api/wallet.h
+++ b/src/wallet/api/wallet.h
@@ -167,6 +167,8 @@ public:
virtual UnsignedTransaction * loadUnsignedTx(const std::string &unsigned_filename) override;
bool exportKeyImages(const std::string &filename, bool all = false) override;
bool importKeyImages(const std::string &filename) override;
+ bool exportOutputs(const std::string &filename, bool all = false) override;
+ bool importOutputs(const std::string &filename) override;
virtual void disposeTransaction(PendingTransaction * t) override;
virtual uint64_t estimateTransactionFee(const std::vector<std::pair<std::string, uint64_t>> &destinations,
diff --git a/src/wallet/api/wallet2_api.h b/src/wallet/api/wallet2_api.h
index 320b458bd..e34332734 100644
--- a/src/wallet/api/wallet2_api.h
+++ b/src/wallet/api/wallet2_api.h
@@ -919,6 +919,19 @@ struct Wallet
*/
virtual bool importKeyImages(const std::string &filename) = 0;
+ /*!
+ * \brief exportOutputs - exports outputs to file
+ * \param filename
+ * \return - true on success
+ */
+ virtual bool exportOutputs(const std::string &filename, bool all = false) = 0;
+
+ /*!
+ * \brief importOutputs - imports outputs from file
+ * \param filename
+ * \return - true on success
+ */
+ virtual bool importOutputs(const std::string &filename) = 0;
virtual TransactionHistory * history() = 0;
virtual AddressBook * addressBook() = 0;
diff --git a/src/wallet/wallet2.cpp b/src/wallet/wallet2.cpp
index c3325c24d..0b310111e 100644
--- a/src/wallet/wallet2.cpp
+++ b/src/wallet/wallet2.cpp
@@ -12228,7 +12228,7 @@ uint64_t wallet2::get_approximate_blockchain_height() const
// Calculated blockchain height
uint64_t approx_blockchain_height = fork_block + (time(NULL) - fork_time)/seconds_per_block;
// testnet got some huge rollbacks, so the estimation is way off
- static const uint64_t approximate_testnet_rolled_back_blocks = 303967;
+ static const uint64_t approximate_testnet_rolled_back_blocks = 342100;
if (m_nettype == TESTNET && approx_blockchain_height > approximate_testnet_rolled_back_blocks)
approx_blockchain_height -= approximate_testnet_rolled_back_blocks;
LOG_PRINT_L2("Calculated blockchain height: " << approx_blockchain_height);
@@ -14189,15 +14189,15 @@ void wallet2::hash_m_transfer(const transfer_details & transfer, crypto::hash &h
KECCAK_CTX state;
keccak_init(&state);
keccak_update(&state, (const uint8_t *) transfer.m_txid.data, sizeof(transfer.m_txid.data));
- keccak_update(&state, (const uint8_t *) transfer.m_internal_output_index, sizeof(transfer.m_internal_output_index));
- keccak_update(&state, (const uint8_t *) transfer.m_global_output_index, sizeof(transfer.m_global_output_index));
- keccak_update(&state, (const uint8_t *) transfer.m_amount, sizeof(transfer.m_amount));
+ keccak_update(&state, (const uint8_t *) &transfer.m_internal_output_index, sizeof(transfer.m_internal_output_index));
+ keccak_update(&state, (const uint8_t *) &transfer.m_global_output_index, sizeof(transfer.m_global_output_index));
+ keccak_update(&state, (const uint8_t *) &transfer.m_amount, sizeof(transfer.m_amount));
keccak_finish(&state, (uint8_t *) hash.data);
}
//----------------------------------------------------------------------------------------------------
-uint64_t wallet2::hash_m_transfers(int64_t transfer_height, crypto::hash &hash) const
+uint64_t wallet2::hash_m_transfers(boost::optional<uint64_t> transfer_height, crypto::hash &hash) const
{
- CHECK_AND_ASSERT_THROW_MES(transfer_height > (int64_t)m_transfers.size(), "Hash height is greater than number of transfers");
+ CHECK_AND_ASSERT_THROW_MES(!transfer_height || *transfer_height <= m_transfers.size(), "Hash height is greater than number of transfers");
KECCAK_CTX state;
crypto::hash tmp_hash{};
@@ -14205,12 +14205,12 @@ uint64_t wallet2::hash_m_transfers(int64_t transfer_height, crypto::hash &hash)
keccak_init(&state);
for(const transfer_details & transfer : m_transfers){
- if (transfer_height >= 0 && current_height >= (uint64_t)transfer_height){
+ if (transfer_height && current_height >= *transfer_height){
break;
}
hash_m_transfer(transfer, tmp_hash);
- keccak_update(&state, (const uint8_t *) transfer.m_block_height, sizeof(transfer.m_block_height));
+ keccak_update(&state, (const uint8_t *) &transfer.m_block_height, sizeof(transfer.m_block_height));
keccak_update(&state, (const uint8_t *) tmp_hash.data, sizeof(tmp_hash.data));
current_height += 1;
}
@@ -14222,23 +14222,28 @@ uint64_t wallet2::hash_m_transfers(int64_t transfer_height, crypto::hash &hash)
void wallet2::finish_rescan_bc_keep_key_images(uint64_t transfer_height, const crypto::hash &hash)
{
// Compute hash of m_transfers, if differs there had to be BC reorg.
- crypto::hash new_transfers_hash{};
- hash_m_transfers((int64_t) transfer_height, new_transfers_hash);
+ if (transfer_height <= m_transfers.size()) {
+ crypto::hash new_transfers_hash{};
+ hash_m_transfers(transfer_height, new_transfers_hash);
- if (new_transfers_hash != hash)
- {
- // Soft-Reset to avoid inconsistency in case of BC reorg.
- clear_soft(false); // keep_key_images works only with soft reset.
- THROW_WALLET_EXCEPTION_IF(true, error::wallet_internal_error, "Transfers changed during rescan, soft or hard rescan is needed");
- }
+ if (new_transfers_hash == hash) {
+ // Restore key images in m_transfers from m_key_images
+ for(auto it = m_key_images.begin(); it != m_key_images.end(); it++)
+ {
+ THROW_WALLET_EXCEPTION_IF(it->second >= m_transfers.size(),
+ error::wallet_internal_error,
+ "Key images cache contains illegal transfer offset");
+ m_transfers[it->second].m_key_image = it->first;
+ m_transfers[it->second].m_key_image_known = true;
+ }
- // Restore key images in m_transfers from m_key_images
- for(auto it = m_key_images.begin(); it != m_key_images.end(); it++)
- {
- THROW_WALLET_EXCEPTION_IF(it->second >= m_transfers.size(), error::wallet_internal_error, "Key images cache contains illegal transfer offset");
- m_transfers[it->second].m_key_image = it->first;
- m_transfers[it->second].m_key_image_known = true;
+ return;
+ }
}
+
+ // Soft-Reset to avoid inconsistency in case of BC reorg.
+ clear_soft(false); // keep_key_images works only with soft reset.
+ THROW_WALLET_EXCEPTION_IF(true, error::wallet_internal_error, "Transfers changed during rescan, soft or hard rescan is needed");
}
//----------------------------------------------------------------------------------------------------
uint64_t wallet2::get_bytes_sent() const
diff --git a/src/wallet/wallet2.h b/src/wallet/wallet2.h
index e96a6b51c..facf9878d 100644
--- a/src/wallet/wallet2.h
+++ b/src/wallet/wallet2.h
@@ -1547,7 +1547,7 @@ private:
bool is_tx_spendtime_unlocked(uint64_t unlock_time, uint64_t block_height);
void hash_m_transfer(const transfer_details & transfer, crypto::hash &hash) const;
- uint64_t hash_m_transfers(int64_t transfer_height, crypto::hash &hash) const;
+ uint64_t hash_m_transfers(boost::optional<uint64_t> transfer_height, crypto::hash &hash) const;
void finish_rescan_bc_keep_key_images(uint64_t transfer_height, const crypto::hash &hash);
void enable_dns(bool enable) { m_use_dns = enable; }
void set_offline(bool offline = true);
diff --git a/tests/functional_tests/functional_tests_rpc.py b/tests/functional_tests/functional_tests_rpc.py
index 79e04b8a6..450552cf8 100755
--- a/tests/functional_tests/functional_tests_rpc.py
+++ b/tests/functional_tests/functional_tests_rpc.py
@@ -44,6 +44,7 @@ N_MONERODS = 4
N_WALLETS = 5
WALLET_DIRECTORY = builddir + "/functional-tests-directory"
+FUNCTIONAL_TESTS_DIRECTORY = builddir + "/tests/functional_tests"
DIFFICULTY = 10
monerod_base = [builddir + "/bin/monerod", "--regtest", "--fixed-difficulty", str(DIFFICULTY), "--no-igd", "--p2p-bind-port", "monerod_p2p_port", "--rpc-bind-port", "monerod_rpc_port", "--zmq-rpc-bind-port", "monerod_zmq_port", "--non-interactive", "--disable-dns-checkpoints", "--check-updates", "disabled", "--rpc-ssl", "disabled", "--data-dir", "monerod_data_dir", "--log-level", "1"]
@@ -71,14 +72,14 @@ for i in range(N_MONERODS):
command_lines.append([str(18180+i) if x == "monerod_rpc_port" else str(18280+i) if x == "monerod_p2p_port" else str(18380+i) if x == "monerod_zmq_port" else builddir + "/functional-tests-directory/monerod" + str(i) if x == "monerod_data_dir" else x for x in monerod_base])
if i < len(monerod_extra):
command_lines[-1] += monerod_extra[i]
- outputs.append(open(builddir + '/tests/functional_tests/monerod' + str(i) + '.log', 'a+'))
+ outputs.append(open(FUNCTIONAL_TESTS_DIRECTORY + '/monerod' + str(i) + '.log', 'a+'))
ports.append(18180+i)
for i in range(N_WALLETS):
command_lines.append([str(18090+i) if x == "wallet_port" else x for x in wallet_base])
if i < len(wallet_extra):
command_lines[-1] += wallet_extra[i]
- outputs.append(open(builddir + '/tests/functional_tests/wallet' + str(i) + '.log', 'a+'))
+ outputs.append(open(FUNCTIONAL_TESTS_DIRECTORY + '/wallet' + str(i) + '.log', 'a+'))
ports.append(18090+i)
print('Starting servers...')
@@ -89,9 +90,11 @@ try:
PYTHONPATH += srcdir + '/../../utils/python-rpc'
os.environ['PYTHONPATH'] = PYTHONPATH
os.environ['WALLET_DIRECTORY'] = WALLET_DIRECTORY
+ os.environ['FUNCTIONAL_TESTS_DIRECTORY'] = FUNCTIONAL_TESTS_DIRECTORY
+ os.environ['SOURCE_DIRECTORY'] = srcdir
os.environ['PYTHONIOENCODING'] = 'utf-8'
os.environ['DIFFICULTY'] = str(DIFFICULTY)
- os.environ['MAKE_TEST_SIGNATURE'] = builddir + '/tests/functional_tests/make_test_signature'
+ os.environ['MAKE_TEST_SIGNATURE'] = FUNCTIONAL_TESTS_DIRECTORY + '/make_test_signature'
os.environ['SEEDHASH_EPOCH_BLOCKS'] = "8"
os.environ['SEEDHASH_EPOCH_LAG'] = "4"
diff --git a/tests/functional_tests/mining.py b/tests/functional_tests/mining.py
index cb2fd66e1..7ecbdeed5 100755
--- a/tests/functional_tests/mining.py
+++ b/tests/functional_tests/mining.py
@@ -42,6 +42,10 @@ Test the following RPCs:
- start_mining
- stop_mining
- mining_status
+
+Control the behavior with these environment variables:
+ MINING_NO_MEASUREMENT - set to anything to use fixed, sufficiently large mining timeouts
+ MINING_SILENT - set to anything to disable mining logging
"""
from framework.daemon import Daemon
@@ -77,8 +81,11 @@ class MiningTest():
cores_init = multiprocessing.cpu_count() # RX init uses all cores
cores_mine = 1 # Mining uses a parametric number of cores
- time_pi_single_cpu = self.measure_cpu_power_get_time(cores_mine)
- time_pi_all_cores = self.measure_cpu_power_get_time(cores_init)
+ is_mining_measurement = 'MINING_NO_MEASUREMENT' not in os.environ
+
+ if is_mining_measurement: # A dynamic measurement of the available CPU power was requested
+ time_pi_single_cpu = self.measure_cpu_power_get_time(cores_mine)
+ time_pi_all_cores = self.measure_cpu_power_get_time(cores_init)
# This is the last measurement, since it takes very little time and can be placed timewise-closer to the mining itself.
available_ram = self.get_available_ram() # So far no ideas how to use this var, other than printing it
@@ -110,38 +117,42 @@ class MiningTest():
target_height = initial_height + 5
height = initial_height
- """
- Randomx init has high variance on CI machines due to noisy neighbors,
- taking up resources in parallel (including by our own jobs).
-
- Mining is organized in the following scheme:
- 1) first loop's pass: RandomX init and mining
- 2) every next pass: only mining
- Pass 1) takes much more time than pass 2)
- Pass 1) uses all cores, pass 2) just one (currently)
- For the above reasons both passes need separate timeouts and adjustments.
- After the first pass, the timeout is being reset to a lower value.
- """
-
- def calc_timeout(seconds_constant, time_pi, cores):
+ if not is_mining_measurement:
+ timeout_init = 600
+ timeout_mine = 300
+ else:
"""
- The time it took to calculate pi under certain conditions
- is proportional to the time it will take to calculate the real job.
+ Randomx init has high variance on CI machines due to noisy neighbors,
+ taking up resources in parallel (including by our own jobs).
- The number of cores used decreases the time almost linearly.
+ Mining is organized in the following scheme:
+ 1) first loop's pass: RandomX init and mining
+ 2) every next pass: only mining
+ Pass 1) takes much more time than pass 2)
+ Pass 1) uses all cores, pass 2) just one (currently)
+ For the above reasons both passes need separate timeouts and adjustments.
+ After the first pass, the timeout is being reset to a lower value.
"""
- timeout = float(seconds_constant) * time_pi / float(cores)
- return timeout
- timeout_base_init = 60 # RX init needs more time
- timeout_base_mine = 20
- timeout_init = calc_timeout(timeout_base_init, time_pi_all_cores, cores_init)
- timeout_mine = calc_timeout(timeout_base_mine, time_pi_single_cpu, cores_mine)
-
- msg = "Timeout for {} adjusted for the currently available CPU power, is {:.1f} s"
- print(msg.format("init, ", timeout_init))
- print(msg.format("mining,", timeout_mine))
+ def calc_timeout(seconds_constant, time_pi, cores):
+ """
+ The time it took to calculate pi under certain conditions
+ is proportional to the time it will take to calculate the real job.
+
+ The number of cores used decreases the time almost linearly.
+ """
+ timeout = float(seconds_constant) * time_pi / float(cores)
+ return timeout
+
+ timeout_base_init = 60 # RX init needs more time
+ timeout_base_mine = 20
+ timeout_init = calc_timeout(timeout_base_init, time_pi_all_cores, cores_init)
+ timeout_mine = calc_timeout(timeout_base_mine, time_pi_single_cpu, cores_mine)
+ msg_timeout_src = "adjusted for the currently available CPU power" if is_mining_measurement else "selected to have the default value"
+ msg = "Timeout for {} {}, is {:.1f} s"
+ self.print_mining_info(msg.format("init, ", msg_timeout_src, timeout_init))
+ self.print_mining_info(msg.format("mining,", msg_timeout_src, timeout_mine))
timeout = timeout_init
rx_inited = False # Gets initialized in the first pass of the below loop
while height < target_height:
@@ -197,17 +208,18 @@ class MiningTest():
res = wallet.stop_mining()
res_status = daemon.mining_status()
assert res_status.active == False
-
+
def measure_cpu_power_get_time(self, cores):
- print("Measuring the currently available CPU power...")
- time_pi = util_resources.get_time_pi_seconds(cores)
- print("Time taken to calculate Pi on {} core(s) was {:.2f} s.".format(cores, time_pi))
+ self.print_mining_info("Measuring the currently available CPU power...")
+ build_dir_functional_tests = os.environ['FUNCTIONAL_TESTS_DIRECTORY']
+ time_pi = util_resources.get_time_pi_seconds(cores, build_dir_functional_tests)
+ self.print_mining_info("Time taken to calculate Pi on {} core(s) was {:.2f} s.".format(cores, time_pi))
return time_pi
-
+
def get_available_ram(self):
available_ram = util_resources.available_ram_gb()
threshold_ram = 3
- print("Available RAM =", round(available_ram, 1), "GB")
+ self.print_mining_info("Available RAM = " + str(round(available_ram, 1)) + " GB")
if available_ram < threshold_ram:
print("Warning! Available RAM =", round(available_ram, 1),
"GB is less than the reasonable threshold =", threshold_ram,
@@ -240,8 +252,18 @@ class MiningTest():
res = daemon.get_height()
assert res.height == height + i + 1
assert res.hash == block_hash
-
+
+ def is_mining_silent(self):
+ return 'MINING_SILENT' in os.environ
+
+ def print_mining_info(self, msg):
+ if self.is_mining_silent():
+ return
+ print(msg)
+
def print_time_taken(self, start, msg_context):
+ if self.is_mining_silent():
+ return
seconds_passed = monotonic.monotonic() - start
print("Time taken for", msg_context, "=", round(seconds_passed, 1), "s.")
diff --git a/tests/functional_tests/util_resources.py b/tests/functional_tests/util_resources.py
index e45122e66..0ea96c129 100755
--- a/tests/functional_tests/util_resources.py
+++ b/tests/functional_tests/util_resources.py
@@ -43,8 +43,8 @@ def available_ram_gb():
ram_gb = ram_bytes / kilo**3
return ram_gb
-def get_time_pi_seconds(cores):
- app_path = './cpu_power_test'
+def get_time_pi_seconds(cores, app_dir='.'):
+ app_path = '{}/cpu_power_test'.format(app_dir)
time_calc = subprocess.check_output([app_path, str(cores)])
decoded = time_calc.decode('utf-8')
miliseconds = int(decoded)
diff --git a/tests/unit_tests/epee_boosted_tcp_server.cpp b/tests/unit_tests/epee_boosted_tcp_server.cpp
index 84fc0a29b..d10b2bb33 100644
--- a/tests/unit_tests/epee_boosted_tcp_server.cpp
+++ b/tests/unit_tests/epee_boosted_tcp_server.cpp
@@ -320,7 +320,8 @@ TEST(test_epee_connection, test_lifetime)
connection_ptr conn;
{
lock_guard_t guard(shared_conn->lock);
- conn = std::move(shared_conn->conn.lock());
+ conn = shared_conn->conn.lock();
+ shared_conn->conn.reset();
}
if (conn)
conn->cancel();
diff --git a/tests/unit_tests/json_serialization.cpp b/tests/unit_tests/json_serialization.cpp
index f76199e57..9fa589139 100644
--- a/tests/unit_tests/json_serialization.cpp
+++ b/tests/unit_tests/json_serialization.cpp
@@ -51,7 +51,7 @@ namespace test
if (!cryptonote::find_tx_extra_field_by_type(extra_fields, key_field))
throw std::runtime_error{"invalid transaction"};
- for (auto const& input : boost::adaptors::index(source.vout))
+ for (auto const input : boost::adaptors::index(source.vout))
{
source_amount += input.value().amount;
auto const& key = boost::get<cryptonote::txout_to_key>(input.value().target);
diff --git a/utils/build_scripts/android32.Dockerfile b/utils/build_scripts/android32.Dockerfile
index a2d0edbb3..c0931ce05 100644
--- a/utils/build_scripts/android32.Dockerfile
+++ b/utils/build_scripts/android32.Dockerfile
@@ -44,7 +44,7 @@ ARG BOOST_VERSION=1_68_0
ARG BOOST_VERSION_DOT=1.68.0
ARG BOOST_HASH=7f6130bc3cf65f56a618888ce9d5ea704fa10b462be126ad053e80e553d6d8b7
RUN set -ex \
- && curl -s -L -o boost_${BOOST_VERSION}.tar.bz2 https://dl.bintray.com/boostorg/release/${BOOST_VERSION_DOT}/source/boost_${BOOST_VERSION}.tar.bz2 \
+ && curl -s -L -o boost_${BOOST_VERSION}.tar.bz2 https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION_DOT}/boost_${BOOST_VERSION}.tar.bz2 \
&& echo "${BOOST_HASH} boost_${BOOST_VERSION}.tar.bz2" | sha256sum -c \
&& tar -xvf boost_${BOOST_VERSION}.tar.bz2 \
&& rm -f boost_${BOOST_VERSION}.tar.bz2 \
diff --git a/utils/build_scripts/android64.Dockerfile b/utils/build_scripts/android64.Dockerfile
index eca2d4da1..3a62da464 100644
--- a/utils/build_scripts/android64.Dockerfile
+++ b/utils/build_scripts/android64.Dockerfile
@@ -44,7 +44,7 @@ ARG BOOST_VERSION=1_68_0
ARG BOOST_VERSION_DOT=1.68.0
ARG BOOST_HASH=7f6130bc3cf65f56a618888ce9d5ea704fa10b462be126ad053e80e553d6d8b7
RUN set -ex \
- && curl -s -L -o boost_${BOOST_VERSION}.tar.bz2 https://dl.bintray.com/boostorg/release/${BOOST_VERSION_DOT}/source/boost_${BOOST_VERSION}.tar.bz2 \
+ && curl -s -L -o boost_${BOOST_VERSION}.tar.bz2 https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION_DOT}/boost_${BOOST_VERSION}.tar.bz2 \
&& echo "${BOOST_HASH} boost_${BOOST_VERSION}.tar.bz2" | sha256sum -c \
&& tar -xvf boost_${BOOST_VERSION}.tar.bz2 \
&& rm -f boost_${BOOST_VERSION}.tar.bz2 \
diff --git a/utils/gpg_keys/mj-xmr.asc b/utils/gpg_keys/mj-xmr.asc
new file mode 100644
index 000000000..e02d3e511
--- /dev/null
+++ b/utils/gpg_keys/mj-xmr.asc
@@ -0,0 +1,52 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBGBxrvMBEADD7dDjSfrtiuQJEuMMMGYt3wGQ1iJdiLWe4THJDbtPDCLdBxh4
+fpYGMVkUYT8LJFkf41JD/hAIKMNQHdw7qmZcfN9F/9U+jInHRYBM1dbA/mHs8Jhi
+YEGrD+v7c5fEuFXANW7z2uPjszw67tv3K5z8pFkt6K0pCgnxSKBu10WYkV0fvnxz
+e3naXAnWQQ/qeGP2xT2F3S+bn5GvCTPw1pI1HAaFcYVD+XZpNdKQHnHMWlDrQ0qe
+zymqrknjMmtD70Z7gEOogNxjd2QXUsQDq7XwxfHnhVZHHm98O95gh4Uw2Pve/TMI
+f0ItA+MYp+hDqN7wmq3TjNSVI9L02m/RgpO7cxpEgQ9QFYJo3dHC8cP7x/uqH3vI
+usYjOJtzq3UZWn4hnHRviEMYSDPtfFmO5CRElueVsRTiG0h7cyQYZ8HrEC9qDyR8
+vn0shqrUbcwsplyhwtpoyuJKcK/mFnmETUQ7ZF5NqgIlOaU7Du8WBt84YrDKRe5q
+TKMTtDEf4PrejqhYvb7GNOK3D/LJ/dZTJIdETZ2rRCCftgKjTLJr9fRSrQnxw5NQ
+8fnnK/O8JNpRvnDvR2HlmI8w/sgEDT99U4XgSi9BIUrM8HYkNykENXsk2BvEIGEN
+txXwZKDJmxK787+y4WX2JfbhFGPNCXxuwrXUqNhKy4Qit140Z7WH6UIvSwARAQAB
+tEdtai14bXIgKEJlY2F1c2UgV29ybGQgZG9taW5hdGlvbiBzaG91bGQgYmUgZnVu
+LikgPG1qeG1yQHByb3Rvbm1haWwuY29tPokCVAQTAQoAPhYhBLMwkesQonK53VcC
++sEBv5QJNFHgBQJgca7zAhsDBQkJZgGABQsJCAcCBhUKCQgLAgQWAgMBAh4BAheA
+AAoJEMEBv5QJNFHgPykP/0YOJYU0ap8BpAtG9Dfv46UPy77XMw2ag2AJ0MYPrqy6
+omjha15qLHeXxCr3CJNYa4EEvX0hahSGn4k3VmcDHVYYpD4f/RZfu8fBpk0xQtsv
+wF7zuAn0KSeg7maopdWfcPn02uTOm2crzTMMm5scXZ/YmMviUFPsgpyIkM5oCre6
+/RhIK953OUpgc/JKIlhpABA7KBTWUbc18BGHHdcPntVKrIxrIvzchVwI2P8KDQki
+NS33Fc9EoOlDuzbTkQldZJrsEsMZWKfzoNNlZsLtroaS6SlqQNllYjSEJx7ccjEU
++zvMGLXhxqL7XTFO1HdvacWwPsSPR8MXrwUFcKrDDzC57rHNPrHncPY1JwEkN0Aj
+V3VzTM+5QPvo6QQ+aNISxy9Hs470SpLDmmBl//Dn4qbFWSqAQ3qSD7S34MgW0FeC
+4L1/uTsDokkx+ZIf7WQUl2p4vi8/puFw2y3VLs4WGaUYko1gjhoFIDs3gP7YIeE9
+Lp+6/+RxMkuSGTrHM9rcpeClp3jEN/I7n/epKpJOSgroN+/RdbOOnVECC7+kWkM0
+U9fPi+O3fRIlaR+Q8NWkokgkbtw6gYV3bKnW78AnF5INs3rrtOH1WtGSp7ghKP4p
+7GE5cCIbUsB3dT6Ifo5tAi3faYFyhUZ2GsYNM0fsge10OwtPs+4YbSuurWUyIwuW
+uQINBGBxrvMBEACxiDPPivT0Tv4DWtkGZ7tHRF0IT9nSZuL5F+qMX4D3hef3RfVY
+4hxAQe2elvMU4PV/3neaRyiWSd4/Q8DnWrGQGvn3gxx8AHZTQwv/01Ae0InyNI9t
+pzpBCMvOY73dba1JRklEMMyJPAUrdvmyRTEuZEIAxoVlaKKSAr4oeaENGLwWxB4o
+NsAPxKNWw8drGQfJixMA+IeqeMzRjo6o5gJzrlaTb3+S78a1rD0pMmY7s6j3LqmF
+kd0L/JlGrs+ILCxXjddg6FRL/qUtvOZX+jdogb1hKvPFCMMc1BfUdq9WbrCpoj54
+OhBnO+yCHi2faUi0YP8Fumu6RTCDN+MK0nEji+peaJApHf15ZVd9DAX9NzRl/iLb
+sMjf4KBE2BI6FaUpdzkTbH6F713O2ZGWGEROBTjKucO3OkjGfHKf9rxdGRt37I/z
+b59xRzSn91Vz1nEXCmDSO/iPAGZ3bQKh+zSGwjwJsZrBRCvS34AhGaEs8un6nd9M
+FoMClpyjmm3W6WUZnELmcdlKhY2xbi5Yub21+mrjjjS6BuFpSYIC1xKJrCdpXtAm
+i8x5mcKbY24uRuqsY9pQrDHVXK/Pwj6+k9rGwbe5P6VkbZOpJ2pWSL68EoxIo1zp
+bv6moIJtxipydUOD2k76W0oGplexoxtW5f1WKcb3ykJRRrdPQxtPaPK14QARAQAB
+iQI8BBgBCgAmFiEEszCR6xCicrndVwL6wQG/lAk0UeAFAmBxrvMCGwwFCQlmAYAA
+CgkQwQG/lAk0UeClZhAAwQa+XO2GhCfZgrYOOSR5h2iib5k8OWiTsV68/DHOMsFJ
+1WVDsqjlNEOhPPg0CbH4FMHJOndcsjJb+Rs6Y35YsgTmQl1SHlJXZinV1AJ+IaW/
+Esx+qOpoGUzCzCrjGpwSYHxGSQ52WpLvTaHbosH3x2XXTZ2znOdwTlFB/36Yco3c
+ROWVLT/FhjugxTP0KddZR9Lu7ThK7txQhTQp5oBwWgpv0YAPaRPA+BpvrtzI7xSx
+2AOMtr7jiGAu0yFL5WestRy37UmunxS8Rd8PGILcfFBIvfqNTMwU55SW1nS9XbRh
+bUb/iil+Ly6lVsGAPW7iUgEUO7HtsPxeF+Bk/Hh+ZCHglQrGpzGcPmgbG3IiMF9H
+nISbh6sFrkDXjyLNP1zl6wg0cTyk12OcYo0nX4jp1CY3Oztd3rVdcMT8Qot4JSHj
+rDx+b3FTErVCnitEeALuBt/JSNH0VMcgOmFen9goJRVXcW/Dnsb+7QWXooj3aC++
+O6eghDN1YyJClQqLxvVThr7pdlMiWgWzHLmRkI2o84r14qOC15yTh7qr3Y5LOMPN
+px9K2f9f2l6KJ0w7vgKwa6pYiVf1AAw2Ym98LAJn+6v9+nRx9mSq9Nw+M5eo9DXW
+gclwPV1SSWUls8jjOmluI6/j8TE9JMBy5UKX1g6TOKJWW6wXcOUfzNVtG1oRNPA=
+=egkb
+-----END PGP PUBLIC KEY BLOCK-----